mirror of https://github.com/docker/docker-py.git
Compare commits
317 Commits
Commit SHA1s (author and date columns did not survive the capture):

```
6e6a273573 526a9db743 e5c3eb18b6 820769e23c db7f8b8bb6 747d23b9d7 fad84c371a 5a8a42466e
03e43be6af 80a584651b 8ee28517c7 d9f9b965b2 fba6ffe297 99ce2e6d56 504ce6193c bb0edd1f66
e47e966e94 a8bac88221 e031cf0c23 b1265470e6 6bbf741c8c 96ef4d3bee a3652028b1 1ab40c8e92
b33088e0ca 45488acfc1 20879eca6a 4f2a26d21e 7785ad913d d8e9bcb278 2a059a9f19 e33e0a437e
b86573a3e3 e34bcf20d9 205d2f2bd0 b6464dbed9 9ad4bddc9e 336e65fc3c 4c6437d292 0fd79c8c0d
3d79ce8c60 dd82f9ae8e e91b280074 cb21af7f69 1818712b8c d50cc429c2 047df6b0d3 ae45d477c4
f128956034 bd164f928a 249654d4d9 694d9792e6 eeb9ea1937 08956b5fbc b8a6987cd5 f467fd9df9
3ec5a6849a 1784cc2962 6ceb08273c 097382b973 0fad869cc6 2a5f354b50 7d8a161b12 5388413dde
3d0a3f1d77 a9b5494fd0 cb8f2c6630 7140969239 586988ce2d fd2f5029f0 db4878118b 976c84c481
b3349c88ef b2378db7f1 911f866f72 26e07251d4 c9e3efddb8 4a88112345 b70cbd0129 7752996f78
5abae2dc8e c38656dc78 378325363e 0f0b20a6a7 bea63224e0 8b9ad7807f c68d532f54 a9a3775b15
3948540c89 0566f1260c cc76c9c20d 09f12f2046 6aec90a41b 8447f7b0f0 601476733c ec58856ee3
fad792bfc7 9313536601 8a3402c049 ee2310595d dbc061f4fa 4571f7f9b4 0618951093 806d36a8cd
79c4c38b42 62b4bb8489 5064995bc4 54ec0c6bf7 83e93228ea fb974de27a f0d38fb7f4 84414e343e
78439ebbe1 0318ad8e7e 8ca9c6394f bc4c0d7cf4 14e8d07d45 c5e582c413 9cadad009e 443a35360f
e011ff5be8 7870503c52 a18f91bf08 a662d5a305 1d697680d2 576e47aaac 3178c8d48b a02ba74333
aaf68b7f98 f84623225e 7cd7458f2f e9d4ddfaec aca129dd69 ee9151f336 34e6829dd4 22718ba59a
d38b41a13c 3afb4b61c3 82cf559b5a 8590eaad3c 30022984f6 bc0a5fbacd 923e067ddd 1c27ec1f0c
2494d63f36 e901eac7a8 fc86ab0d85 45bf9f9115 c03aeb659e 58aa62bb15 ff0b4ac60b 66402435d1
42789818be ab5e927300 b7daa52feb 3f0095a7c1 631b332cd9 7f1bde162f cd2c35a9b6 828d06f5f5
dff849f6bb 52fb27690c 547cc5794d 003a16503a c6c2bbdcda 73421027be 55f47299c4 3ee3a2486f
868e996269 26753c81de 0031ac2186 b2a18d7209 d69de54d7c 1a4cacdfb6 26064dd6b5 05e143429e
23cf16f03a ab43018b02 9bdb5ba2ba be942f8390 bf026265e0 d2d097efbb acdafbc116 ea4cefe4fd
adf5a97b12 d9298647d9 bb40ba051f 52e29bd446 da62a28837 0ee9f260e4 b9ca58a56d cf6210316f
2e6dad7983 4e19cc48df 56dd6de7df bb11197ee3 3ffdd8a1c5 ce40d4bb34 4765f62441 74e0c5eb8c
7168e09b16 f16c4e1147 2933af2ca7 a6db044bd4 e131955685 e0a3abfc37 a48a5a9647 ac5f6ef93a
4bb99311e2 bbbc29191a 72bcd1616d 4150fc4d9d a9de343210 ecace769f5 7172269b06 fcb35f4197
3c5f0d0ee1 7779b84e87 df59f538c2 aae6be0c58 b8258679b3 b27faa62e7 63618b5e11 a9265197d2
264688e37c d06db4d9e1 dbb28a5af1 f9b85586ca c5fc193857 3c3aa69997 4a3cddf4bf 62af2bbb13
8da03e0126 5705d12813 2fa56879a2 e0d186d754 1abeb46dfa 582f6277ce 2cf3900030 19d6cd8a01
a9748a8b70 5fcc293ba2 650aad3a5f f42a81dca2 d58ca97207 13c316de69 7ac8b56730 f53e615e0f
50a0ff596f 4b44fa7e5d 96c12726fd 8945fda6be 18fdc23b7c 8813c3d2e0 7a2ec95951 ac9ae1f249
a34dd8b1a9 0892fcfc12 2403774e76 a60bd9a454 30d482d359 d2aa221638 d4310b2db0 c8fba210a2
31775a1532 563124163a c239d66d5d d836bb8703 55f405e04a 7d316641a3 148f9161e1 d09fe8d225
69087ab977 43ca2f8ff9 fe995ae79f e6689e0bb9 2807fde6c9 d065daf522 c15ee3925d 00da4dc0ea
6de6936f5d 407dcfd65b 94d7983ef0 56d4b09700 b3aa239432 9556b890f9 caef663729 ccab78840e
f520b4c4eb 6d1dffe3e5 a653052276 9e007469ef da32a2f1a2 514f98a0d6 d7b16ef0fb 78f5249ed0
8615a61bd1 caab390696 f0517f842b 0edea80c41 10ff403079 ce2669e3ed f0ab0ed25d 2426a5ffd5
b72926b382 2f3e0f9fc4 3ec7fee736 4757eea80c f1af005eca ab0d65e2e0 d8bbbf2351 1757c974fa
990ef4904c bf1a3518f9 656db96b4a 755fd73566 19171d0e1e
```
.editorconfig

````diff
@@ -9,3 +9,6 @@ max_line_length = 80
 
 [*.md]
 trim_trailing_whitespace = false
+
+[*.{yaml,yml}]
+indent_size = 2
````
.github/CODEOWNERS (file removed)

````diff
@@ -1,6 +0,0 @@
-# GitHub code owners
-# See https://help.github.com/articles/about-codeowners/
-#
-# KEEP THIS FILE SORTED. Order is important. Last match takes precedence.
-
-* @aiordache @ulyssessouza
````
.github/workflows/ci.yml (new file)

````diff
@@ -0,0 +1,72 @@
+name: Python package
+
+on: [push, pull_request]
+
+env:
+  DOCKER_BUILDKIT: '1'
+  FORCE_COLOR: 1
+
+jobs:
+  lint:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
+        with:
+          python-version: '3.x'
+      - run: pip install -U ruff==0.1.8
+      - name: Run ruff
+        run: ruff docker tests
+
+  build:
+    runs-on: ubuntu-22.04
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
+        with:
+          python-version: '3.x'
+      - run: pip3 install build && python -m build .
+      - uses: actions/upload-artifact@v4
+        with:
+          name: dist
+          path: dist
+
+  unit-tests:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
+
+    steps:
+      - uses: actions/checkout@v4
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v5
+        with:
+          python-version: ${{ matrix.python-version }}
+          allow-prereleases: true
+      - name: Install dependencies
+        run: |
+          python3 -m pip install --upgrade pip
+          pip3 install '.[ssh,dev]'
+      - name: Run unit tests
+        run: |
+          docker logout
+          rm -rf ~/.docker
+          py.test -v --cov=docker tests/unit
+
+  integration-tests:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        variant: [ "integration-dind", "integration-dind-ssl", "integration-dind-ssh" ]
+
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          fetch-tags: true
+      - name: make ${{ matrix.variant }}
+        run: |
+          docker logout
+          rm -rf ~/.docker
+          make ${{ matrix.variant }}
````
.github/workflows/release.yml (new file)

````diff
@@ -0,0 +1,53 @@
+name: Release
+
+on:
+  workflow_dispatch:
+    inputs:
+      tag:
+        description: "Release Tag WITHOUT `v` Prefix (e.g. 6.0.0)"
+        required: true
+      dry-run:
+        description: 'Dry run'
+        required: false
+        type: boolean
+        default: true
+
+env:
+  DOCKER_BUILDKIT: '1'
+  FORCE_COLOR: 1
+
+jobs:
+  publish:
+    runs-on: ubuntu-22.04
+    steps:
+      - uses: actions/checkout@v4
+
+      - uses: actions/setup-python@v5
+        with:
+          python-version: '3.x'
+
+      - name: Generate Package
+        run: |
+          pip3 install build
+          python -m build .
+        env:
+          # This is also supported by Hatch; see
+          # https://github.com/ofek/hatch-vcs#version-source-environment-variables
+          SETUPTOOLS_SCM_PRETEND_VERSION: ${{ inputs.tag }}
+
+      - name: Publish to PyPI
+        uses: pypa/gh-action-pypi-publish@release/v1
+        if: '! inputs.dry-run'
+        with:
+          password: ${{ secrets.PYPI_API_TOKEN }}
+
+      - name: Create GitHub release
+        uses: ncipollo/release-action@v1
+        if: '! inputs.dry-run'
+        with:
+          artifacts: "dist/*"
+          generateReleaseNotes: true
+          draft: true
+          commit: ${{ github.sha }}
+          token: ${{ secrets.GITHUB_TOKEN }}
+          tag: ${{ inputs.tag }}
````
.gitignore

````diff
@@ -13,6 +13,10 @@ html/*
 _build/
 README.rst
 
+# setuptools_scm
+_version.py
+
 env/
 venv/
 .idea/
 *.iml
````
.readthedocs.yml

````diff
@@ -3,8 +3,15 @@ version: 2
 sphinx:
   configuration: docs/conf.py
 
+build:
+  os: ubuntu-22.04
+  tools:
+    python: '3.12'
+
 python:
-  version: 3.5
   install:
-    - requirements: docs-requirements.txt
-    - requirements: requirements.txt
+    - method: pip
+      path: .
+      extra_requirements:
+        - ssh
+        - docs
````
.travis.yml (20 changed lines; file removed)

````diff
@@ -1,20 +0,0 @@
-sudo: false
-language: python
-matrix:
-  include:
-    - python: 2.7
-      env: TOXENV=py27
-    - python: 3.5
-      env: TOXENV=py35
-    - python: 3.6
-      env: TOXENV=py36
-    - python: 3.7
-      env: TOXENV=py37
-      dist: xenial
-      sudo: true
-    - env: TOXENV=flake8
-
-install:
-  - pip install tox==2.9.1
-script:
-  - tox
````
CONTRIBUTING.md

````diff
@@ -44,7 +44,7 @@ paragraph in the Docker contribution guidelines.
 Before we can review your pull request, please ensure that nothing has been
 broken by your changes by running the test suite. You can do so simply by
 running `make test` in the project root. This also includes coding style using
-`flake8`
+`ruff`
 
 ### 3. Write clear, self-contained commits
 
````
Dockerfile (22 changed lines)

````diff
@@ -1,19 +1,13 @@
-ARG PYTHON_VERSION=2.7
+# syntax=docker/dockerfile:1
 
+ARG PYTHON_VERSION=3.12
 FROM python:${PYTHON_VERSION}
 
-# Add SSH keys and set permissions
-COPY tests/ssh-keys /root/.ssh
-RUN chmod -R 600 /root/.ssh
-
-RUN mkdir /src
 WORKDIR /src
+COPY . .
 
-COPY requirements.txt /src/requirements.txt
-RUN pip install -r requirements.txt
-
-COPY test-requirements.txt /src/test-requirements.txt
-RUN pip install -r test-requirements.txt
-
-COPY . /src
-RUN pip install .
+ARG VERSION=0.0.0.dev0
+RUN --mount=type=cache,target=/cache/pip \
+    PIP_CACHE_DIR=/cache/pip \
+    SETUPTOOLS_SCM_PRETEND_VERSION=${VERSION} \
+    pip install .[ssh]
````
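For reference, the same image can be built from docker-py itself; a sketch, where the tag is illustrative and `VERSION` mirrors the build arg the Makefile passes:

```python
import docker

client = docker.from_env()

# Build the repository's root Dockerfile with the VERSION build arg.
image, logs = client.images.build(
    path='.',
    tag='docker-sdk-python:dev',          # hypothetical tag
    buildargs={'VERSION': '0.0.0.dev0'},
)
for chunk in logs:
    print(chunk.get('stream', ''), end='')
```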
Dockerfile-docs

````diff
@@ -1,4 +1,6 @@
-ARG PYTHON_VERSION=3.7
+# syntax=docker/dockerfile:1
+
+ARG PYTHON_VERSION=3.12
 
 FROM python:${PYTHON_VERSION}
 
@@ -9,7 +11,12 @@ RUN addgroup --gid $gid sphinx \
     && useradd --uid $uid --gid $gid -M sphinx
 
 WORKDIR /src
-COPY requirements.txt docs-requirements.txt ./
-RUN pip install -r requirements.txt -r docs-requirements.txt
+COPY . .
+
+ARG VERSION=0.0.0.dev0
+RUN --mount=type=cache,target=/cache/pip \
+    PIP_CACHE_DIR=/cache/pip \
+    SETUPTOOLS_SCM_PRETEND_VERSION=${VERSION} \
+    pip install .[ssh,docs]
 
 USER sphinx
````
Dockerfile-py3 (file removed)

````diff
@@ -1,15 +0,0 @@
-ARG PYTHON_VERSION=3.7
-
-FROM python:${PYTHON_VERSION}
-
-RUN mkdir /src
-WORKDIR /src
-
-COPY requirements.txt /src/requirements.txt
-RUN pip install -r requirements.txt
-
-COPY test-requirements.txt /src/test-requirements.txt
-RUN pip install -r test-requirements.txt
-
-COPY . /src
-RUN pip install .
````
Jenkinsfile (file removed)

````diff
@@ -1,143 +0,0 @@
-#!groovy
-
-def imageNameBase = "dockerbuildbot/docker-py"
-def imageNamePy2
-def imageNamePy3
-def imageDindSSH
-def images = [:]
-
-def buildImage = { name, buildargs, pyTag ->
-    img = docker.image(name)
-    try {
-        img.pull()
-    } catch (Exception exc) {
-        img = docker.build(name, buildargs)
-        img.push()
-    }
-    if (pyTag?.trim()) images[pyTag] = img.id
-}
-
-def buildImages = { ->
-    wrappedNode(label: "amd64 && ubuntu-1804 && overlay2", cleanWorkspace: true) {
-        stage("build image") {
-            checkout(scm)
-
-            imageNamePy2 = "${imageNameBase}:py2-${gitCommit()}"
-            imageNamePy3 = "${imageNameBase}:py3-${gitCommit()}"
-            imageDindSSH = "${imageNameBase}:sshdind-${gitCommit()}"
-            withDockerRegistry(credentialsId:'dockerbuildbot-index.docker.io') {
-                buildImage(imageDindSSH, "-f tests/Dockerfile-ssh-dind .", "")
-                buildImage(imageNamePy2, "-f tests/Dockerfile --build-arg PYTHON_VERSION=2.7 .", "py2.7")
-                buildImage(imageNamePy3, "-f tests/Dockerfile --build-arg PYTHON_VERSION=3.7 .", "py3.7")
-            }
-        }
-    }
-}
-
-def getDockerVersions = { ->
-    def dockerVersions = ["19.03.12"]
-    wrappedNode(label: "amd64 && ubuntu-1804 && overlay2") {
-        def result = sh(script: """docker run --rm \\
-            --entrypoint=python \\
-            ${imageNamePy3} \\
-            /src/scripts/versions.py
-            """, returnStdout: true
-        )
-        dockerVersions = dockerVersions + result.trim().tokenize(' ')
-    }
-    return dockerVersions
-}
-
-def getAPIVersion = { engineVersion ->
-    def versionMap = [
-        '18.09': '1.39',
-        '19.03': '1.40'
-    ]
-    def result = versionMap[engineVersion.substring(0, 5)]
-    if (!result) {
-        return '1.40'
-    }
-    return result
-}
-
-def runTests = { Map settings ->
-    def dockerVersion = settings.get("dockerVersion", null)
-    def pythonVersion = settings.get("pythonVersion", null)
-    def testImage = settings.get("testImage", null)
-    def apiVersion = getAPIVersion(dockerVersion)
-
-    if (!testImage) {
-        throw new Exception("Need test image object, e.g.: `runTests(testImage: img)`")
-    }
-    if (!dockerVersion) {
-        throw new Exception("Need Docker version to test, e.g.: `runTests(dockerVersion: '19.03.12')`")
-    }
-    if (!pythonVersion) {
-        throw new Exception("Need Python version being tested, e.g.: `runTests(pythonVersion: 'py2.7')`")
-    }
-
-    { ->
-        wrappedNode(label: "amd64 && ubuntu-1804 && overlay2", cleanWorkspace: true) {
-            stage("test python=${pythonVersion} / docker=${dockerVersion}") {
-                checkout(scm)
-                def dindContainerName = "dpy-dind-\$BUILD_NUMBER-\$EXECUTOR_NUMBER-${pythonVersion}-${dockerVersion}"
-                def testContainerName = "dpy-tests-\$BUILD_NUMBER-\$EXECUTOR_NUMBER-${pythonVersion}-${dockerVersion}"
-                def testNetwork = "dpy-testnet-\$BUILD_NUMBER-\$EXECUTOR_NUMBER-${pythonVersion}-${dockerVersion}"
-                withDockerRegistry(credentialsId:'dockerbuildbot-index.docker.io') {
-                    try {
-                        sh """docker network create ${testNetwork}"""
-                        sh """docker run --rm -d --name ${dindContainerName} -v /tmp --privileged --network ${testNetwork} \\
-                            ${imageDindSSH} dockerd -H tcp://0.0.0.0:2375
-                        """
-                        sh """docker run --rm \\
-                            --name ${testContainerName} \\
-                            -e "DOCKER_HOST=tcp://${dindContainerName}:2375" \\
-                            -e 'DOCKER_TEST_API_VERSION=${apiVersion}' \\
-                            --network ${testNetwork} \\
-                            --volumes-from ${dindContainerName} \\
-                            -v ~/.docker/config.json:/root/.docker/config.json \\
-                            ${testImage} \\
-                            py.test -v -rxs --cov=docker --ignore=tests/ssh tests/
-                        """
-                        sh """docker stop ${dindContainerName}"""
-                        // start DIND container with SSH
-                        sh """docker run --rm -d --name ${dindContainerName} -v /tmp --privileged --network ${testNetwork} \\
-                            ${imageDindSSH} dockerd --experimental"""
-                        sh """docker exec ${dindContainerName} sh -c /usr/sbin/sshd """
-                        // run SSH tests only
-                        sh """docker run --rm \\
-                            --name ${testContainerName} \\
-                            -e "DOCKER_HOST=ssh://${dindContainerName}:22" \\
-                            -e 'DOCKER_TEST_API_VERSION=${apiVersion}' \\
-                            --network ${testNetwork} \\
-                            --volumes-from ${dindContainerName} \\
-                            -v ~/.docker/config.json:/root/.docker/config.json \\
-                            ${testImage} \\
-                            py.test -v -rxs --cov=docker tests/ssh
-                        """
-                    } finally {
-                        sh """
-                            docker stop ${dindContainerName}
-                            docker network rm ${testNetwork}
-                        """
-                    }
-                }
-            }
-        }
-    }
-}
-
-
-buildImages()
-
-def dockerVersions = getDockerVersions()
-
-def testMatrix = [failFast: false]
-
-for (imgKey in new ArrayList(images.keySet())) {
-    for (version in dockerVersions) {
-        testMatrix["${imgKey}_${version}"] = runTests([testImage: images[imgKey], dockerVersion: version, pythonVersion: imgKey])
-    }
-}
-
-parallel(testMatrix)
````
MAINTAINERS (26 changed lines)

````diff
@@ -11,15 +11,19 @@
 [Org]
     [Org."Core maintainers"]
         people = [
-            "shin-",
+            "glours",
+            "milas",
         ]
     [Org.Alumni]
         people = [
+            "aiordache",
             "aanand",
             "bfirsh",
             "dnephin",
             "mnowster",
             "mpetazzoni",
+            "shin-",
+            "ulyssessouza",
         ]
 
 [people]
@@ -35,6 +39,11 @@
         Email = "aanand@docker.com"
         GitHub = "aanand"
 
+    [people.aiordache]
+        Name = "Anca Iordache"
+        Email = "anca.iordache@docker.com"
+        GitHub = "aiordache"
+
     [people.bfirsh]
         Name = "Ben Firshman"
        Email = "b@fir.sh"
@@ -45,6 +54,16 @@
         Email = "dnephin@gmail.com"
         GitHub = "dnephin"
 
+    [people.glours]
+        Name = "Guillaume Lours"
+        Email = "705411+glours@users.noreply.github.com"
+        GitHub = "glours"
+
+    [people.milas]
+        Name = "Milas Bowman"
+        Email = "devnull@milas.dev"
+        GitHub = "milas"
+
     [people.mnowster]
         Name = "Mazz Mosley"
         Email = "mazz@houseofmnowster.com"
@@ -59,3 +78,8 @@
         Name = "Joffrey F"
         Email = "joffrey@docker.com"
         GitHub = "shin-"
+
+    [people.ulyssessouza]
+        Name = "Ulysses Domiciano Souza"
+        Email = "ulysses.souza@docker.com"
+        GitHub = "ulyssessouza"
````
MANIFEST.in (file removed)

````diff
@@ -1,9 +0,0 @@
-include test-requirements.txt
-include requirements.txt
-include README.md
-include README.rst
-include LICENSE
-recursive-include tests *.py
-recursive-include tests/unit/testdata *
-recursive-include tests/integration/testdata *
-recursive-include tests/gpg-keys *
````
Makefile (214 changed lines)

````diff
@@ -1,51 +1,75 @@
-TEST_API_VERSION ?= 1.39
-TEST_ENGINE_VERSION ?= 19.03.13
+TEST_API_VERSION ?= 1.45
+TEST_ENGINE_VERSION ?= 26.1
+
+ifeq ($(OS),Windows_NT)
+    PLATFORM := Windows
+else
+    PLATFORM := $(shell sh -c 'uname -s 2>/dev/null || echo Unknown')
+endif
+
+ifeq ($(PLATFORM),Linux)
+	uid_args := "--build-arg uid=$(shell id -u) --build-arg gid=$(shell id -g)"
+endif
+
+SETUPTOOLS_SCM_PRETEND_VERSION_DOCKER ?= $(shell git describe --match '[0-9]*' --dirty='.m' --always --tags 2>/dev/null | sed -r 's/-([0-9]+)/.dev\1/' | sed 's/-/+/')
+ifeq ($(SETUPTOOLS_SCM_PRETEND_VERSION_DOCKER),)
+	SETUPTOOLS_SCM_PRETEND_VERSION_DOCKER = "0.0.0.dev0"
+endif
 
 .PHONY: all
 all: test
 
 .PHONY: clean
 clean:
-	-docker rm -f dpy-dind-py2 dpy-dind-py3 dpy-dind-certs dpy-dind-ssl
+	-docker rm -f dpy-dind dpy-dind-certs dpy-dind-ssl
 	find -name "__pycache__" | xargs rm -rf
 
-.PHONY: build
-build:
-	docker build -t docker-sdk-python -f tests/Dockerfile --build-arg PYTHON_VERSION=2.7 --build-arg APT_MIRROR .
-
 .PHONY: build-dind-ssh
 build-dind-ssh:
-	docker build -t docker-dind-ssh -f tests/Dockerfile-ssh-dind --build-arg ENGINE_VERSION=${TEST_ENGINE_VERSION} --build-arg API_VERSION=${TEST_API_VERSION} --build-arg APT_MIRROR .
+	docker build \
+		--pull \
+		-t docker-dind-ssh \
+		-f tests/Dockerfile-ssh-dind \
+		--build-arg VERSION=${SETUPTOOLS_SCM_PRETEND_VERSION_DOCKER} \
+		--build-arg ENGINE_VERSION=${TEST_ENGINE_VERSION} \
+		--build-arg API_VERSION=${TEST_API_VERSION} \
+		.
 
-.PHONY: build-py3
-build-py3:
-	docker build -t docker-sdk-python3 -f tests/Dockerfile --build-arg APT_MIRROR .
+.PHONY: build
+build:
+	docker build \
+		--pull \
+		-t docker-sdk-python3 \
+		-f tests/Dockerfile \
+		--build-arg VERSION=${SETUPTOOLS_SCM_PRETEND_VERSION_DOCKER} \
+		.
 
 .PHONY: build-docs
 build-docs:
-	docker build -t docker-sdk-python-docs -f Dockerfile-docs --build-arg uid=$(shell id -u) --build-arg gid=$(shell id -g) .
+	docker build \
+		-t docker-sdk-python-docs \
+		-f Dockerfile-docs \
+		--build-arg VERSION=${SETUPTOOLS_SCM_PRETEND_VERSION_DOCKER} \
+		$(uid_args) \
+		.
 
 .PHONY: build-dind-certs
 build-dind-certs:
-	docker build -t dpy-dind-certs -f tests/Dockerfile-dind-certs .
+	docker build \
+		-t dpy-dind-certs \
+		-f tests/Dockerfile-dind-certs \
+		--build-arg VERSION=${SETUPTOOLS_SCM_PRETEND_VERSION_DOCKER} \
+		.
 
 .PHONY: test
-test: flake8 unit-test unit-test-py3 integration-dind integration-dind-ssl
+test: ruff unit-test integration-dind integration-dind-ssl
 
 .PHONY: unit-test
 unit-test: build
-	docker run -t --rm docker-sdk-python py.test tests/unit
-
-.PHONY: unit-test-py3
-unit-test-py3: build-py3
 	docker run -t --rm docker-sdk-python3 py.test tests/unit
 
 .PHONY: integration-test
 integration-test: build
-	docker run -t --rm -v /var/run/docker.sock:/var/run/docker.sock docker-sdk-python py.test -v tests/integration/${file}
-
-.PHONY: integration-test-py3
-integration-test-py3: build-py3
 	docker run -t --rm -v /var/run/docker.sock:/var/run/docker.sock docker-sdk-python3 py.test -v tests/integration/${file}
 
 .PHONY: setup-network
@@ -53,70 +77,108 @@ setup-network:
 	docker network inspect dpy-tests || docker network create dpy-tests
 
 .PHONY: integration-dind
-integration-dind: integration-dind-py2 integration-dind-py3
+integration-dind: build setup-network
+	docker rm -vf dpy-dind || :
 
-.PHONY: integration-dind-py2
-integration-dind-py2: build setup-network
-	docker rm -vf dpy-dind-py2 || :
-	docker run -d --network dpy-tests --name dpy-dind-py2 --privileged\
-		docker:${TEST_ENGINE_VERSION}-dind dockerd -H tcp://0.0.0.0:2375 --experimental
-	docker run -t --rm --env="DOCKER_HOST=tcp://dpy-dind-py2:2375" --env="DOCKER_TEST_API_VERSION=${TEST_API_VERSION}"\
-		--network dpy-tests docker-sdk-python py.test tests/integration/${file}
-	docker rm -vf dpy-dind-py2
+	docker run \
+		--detach \
+		--name dpy-dind \
+		--network dpy-tests \
+		--pull=always \
+		--privileged \
+		docker:${TEST_ENGINE_VERSION}-dind \
+		dockerd -H tcp://0.0.0.0:2375 --experimental
 
-.PHONY: integration-dind-py3
-integration-dind-py3: build-py3 setup-network
-	docker rm -vf dpy-dind-py3 || :
-	docker run -d --network dpy-tests --name dpy-dind-py3 --privileged\
-		docker:${TEST_ENGINE_VERSION}-dind dockerd -H tcp://0.0.0.0:2375 --experimental
-	docker run -t --rm --env="DOCKER_HOST=tcp://dpy-dind-py3:2375" --env="DOCKER_TEST_API_VERSION=${TEST_API_VERSION}"\
-		--network dpy-tests docker-sdk-python3 py.test tests/integration/${file}
-	docker rm -vf dpy-dind-py3
+	# Wait for Docker-in-Docker to come to life
+	docker run \
+		--network dpy-tests \
+		--rm \
+		--tty \
+		busybox \
+		sh -c 'while ! nc -z dpy-dind 2375; do sleep 1; done'
 
-.PHONY: integration-ssh-py2
-integration-ssh-py2: build-dind-ssh build setup-network
-	docker rm -vf dpy-dind-py2 || :
-	docker run -d --network dpy-tests --name dpy-dind-py2 --privileged\
-		docker-dind-ssh dockerd --experimental
-	# start SSH daemon
-	docker exec dpy-dind-py2 sh -c "/usr/sbin/sshd"
-	docker run -t --rm --env="DOCKER_HOST=ssh://dpy-dind-py2" --env="DOCKER_TEST_API_VERSION=${TEST_API_VERSION}"\
-		--network dpy-tests docker-sdk-python py.test tests/ssh/${file}
-	docker rm -vf dpy-dind-py2
+	docker run \
+		--env="DOCKER_HOST=tcp://dpy-dind:2375" \
+		--env="DOCKER_TEST_API_VERSION=${TEST_API_VERSION}" \
+		--network dpy-tests \
+		--rm \
+		--tty \
+		docker-sdk-python3 \
+		py.test tests/integration/${file}
+
+	docker rm -vf dpy-dind
 
-.PHONY: integration-ssh-py3
-integration-ssh-py3: build-dind-ssh build-py3 setup-network
-	docker rm -vf dpy-dind-py3 || :
-	docker run -d --network dpy-tests --name dpy-dind-py3 --privileged\
-		docker-dind-ssh dockerd --experimental
-	# start SSH daemon
-	docker exec dpy-dind-py3 sh -c "/usr/sbin/sshd"
-	docker run -t --rm --env="DOCKER_HOST=ssh://dpy-dind-py3" --env="DOCKER_TEST_API_VERSION=${TEST_API_VERSION}"\
-		--network dpy-tests docker-sdk-python3 py.test tests/ssh/${file}
-	docker rm -vf dpy-dind-py3
+.PHONY: integration-dind-ssh
+integration-dind-ssh: build-dind-ssh build setup-network
+	docker rm -vf dpy-dind-ssh || :
+	docker run -d --network dpy-tests --name dpy-dind-ssh --privileged \
+		docker-dind-ssh dockerd --experimental
+	# start SSH daemon for known key
+	docker exec dpy-dind-ssh sh -c "/usr/sbin/sshd -h /etc/ssh/known_ed25519 -p 22"
+	docker exec dpy-dind-ssh sh -c "/usr/sbin/sshd -h /etc/ssh/unknown_ed25519 -p 2222"
+	docker run \
+		--tty \
+		--rm \
+		--env="DOCKER_HOST=ssh://dpy-dind-ssh" \
+		--env="DOCKER_TEST_API_VERSION=${TEST_API_VERSION}" \
+		--env="UNKNOWN_DOCKER_SSH_HOST=ssh://dpy-dind-ssh:2222" \
+		--network dpy-tests \
+		docker-sdk-python3 py.test tests/ssh/${file}
+	docker rm -vf dpy-dind-ssh
 
 .PHONY: integration-dind-ssl
-integration-dind-ssl: build-dind-certs build build-py3
+integration-dind-ssl: build-dind-certs build setup-network
 	docker rm -vf dpy-dind-certs dpy-dind-ssl || :
 	docker run -d --name dpy-dind-certs dpy-dind-certs
-	docker run -d --env="DOCKER_HOST=tcp://localhost:2375" --env="DOCKER_TLS_VERIFY=1"\
-		--env="DOCKER_CERT_PATH=/certs" --volumes-from dpy-dind-certs --name dpy-dind-ssl\
-		--network dpy-tests --network-alias docker -v /tmp --privileged\
-		docker:${TEST_ENGINE_VERSION}-dind\
-		dockerd --tlsverify --tlscacert=/certs/ca.pem --tlscert=/certs/server-cert.pem\
-		--tlskey=/certs/server-key.pem -H tcp://0.0.0.0:2375 --experimental
-	docker run -t --rm --volumes-from dpy-dind-ssl --env="DOCKER_HOST=tcp://docker:2375"\
-		--env="DOCKER_TLS_VERIFY=1" --env="DOCKER_CERT_PATH=/certs" --env="DOCKER_TEST_API_VERSION=${TEST_API_VERSION}"\
-		--network dpy-tests docker-sdk-python py.test tests/integration/${file}
-	docker run -t --rm --volumes-from dpy-dind-ssl --env="DOCKER_HOST=tcp://docker:2375"\
-		--env="DOCKER_TLS_VERIFY=1" --env="DOCKER_CERT_PATH=/certs" --env="DOCKER_TEST_API_VERSION=${TEST_API_VERSION}"\
-		--network dpy-tests docker-sdk-python3 py.test tests/integration/${file}
+
+	docker run \
+		--detach \
+		--env="DOCKER_CERT_PATH=/certs" \
+		--env="DOCKER_HOST=tcp://localhost:2375" \
+		--env="DOCKER_TLS_VERIFY=1" \
+		--name dpy-dind-ssl \
+		--network dpy-tests \
+		--network-alias docker \
+		--pull=always \
+		--privileged \
+		--volume /tmp \
+		--volumes-from dpy-dind-certs \
+		docker:${TEST_ENGINE_VERSION}-dind \
+		dockerd \
+			--tlsverify \
+			--tlscacert=/certs/ca.pem \
+			--tlscert=/certs/server-cert.pem \
+			--tlskey=/certs/server-key.pem \
+			-H tcp://0.0.0.0:2375 \
+			--experimental
+
+	# Wait for Docker-in-Docker to come to life
+	docker run \
+		--network dpy-tests \
+		--rm \
+		--tty \
+		busybox \
+		sh -c 'while ! nc -z dpy-dind-ssl 2375; do sleep 1; done'
+
+	docker run \
+		--env="DOCKER_CERT_PATH=/certs" \
+		--env="DOCKER_HOST=tcp://docker:2375" \
+		--env="DOCKER_TEST_API_VERSION=${TEST_API_VERSION}" \
+		--env="DOCKER_TLS_VERIFY=1" \
+		--network dpy-tests \
+		--rm \
+		--volumes-from dpy-dind-ssl \
+		--tty \
+		docker-sdk-python3 \
+		py.test tests/integration/${file}
+
 	docker rm -vf dpy-dind-ssl dpy-dind-certs
 
-.PHONY: flake8
-flake8: build
-	docker run -t --rm docker-sdk-python flake8 docker tests
+.PHONY: ruff
+ruff: build
+	docker run -t --rm docker-sdk-python3 ruff docker tests
 
 .PHONY: docs
 docs: build-docs
@@ -124,4 +186,4 @@ docs: build-docs
 
 .PHONY: shell
 shell: build
-	docker run -it -v /var/run/docker.sock:/var/run/docker.sock docker-sdk-python python
+	docker run -it -v /var/run/docker.sock:/var/run/docker.sock docker-sdk-python3 python
````
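For orientation, connecting to the TLS-protected daemon that `integration-dind-ssl` starts looks roughly like this from docker-py; a sketch only, assuming the `dpy-dind-certs` volume is mounted at `/certs` as the target arranges:

```python
import docker

# Certificate paths mirror the /certs volume wired up by the Makefile target.
tls_config = docker.tls.TLSConfig(
    ca_cert='/certs/ca.pem',
    client_cert=('/certs/cert.pem', '/certs/key.pem'),
    verify=True,
)
client = docker.DockerClient(base_url='tcp://docker:2375', tls=tls_config)
print(client.ping())
```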
README.md (11 changed lines)

````diff
@@ -1,18 +1,17 @@
 # Docker SDK for Python
 
-[![Build Status](https://travis-ci.org/docker/docker-py.svg?branch=master)](https://travis-ci.org/docker/docker-py)
+[![Build Status](https://github.com/docker/docker-py/actions/workflows/ci.yml/badge.svg)](https://github.com/docker/docker-py/actions/workflows/ci.yml)
 
 A Python library for the Docker Engine API. It lets you do anything the `docker` command does, but from within Python apps – run containers, manage containers, manage Swarms, etc.
 
 ## Installation
 
-The latest stable version [is available on PyPI](https://pypi.python.org/pypi/docker/). Either add `docker` to your `requirements.txt` file or install with pip:
+The latest stable version [is available on PyPI](https://pypi.python.org/pypi/docker/). Install with pip:
 
     pip install docker
 
-If you are intending to connect to a docker host via TLS, add `docker[tls]` to your requirements instead, or install with pip:
-
-    pip install docker[tls]
+> Older versions (< 6.0) required installing `docker[tls]` for SSL/TLS support.
+> This is no longer necessary and is a no-op, but is supported for backwards compatibility.
 
 ## Usage
 
@@ -58,7 +57,7 @@ You can stream logs:
 
 ```python
 >>> for line in container.logs(stream=True):
-...   print line.strip()
+...   print(line.strip())
 Reticulating spline 2...
 Reticulating spline 3...
 ...
````
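The streaming example in context, matching the README's usage section:

```python
import docker

client = docker.from_env()

# Same flow as the README example; print() is now a function call.
container = client.containers.run('bfirsh/reticulate-splines', detach=True)
for line in container.logs(stream=True):
    print(line.strip())
```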
appveyor.yml (13 changed lines; file removed)

````diff
@@ -1,13 +0,0 @@
-version: '{branch}-{build}'
-
-install:
-  - "SET PATH=C:\\Python37-x64;C:\\Python37-x64\\Scripts;%PATH%"
-  - "python --version"
-  - "python -m pip install --upgrade pip"
-  - "pip install tox==2.9.1"
-
-# Build the binary after tests
-build: false
-
-test_script:
-  - "tox"
````
docker/__init__.py

````diff
@@ -1,10 +1,7 @@
-# flake8: noqa
 from .api import APIClient
 from .client import DockerClient, from_env
-from .context import Context
-from .context import ContextAPI
+from .context import Context, ContextAPI
 from .tls import TLSConfig
-from .version import version, version_info
+from .version import __version__
 
-__version__ = version
 __title__ = 'docker'
````
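A quick check of the re-exported surface after this change; output depends on the installed version:

```python
import docker

# __version__ is now re-exported from docker.version (populated by
# setuptools_scm) instead of being assembled in this module.
print(docker.__version__)
print(docker.APIClient, docker.DockerClient, docker.ContextAPI)
```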
docker/api/__init__.py

````diff
@@ -1,2 +1 @@
-# flake8: noqa
 from .client import APIClient
````
docker/api/build.py

````diff
@@ -3,16 +3,12 @@ import logging
 import os
 import random
 
-from .. import auth
-from .. import constants
-from .. import errors
-from .. import utils
+from .. import auth, constants, errors, utils
 
 log = logging.getLogger(__name__)
 
 
-class BuildApiMixin(object):
+class BuildApiMixin:
     def build(self, path=None, tag=None, quiet=False, fileobj=None,
               nocache=False, rm=False, timeout=None,
               custom_context=False, encoding=None, pull=False,
@@ -76,6 +72,7 @@ class BuildApiMixin(object):
             forcerm (bool): Always remove intermediate containers, even after
                 unsuccessful builds
             dockerfile (str): path within the build context to the Dockerfile
+            gzip (bool): If set to ``True``, gzip compression/encoding is used
             buildargs (dict): A dictionary of build arguments
             container_limits (dict): A dictionary of limits applied to each
                 container created by the build process. Valid keys:
@@ -128,13 +125,16 @@ class BuildApiMixin(object):
             raise errors.DockerException(
                 'Can not use custom encoding if gzip is enabled'
             )
-
+        if tag is not None:
+            if not utils.match_tag(tag):
+                raise errors.DockerException(
+                    f"invalid tag '{tag}': invalid reference format"
+                )
         for key in container_limits.keys():
             if key not in constants.CONTAINER_LIMITS_KEYS:
                 raise errors.DockerException(
-                    'Invalid container_limits key {0}'.format(key)
+                    f'Invalid container_limits key {key}'
                 )
 
         if custom_context:
             if not fileobj:
                 raise TypeError("You must specify fileobj with custom_context")
@@ -150,10 +150,10 @@ class BuildApiMixin(object):
         dockerignore = os.path.join(path, '.dockerignore')
         exclude = None
         if os.path.exists(dockerignore):
-            with open(dockerignore, 'r') as f:
+            with open(dockerignore) as f:
                 exclude = list(filter(
                     lambda x: x != '' and x[0] != '#',
-                    [l.strip() for l in f.read().splitlines()]
+                    [line.strip() for line in f.read().splitlines()]
                 ))
         dockerfile = process_dockerfile(dockerfile, path)
         context = utils.tar(
@@ -275,10 +275,24 @@ class BuildApiMixin(object):
         return self._stream_helper(response, decode=decode)
 
     @utils.minimum_version('1.31')
-    def prune_builds(self):
+    def prune_builds(self, filters=None, keep_storage=None, all=None):
         """
         Delete the builder cache
 
+        Args:
+            filters (dict): Filters to process on the prune list.
+                Needs Docker API v1.39+
+                Available filters:
+                - dangling (bool): When set to true (or 1), prune only
+                   unused and untagged images.
+                - until (str): Can be Unix timestamps, date formatted
+                   timestamps, or Go duration strings (e.g. 10m, 1h30m) computed
+                   relative to the daemon's local time.
+            keep_storage (int): Amount of disk space in bytes to keep for cache.
+                Needs Docker API v1.39+
+            all (bool): Remove all types of build cache.
+                Needs Docker API v1.39+
+
         Returns:
             (dict): A dictionary containing information about the operation's
                     result. The ``SpaceReclaimed`` key indicates the amount of
@@ -289,7 +303,20 @@ class BuildApiMixin(object):
                 If the server returns an error.
         """
         url = self._url("/build/prune")
-        return self._result(self._post(url), True)
+        if (filters, keep_storage, all) != (None, None, None) \
+                and utils.version_lt(self._version, '1.39'):
+            raise errors.InvalidVersion(
+                '`filters`, `keep_storage`, and `all` args are only available '
+                'for API version > 1.38'
+            )
+        params = {}
+        if filters is not None:
+            params['filters'] = utils.convert_filters(filters)
+        if keep_storage is not None:
+            params['keep-storage'] = keep_storage
+        if all is not None:
+            params['all'] = all
+        return self._result(self._post(url, params=params), True)
 
     def _set_auth_headers(self, headers):
         log.debug('Looking for auth config')
@@ -313,9 +340,8 @@ class BuildApiMixin(object):
             auth_data[auth.INDEX_URL] = auth_data.get(auth.INDEX_NAME, {})
 
         log.debug(
-            'Sending auth config ({0})'.format(
-                ', '.join(repr(k) for k in auth_data.keys())
-            )
+            "Sending auth config (%s)",
+            ', '.join(repr(k) for k in auth_data),
         )
 
         if auth_data:
@@ -335,18 +361,15 @@ def process_dockerfile(dockerfile, path):
     abs_dockerfile = os.path.join(path, dockerfile)
     if constants.IS_WINDOWS_PLATFORM and path.startswith(
             constants.WINDOWS_LONGPATH_PREFIX):
-        abs_dockerfile = '{}{}'.format(
-            constants.WINDOWS_LONGPATH_PREFIX,
-            os.path.normpath(
-                abs_dockerfile[len(constants.WINDOWS_LONGPATH_PREFIX):]
-            )
-        )
+        normpath = os.path.normpath(
+            abs_dockerfile[len(constants.WINDOWS_LONGPATH_PREFIX):])
+        abs_dockerfile = f'{constants.WINDOWS_LONGPATH_PREFIX}{normpath}'
     if (os.path.splitdrive(path)[0] != os.path.splitdrive(abs_dockerfile)[0] or
             os.path.relpath(abs_dockerfile, path).startswith('..')):
         # Dockerfile not in context - read data to insert into tar later
-        with open(abs_dockerfile, 'r') as df:
+        with open(abs_dockerfile) as df:
             return (
-                '.dockerfile.{0:x}'.format(random.getrandbits(160)),
+                f'.dockerfile.{random.getrandbits(160):x}',
                 df.read()
             )
````
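A minimal sketch of the extended `prune_builds()` against a Docker API 1.39+ daemon; the filter values are illustrative:

```python
import docker

client = docker.APIClient(version='auto')

# 'until' and `all` need Docker API 1.39+; older daemons raise InvalidVersion.
result = client.prune_builds(filters={'until': '24h'}, all=True)
print(f"reclaimed {result.get('SpaceReclaimed', 0)} bytes")
```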
docker/api/client.py

````diff
@@ -1,21 +1,31 @@
 import json
 import struct
+import urllib
 from functools import partial
 
 import requests
+import requests.adapters
 import requests.exceptions
-import six
-import websocket
 
 from .. import auth
-from ..constants import (DEFAULT_NUM_POOLS, DEFAULT_NUM_POOLS_SSH,
-                         DEFAULT_MAX_POOL_SIZE, DEFAULT_TIMEOUT_SECONDS,
-                         DEFAULT_USER_AGENT, IS_WINDOWS_PLATFORM,
-                         MINIMUM_DOCKER_API_VERSION, STREAM_HEADER_SIZE_BYTES)
-from ..errors import (DockerException, InvalidVersion, TLSParameterError,
-                      create_api_error_from_http_exception)
+from ..constants import (
+    DEFAULT_MAX_POOL_SIZE,
+    DEFAULT_NUM_POOLS,
+    DEFAULT_NUM_POOLS_SSH,
+    DEFAULT_TIMEOUT_SECONDS,
+    DEFAULT_USER_AGENT,
+    IS_WINDOWS_PLATFORM,
+    MINIMUM_DOCKER_API_VERSION,
+    STREAM_HEADER_SIZE_BYTES,
+)
+from ..errors import (
+    DockerException,
+    InvalidVersion,
+    TLSParameterError,
+    create_api_error_from_http_exception,
+)
 from ..tls import TLSConfig
-from ..transport import SSLHTTPAdapter, UnixHTTPAdapter
+from ..transport import UnixHTTPAdapter
 from ..utils import check_resource, config, update_headers, utils
 from ..utils.json_stream import json_stream
 from ..utils.proxy import ProxyConfig
@@ -107,7 +117,7 @@ class APIClient(
                  user_agent=DEFAULT_USER_AGENT, num_pools=None,
                  credstore_env=None, use_ssh_client=False,
                  max_pool_size=DEFAULT_MAX_POOL_SIZE):
-        super(APIClient, self).__init__()
+        super().__init__()
 
         if tls and not base_url:
             raise TLSParameterError(
@@ -160,10 +170,10 @@ class APIClient(
                     base_url, timeout, pool_connections=num_pools,
                     max_pool_size=max_pool_size
                 )
-            except NameError:
+            except NameError as err:
                 raise DockerException(
                     'Install pypiwin32 package to enable npipe:// support'
-                )
+                ) from err
             self.mount('http+docker://', self._custom_adapter)
             self.base_url = 'http+docker://localnpipe'
         elif base_url.startswith('ssh://'):
@@ -172,10 +182,10 @@ class APIClient(
                     base_url, timeout, pool_connections=num_pools,
                     max_pool_size=max_pool_size, shell_out=use_ssh_client
                 )
-            except NameError:
+            except NameError as err:
                 raise DockerException(
                     'Install paramiko package to enable ssh:// support'
-                )
+                ) from err
             self.mount('http+docker://ssh', self._custom_adapter)
             self._unmount('http://', 'https://')
             self.base_url = 'http+docker://ssh'
@@ -184,7 +194,7 @@ class APIClient(
             if isinstance(tls, TLSConfig):
                 tls.configure_client(self)
             elif tls:
-                self._custom_adapter = SSLHTTPAdapter(
+                self._custom_adapter = requests.adapters.HTTPAdapter(
                     pool_connections=num_pools)
                 self.mount('https://', self._custom_adapter)
             self.base_url = base_url
@@ -192,35 +202,34 @@ class APIClient(
         # version detection needs to be after unix adapter mounting
         if version is None or (isinstance(
             version,
-            six.string_types
+            str
         ) and version.lower() == 'auto'):
             self._version = self._retrieve_server_version()
         else:
             self._version = version
-        if not isinstance(self._version, six.string_types):
+        if not isinstance(self._version, str):
             raise DockerException(
-                'Version parameter must be a string or None. Found {0}'.format(
-                    type(version).__name__
-                )
+                'Version parameter must be a string or None. '
+                f'Found {type(version).__name__}'
             )
         if utils.version_lt(self._version, MINIMUM_DOCKER_API_VERSION):
             raise InvalidVersion(
-                'API versions below {} are no longer supported by this '
-                'library.'.format(MINIMUM_DOCKER_API_VERSION)
+                f'API versions below {MINIMUM_DOCKER_API_VERSION} are '
+                f'no longer supported by this library.'
             )
 
     def _retrieve_server_version(self):
         try:
             return self.version(api_version=False)["ApiVersion"]
-        except KeyError:
+        except KeyError as ke:
             raise DockerException(
                 'Invalid response from docker daemon: key "ApiVersion"'
                 ' is missing.'
-            )
+            ) from ke
         except Exception as e:
             raise DockerException(
-                'Error while fetching server API version: {0}'.format(e)
-            )
+                f'Error while fetching server API version: {e}'
+            ) from e
 
     def _set_request_timeout(self, kwargs):
         """Prepare the kwargs for an HTTP request by inserting the timeout
@@ -246,28 +255,26 @@ class APIClient(
 
     def _url(self, pathfmt, *args, **kwargs):
         for arg in args:
-            if not isinstance(arg, six.string_types):
+            if not isinstance(arg, str):
                 raise ValueError(
-                    'Expected a string but found {0} ({1}) '
-                    'instead'.format(arg, type(arg))
+                    f'Expected a string but found {arg} ({type(arg)}) instead'
                 )
 
-        quote_f = partial(six.moves.urllib.parse.quote, safe="/:")
+        quote_f = partial(urllib.parse.quote, safe="/:")
         args = map(quote_f, args)
 
+        formatted_path = pathfmt.format(*args)
         if kwargs.get('versioned_api', True):
-            return '{0}/v{1}{2}'.format(
-                self.base_url, self._version, pathfmt.format(*args)
-            )
+            return f'{self.base_url}/v{self._version}{formatted_path}'
         else:
-            return '{0}{1}'.format(self.base_url, pathfmt.format(*args))
+            return f'{self.base_url}{formatted_path}'
 
     def _raise_for_status(self, response):
         """Raises stored :class:`APIError`, if one occurred."""
         try:
             response.raise_for_status()
         except requests.exceptions.HTTPError as e:
-            raise create_api_error_from_http_exception(e)
+            raise create_api_error_from_http_exception(e) from e
 
     def _result(self, response, json=False, binary=False):
         assert not (json and binary)
@@ -284,7 +291,7 @@ class APIClient(
         # so we do this disgusting thing here.
         data2 = {}
         if data is not None and isinstance(data, dict):
-            for k, v in six.iteritems(data):
+            for k, v in iter(data.items()):
                 if v is not None:
                     data2[k] = v
         elif data is not None:
@@ -312,7 +319,16 @@ class APIClient(
         return self._create_websocket_connection(full_url)
 
     def _create_websocket_connection(self, url):
-        return websocket.create_connection(url)
+        try:
+            import websocket
+            return websocket.create_connection(url)
+        except ImportError as ie:
+            raise DockerException(
+                'The `websocket-client` library is required '
+                'for using websocket connections. '
+                'You can install the `docker` library '
+                'with the [websocket] extra to install it.'
+            ) from ie
 
     def _get_raw_response_socket(self, response):
         self._raise_for_status(response)
@@ -320,12 +336,10 @@ class APIClient(
             sock = response.raw._fp.fp.raw.sock
         elif self.base_url.startswith('http+docker://ssh'):
             sock = response.raw._fp.fp.channel
-        elif six.PY3:
+        else:
             sock = response.raw._fp.fp.raw
             if self.base_url.startswith("https://"):
                 sock = sock._sock
-        else:
-            sock = response.raw._fp.fp._sock
         try:
             # Keep a reference to the response to stop it being garbage
             # collected. If the response is garbage collected, it will
@@ -343,8 +357,7 @@ class APIClient(
 
         if response.raw._fp.chunked:
             if decode:
-                for chunk in json_stream(self._stream_helper(response, False)):
-                    yield chunk
+                yield from json_stream(self._stream_helper(response, False))
             else:
                 reader = response.raw
                 while not reader.closed:
@@ -400,10 +413,19 @@ class APIClient(
     def _stream_raw_result(self, response, chunk_size=1, decode=True):
         ''' Stream result for TTY-enabled container and raw binary data'''
         self._raise_for_status(response)
-        for out in response.iter_content(chunk_size, decode):
-            yield out
+
+        # Disable timeout on the underlying socket to prevent
+        # Read timed out(s) for long running processes
+        socket = self._get_raw_response_socket(response)
+        self._disable_socket_timeout(socket)
+
+        yield from response.iter_content(chunk_size, decode)
 
     def _read_from_socket(self, response, stream, tty=True, demux=False):
+        """Consume all data from the socket, close the response and return the
+        data. If stream=True, then a generator is returned instead and the
+        caller is responsible for closing the response.
+        """
         socket = self._get_raw_response_socket(response)
 
         gen = frames_iter(socket, tty)
@@ -418,8 +440,11 @@ class APIClient(
         if stream:
             return gen
         else:
-            # Wait for all the frames, concatenate them, and return the result
-            return consume_socket_output(gen, demux=demux)
+            try:
+                # Wait for all frames, concatenate them, and return the result
+                return consume_socket_output(gen, demux=demux)
+            finally:
+                response.close()
 
     def _disable_socket_timeout(self, socket):
         """ Depending on the combination of python version and whether we're
@@ -465,12 +490,12 @@ class APIClient(
             self._result(res, binary=True)
 
         self._raise_for_status(res)
-        sep = six.binary_type()
+        sep = b''
         if stream:
             return self._multiplexed_response_stream_helper(res)
         else:
             return sep.join(
-                [x for x in self._multiplexed_buffer_helper(res)]
+                list(self._multiplexed_buffer_helper(res))
             )
 
     def _unmount(self, *args):
@@ -479,7 +504,7 @@ class APIClient(
 
     def get_adapter(self, url):
         try:
-            return super(APIClient, self).get_adapter(url)
+            return super().get_adapter(url)
         except requests.exceptions.InvalidSchema as e:
             if self._custom_adapter:
                 return self._custom_adapter
@@ -497,7 +522,7 @@ class APIClient(
         Args:
             dockercfg_path (str): Use a custom path for the Docker config file
                 (default ``$HOME/.docker/config.json`` if present,
-                otherwise``$HOME/.dockercfg``)
+                otherwise ``$HOME/.dockercfg``)
 
         Returns:
             None
````
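The version negotiation path above is what `version='auto'` exercises; a short sketch:

```python
import docker

# 'auto' asks the daemon for its ApiVersion via _retrieve_server_version();
# any other value must be a string, or DockerException is raised.
client = docker.APIClient(base_url='unix:///var/run/docker.sock', version='auto')
print(client.api_version)
```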
docker/api/config.py

````diff
@@ -1,13 +1,11 @@
 import base64
 
-import six
-
 from .. import utils
 
 
-class ConfigApiMixin(object):
+class ConfigApiMixin:
     @utils.minimum_version('1.30')
-    def create_config(self, name, data, labels=None):
+    def create_config(self, name, data, labels=None, templating=None):
         """
             Create a config
 
@@ -15,6 +13,9 @@ class ConfigApiMixin:
                 name (string): Name of the config
                 data (bytes): Config data to be stored
                 labels (dict): A mapping of labels to assign to the config
+                templating (dict): dictionary containing the name of the
+                                   templating driver to be used expressed as
+                                   { name: <templating_driver_name>}
 
             Returns (dict): ID of the newly created config
         """
@@ -22,12 +23,12 @@ class ConfigApiMixin:
             data = data.encode('utf-8')
 
         data = base64.b64encode(data)
-        if six.PY3:
-            data = data.decode('ascii')
+        data = data.decode('ascii')
         body = {
             'Data': data,
             'Name': name,
-            'Labels': labels
+            'Labels': labels,
+            'Templating': templating
         }
 
         url = self._url('/configs/create')
````
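A hedged sketch of the new `templating` parameter; it requires an active Swarm manager, and the config name is hypothetical:

```python
import docker

client = docker.APIClient(version='auto')

# 'golang' selects the Go templating driver; data is base64-encoded
# by create_config() itself.
config = client.create_config(
    name='web-config',                     # hypothetical name
    data=b'port: 8080\n',
    labels={'env': 'demo'},
    templating={'name': 'golang'},
)
print(config['ID'])
```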
@ -1,18 +1,17 @@
|
|||
from datetime import datetime
|
||||
|
||||
import six
|
||||
|
||||
from .. import errors
|
||||
from .. import utils
|
||||
from .. import errors, utils
|
||||
from ..constants import DEFAULT_DATA_CHUNK_SIZE
|
||||
from ..types import CancellableStream
|
||||
from ..types import ContainerConfig
|
||||
from ..types import EndpointConfig
|
||||
from ..types import HostConfig
|
||||
from ..types import NetworkingConfig
|
||||
from ..types import (
|
||||
CancellableStream,
|
||||
ContainerConfig,
|
||||
EndpointConfig,
|
||||
HostConfig,
|
||||
NetworkingConfig,
|
||||
)
|
||||
|
||||
|
||||
class ContainerApiMixin(object):
|
||||
class ContainerApiMixin:
|
||||
@utils.check_resource('container')
|
||||
def attach(self, container, stdout=True, stderr=True,
|
||||
stream=False, logs=False, demux=False):
|
||||
|
@ -114,7 +113,7 @@ class ContainerApiMixin(object):
|
|||
|
||||
@utils.check_resource('container')
|
||||
def commit(self, container, repository=None, tag=None, message=None,
|
||||
author=None, changes=None, conf=None):
|
||||
author=None, pause=True, changes=None, conf=None):
|
||||
"""
|
||||
Commit a container to an image. Similar to the ``docker commit``
|
||||
command.
|
||||
|
@ -125,6 +124,7 @@ class ContainerApiMixin(object):
|
|||
tag (str): The tag to push
|
||||
message (str): A commit message
|
||||
author (str): The name of the author
|
||||
pause (bool): Whether to pause the container before committing
|
||||
changes (str): Dockerfile instructions to apply while committing
|
||||
conf (dict): The configuration for the container. See the
|
||||
`Engine API documentation
|
||||
|
@ -141,6 +141,7 @@ class ContainerApiMixin(object):
|
|||
'tag': tag,
|
||||
'comment': message,
|
||||
'author': author,
|
||||
'pause': pause,
|
||||
'changes': changes
|
||||
}
|
||||
u = self._url("/commit")
|
||||
|
@ -225,7 +226,7 @@ class ContainerApiMixin(object):
|
|||
mac_address=None, labels=None, stop_signal=None,
|
||||
networking_config=None, healthcheck=None,
|
||||
stop_timeout=None, runtime=None,
|
||||
use_config_proxy=True):
|
||||
use_config_proxy=True, platform=None):
|
||||
"""
|
||||
Creates a container. Parameters are similar to those for the ``docker
|
||||
run`` command except it doesn't support the attach options (``-a``).
|
||||
|
@ -244,9 +245,9 @@ class ContainerApiMixin(object):
|
|||
|
||||
.. code-block:: python
|
||||
|
||||
container_id = cli.create_container(
|
||||
container_id = client.api.create_container(
|
||||
'busybox', 'ls', ports=[1111, 2222],
|
||||
host_config=cli.create_host_config(port_bindings={
|
||||
host_config=client.api.create_host_config(port_bindings={
|
||||
1111: 4567,
|
||||
2222: None
|
||||
})
|
||||
|
@ -258,22 +259,24 @@ class ContainerApiMixin(object):
|
|||
|
||||
.. code-block:: python
|
||||
|
||||
cli.create_host_config(port_bindings={1111: ('127.0.0.1', 4567)})
|
||||
client.api.create_host_config(
|
||||
port_bindings={1111: ('127.0.0.1', 4567)}
|
||||
)
|
||||
|
||||
Or without host port assignment:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
cli.create_host_config(port_bindings={1111: ('127.0.0.1',)})
|
||||
client.api.create_host_config(port_bindings={1111: ('127.0.0.1',)})
|
||||
|
||||
If you wish to use UDP instead of TCP (default), you need to declare
|
||||
ports as such in both the config and host config:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
container_id = cli.create_container(
|
||||
container_id = client.api.create_container(
|
||||
'busybox', 'ls', ports=[(1111, 'udp'), 2222],
|
||||
host_config=cli.create_host_config(port_bindings={
|
||||
host_config=client.api.create_host_config(port_bindings={
|
||||
'1111/udp': 4567, 2222: None
|
||||
})
|
||||
)
|
||||
|
@ -283,7 +286,7 @@ class ContainerApiMixin(object):
|
|||
|
||||
.. code-block:: python
|
||||
|
||||
cli.create_host_config(port_bindings={
|
||||
client.api.create_host_config(port_bindings={
|
||||
1111: [1234, 4567]
|
||||
})
|
||||
|
||||
|
@ -291,7 +294,7 @@ class ContainerApiMixin(object):
|
|||
|
||||
.. code-block:: python
|
||||
|
||||
cli.create_host_config(port_bindings={
|
||||
client.api.create_host_config(port_bindings={
|
||||
1111: [
|
||||
('192.168.0.100', 1234),
|
||||
('192.168.0.101', 1234)
|
||||
|
@ -307,9 +310,9 @@ class ContainerApiMixin(object):
|
|||
|
||||
.. code-block:: python
|
||||
|
||||
container_id = cli.create_container(
|
||||
container_id = client.api.create_container(
|
||||
'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
|
||||
host_config=cli.create_host_config(binds={
|
||||
host_config=client.api.create_host_config(binds={
|
||||
'/home/user1/': {
|
||||
'bind': '/mnt/vol2',
|
||||
'mode': 'rw',
|
||||
|
@ -317,6 +320,11 @@ class ContainerApiMixin(object):
|
|||
'/var/www': {
|
||||
'bind': '/mnt/vol1',
|
||||
'mode': 'ro',
|
||||
},
|
||||
'/autofs/user1': {
|
||||
'bind': '/mnt/vol3',
|
||||
'mode': 'rw',
|
||||
'propagation': 'shared'
|
||||
}
|
||||
})
|
||||
)
|
||||
|
@ -326,11 +334,12 @@ class ContainerApiMixin(object):
|
|||
|
||||
.. code-block:: python
|
||||
|
||||
container_id = cli.create_container(
|
||||
'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
|
||||
host_config=cli.create_host_config(binds=[
|
||||
container_id = client.api.create_container(
|
||||
'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2', '/mnt/vol3'],
|
||||
host_config=client.api.create_host_config(binds=[
|
||||
'/home/user1/:/mnt/vol2',
|
||||
'/var/www:/mnt/vol1:ro',
|
||||
'/autofs/user1:/mnt/vol3:rw,shared',
|
||||
])
|
||||
)
|
||||
|
||||
|
@ -346,15 +355,15 @@ class ContainerApiMixin(object):
|
|||
|
||||
.. code-block:: python
|
||||
|
||||
networking_config = docker_client.create_networking_config({
|
||||
'network1': docker_client.create_endpoint_config(
|
||||
networking_config = client.api.create_networking_config({
|
||||
'network1': client.api.create_endpoint_config(
|
||||
ipv4_address='172.28.0.124',
|
||||
aliases=['foo', 'bar'],
|
||||
links=['container2']
|
||||
)
|
||||
})
|
||||
|
||||
ctnr = docker_client.create_container(
|
||||
ctnr = client.api.create_container(
|
||||
img, command, networking_config=networking_config
|
||||
)
|
||||
|
||||
|
@ -398,6 +407,7 @@ class ContainerApiMixin(object):
|
|||
configuration file (``~/.docker/config.json`` by default)
|
||||
contains a proxy configuration, the corresponding environment
|
||||
variables will be set in the container being created.
|
||||
platform (str): Platform in the format ``os[/arch[/variant]]``.
|
||||
|
||||
Returns:
|
||||
A dictionary with an image 'Id' key and a 'Warnings' key.
|
||||
|
@@ -408,7 +418,7 @@ class ContainerApiMixin(object):
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
-        if isinstance(volumes, six.string_types):
+        if isinstance(volumes, str):
            volumes = [volumes, ]

        if isinstance(environment, dict):
@@ -427,16 +437,22 @@ class ContainerApiMixin(object):
            stop_signal, networking_config, healthcheck,
            stop_timeout, runtime
        )
-        return self.create_container_from_config(config, name)
+        return self.create_container_from_config(config, name, platform)

    def create_container_config(self, *args, **kwargs):
        return ContainerConfig(self._version, *args, **kwargs)

-    def create_container_from_config(self, config, name=None):
+    def create_container_from_config(self, config, name=None, platform=None):
        u = self._url("/containers/create")
        params = {
            'name': name
        }
+        if platform:
+            if utils.version_lt(self._version, '1.41'):
+                raise errors.InvalidVersion(
+                    'platform is not supported for API version < 1.41'
+                )
+            params['platform'] = platform
        res = self._post_json(u, data=config, params=params)
        return self._result(res, True)

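For illustration, a minimal sketch of the new ``platform`` parameter wired through to ``/containers/create`` by the hunk above, assuming a daemon negotiating API version 1.41 or newer; image, name, and platform values are placeholders:

    import docker

    client = docker.APIClient()  # platform requires API version >= 1.41
    result = client.create_container(
        'busybox', 'uname -m', name='platform-test', platform='linux/amd64'
    )
    print(result['Id'], result.get('Warnings'))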
@@ -581,10 +597,13 @@ class ContainerApiMixin(object):

        Example:

-            >>> cli.create_host_config(privileged=True, cap_drop=['MKNOD'],
-                                       volumes_from=['nostalgic_newton'])
+            >>> client.api.create_host_config(
+            ...     privileged=True,
+            ...     cap_drop=['MKNOD'],
+            ...     volumes_from=['nostalgic_newton'],
+            ... )
            {'CapDrop': ['MKNOD'], 'LxcConf': None, 'Privileged': True,
-            'VolumesFrom': ['nostalgic_newton'], 'PublishAllPorts': False}
+             'VolumesFrom': ['nostalgic_newton'], 'PublishAllPorts': False}

        """
        if not kwargs:
@@ -612,11 +631,11 @@ class ContainerApiMixin(object):

        Example:

-            >>> docker_client.create_network('network1')
-            >>> networking_config = docker_client.create_networking_config({
-                'network1': docker_client.create_endpoint_config()
+            >>> client.api.create_network('network1')
+            >>> networking_config = client.api.create_networking_config({
+                'network1': client.api.create_endpoint_config()
            })
-            >>> container = docker_client.create_container(
+            >>> container = client.api.create_container(
                img, command, networking_config=networking_config
            )

@@ -650,7 +669,7 @@ class ContainerApiMixin(object):

        Example:

-            >>> endpoint_config = client.create_endpoint_config(
+            >>> endpoint_config = client.api.create_endpoint_config(
                aliases=['web', 'app'],
                links={'app_db': 'db', 'another': None},
                ipv4_address='132.65.0.123'
@@ -668,7 +687,8 @@ class ContainerApiMixin(object):
            container (str): The container to diff

        Returns:
-            (str)
+            (list) A list of dictionaries containing the attributes `Path`
+                and `Kind`.

        Raises:
            :py:class:`docker.errors.APIError`
@@ -729,7 +749,7 @@ class ContainerApiMixin(object):

            >>> c = docker.APIClient()
            >>> f = open('./sh_bin.tar', 'wb')
-            >>> bits, stat = c.get_archive(container, '/bin/sh')
+            >>> bits, stat = c.api.get_archive(container, '/bin/sh')
            >>> print(stat)
            {'name': 'sh', 'size': 1075464, 'mode': 493,
             'mtime': '2018-10-01T15:37:48-07:00', 'linkTarget': ''}
@@ -790,7 +810,7 @@ class ContainerApiMixin(object):
        url = self._url("/containers/{0}/kill", container)
        params = {}
        if signal is not None:
-            if not isinstance(signal, six.string_types):
+            if not isinstance(signal, str):
                signal = int(signal)
            params['signal'] = signal
        res = self._post(url, params=params)
@@ -816,14 +836,15 @@ class ContainerApiMixin(object):
            tail (str or int): Output specified number of lines at the end of
                logs. Either an integer of number of lines or the string
                ``all``. Default ``all``
-            since (datetime or int): Show logs since a given datetime or
-                integer epoch (in seconds)
+            since (datetime, int, or float): Show logs since a given datetime,
+                integer epoch (in seconds) or float (in fractional seconds)
            follow (bool): Follow log output. Default ``False``
-            until (datetime or int): Show logs that occurred before the given
-                datetime or integer epoch (in seconds)
+            until (datetime, int, or float): Show logs that occurred before
+                the given datetime, integer epoch (in seconds), or
+                float (in fractional seconds)

        Returns:
-            (generator or str)
+            (generator of bytes or bytes)

        Raises:
            :py:class:`docker.errors.APIError`
@@ -845,10 +866,12 @@ class ContainerApiMixin(object):
                params['since'] = utils.datetime_to_timestamp(since)
            elif (isinstance(since, int) and since > 0):
                params['since'] = since
+            elif (isinstance(since, float) and since > 0.0):
+                params['since'] = since
            else:
                raise errors.InvalidArgument(
-                    'since value should be datetime or positive int, '
-                    'not {}'.format(type(since))
+                    'since value should be datetime or positive int/float,'
+                    f' not {type(since)}'
                )

        if until is not None:
@@ -860,10 +883,12 @@ class ContainerApiMixin(object):
                params['until'] = utils.datetime_to_timestamp(until)
            elif (isinstance(until, int) and until > 0):
                params['until'] = until
+            elif (isinstance(until, float) and until > 0.0):
+                params['until'] = until
            else:
                raise errors.InvalidArgument(
-                    'until value should be datetime or positive int, '
-                    'not {}'.format(type(until))
+                    f'until value should be datetime or positive int/float, '
+                    f'not {type(until)}'
                )

        url = self._url("/containers/{0}/logs", container)
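For illustration, a minimal sketch of the float epochs the two hunks above start accepting, assuming a running container; the container name is a placeholder:

    import time

    import docker

    client = docker.APIClient()
    logs = client.logs(
        'my-container',
        since=time.time() - 300.0,  # float epoch: last five minutes
        until=time.time(),
        stream=False,
    )
    print(logs.decode('utf-8', errors='replace'))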
@@ -916,7 +941,7 @@ class ContainerApiMixin(object):

        .. code-block:: python

-            >>> cli.port('7174d6347063', 80)
+            >>> client.api.port('7174d6347063', 80)
            [{'HostIp': '0.0.0.0', 'HostPort': '80'}]
        """
        res = self._get(self._url("/containers/{0}/json", container))
@@ -935,7 +960,7 @@ class ContainerApiMixin(object):
            return port_settings.get(private_port)

        for protocol in ['tcp', 'udp', 'sctp']:
-            h_ports = port_settings.get(private_port + '/' + protocol)
+            h_ports = port_settings.get(f"{private_port}/{protocol}")
            if h_ports:
                break

@@ -951,7 +976,7 @@ class ContainerApiMixin(object):
            container (str): The container where the file(s) will be extracted
            path (str): Path inside the container where the file(s) will be
                extracted. Must exist.
-            data (bytes): tar data to be extracted
+            data (bytes or stream): tar data to be extracted

        Returns:
            (bool): True if the call succeeds.
@@ -1095,10 +1120,10 @@ class ContainerApiMixin(object):

        Example:

-            >>> container = cli.create_container(
+            >>> container = client.api.create_container(
            ...     image='busybox:latest',
            ...     command='/bin/sleep 30')
-            >>> cli.start(container=container.get('Id'))
+            >>> client.api.start(container=container.get('Id'))
        """
        if args or kwargs:
            raise errors.DeprecatedMethod(
@@ -1111,7 +1136,7 @@ class ContainerApiMixin(object):
        self._raise_for_status(res)

    @utils.check_resource('container')
-    def stats(self, container, decode=None, stream=True):
+    def stats(self, container, decode=None, stream=True, one_shot=None):
        """
        Stream statistics for a specific container. Similar to the
        ``docker stats`` command.

@@ -1123,6 +1148,9 @@ class ContainerApiMixin(object):
                False by default.
            stream (bool): If set to false, only the current stats will be
                returned instead of a stream. True by default.
+            one_shot (bool): If set to true, only get a single stat instead of
+                waiting for 2 cycles. Must be used with stream=false. False by
+                default.

        Raises:
            :py:class:`docker.errors.APIError`
@@ -1130,16 +1158,30 @@ class ContainerApiMixin(object):

        """
        url = self._url("/containers/{0}/stats", container)
+        params = {
+            'stream': stream
+        }
+        if one_shot is not None:
+            if utils.version_lt(self._version, '1.41'):
+                raise errors.InvalidVersion(
+                    'one_shot is not supported for API version < 1.41'
+                )
+            params['one-shot'] = one_shot
        if stream:
-            return self._stream_helper(self._get(url, stream=True),
-                                       decode=decode)
+            if one_shot:
+                raise errors.InvalidArgument(
+                    'one_shot is only available in conjunction with '
+                    'stream=False'
+                )
+            return self._stream_helper(
+                self._get(url, stream=True, params=params), decode=decode
+            )
        else:
            if decode:
                raise errors.InvalidArgument(
                    "decode is only available in conjunction with stream=True"
                )
-            return self._result(self._get(url, params={'stream': False}),
-                                json=True)
+            return self._result(self._get(url, params=params), json=True)

    @utils.check_resource('container')
    def stop(self, container, timeout=None):

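For illustration, a minimal sketch of the valid ``one_shot`` combination per the hunk above (``stream=False``), assuming API version 1.41 or newer; the container name is a placeholder:

    import docker

    client = docker.APIClient()  # one_shot requires API version >= 1.41
    stats = client.stats('my-container', stream=False, one_shot=True)
    print(stats['cpu_stats'], stats['memory_stats'])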
@@ -4,7 +4,7 @@ from datetime import datetime
from .. import auth, types, utils


-class DaemonApiMixin(object):
+class DaemonApiMixin:
    @utils.minimum_version('1.25')
    def df(self):
        """
@@ -109,7 +109,7 @@ class DaemonApiMixin(object):
                the Docker server.
            dockercfg_path (str): Use a custom path for the Docker config file
                (default ``$HOME/.docker/config.json`` if present,
-                otherwise``$HOME/.dockercfg``)
+                otherwise ``$HOME/.dockercfg``)

        Returns:
            (dict): The response from the login request
@@ -1,10 +1,8 @@
-import six
-
-from .. import errors
-from .. import utils
+from .. import errors, utils
+from ..types import CancellableStream


-class ExecApiMixin(object):
+class ExecApiMixin:
    @utils.check_resource('container')
    def exec_create(self, container, cmd, stdout=True, stderr=True,
                    stdin=False, tty=False, privileged=False, user='',
@@ -45,7 +43,7 @@ class ExecApiMixin(object):
                'Setting environment for exec is not supported in API < 1.25'
            )

-        if isinstance(cmd, six.string_types):
+        if isinstance(cmd, str):
            cmd = utils.split_command(cmd)

        if isinstance(environment, dict):
@@ -127,9 +125,10 @@ class ExecApiMixin(object):
            detach (bool): If true, detach from the exec command.
                Default: False
            tty (bool): Allocate a pseudo-TTY. Default: False
-            stream (bool): Stream response data. Default: False
+            stream (bool): Return response data progressively as an iterator
+                of strings, rather than a single string.
            socket (bool): Return the connection socket to allow custom
-                read/write operations.
+                read/write operations. Must be closed by the caller when done.
            demux (bool): Return stdout and stderr separately

        Returns:
@@ -163,7 +162,15 @@ class ExecApiMixin(object):
            stream=True
        )
        if detach:
-            return self._result(res)
+            try:
+                return self._result(res)
+            finally:
+                res.close()
        if socket:
            return self._get_raw_response_socket(res)
-        return self._read_from_socket(res, stream, tty=tty, demux=demux)
+
+        output = self._read_from_socket(res, stream, tty=tty, demux=demux)
+        if stream:
+            return CancellableStream(output, res)
+        else:
+            return output
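For illustration, a minimal sketch of consuming the ``CancellableStream`` the hunk above starts returning; it assumes a running container whose name and command are placeholders:

    import docker

    client = docker.APIClient()
    exec_id = client.exec_create('my-container', 'tail -f /var/log/app.log')
    stream = client.exec_start(exec_id, stream=True)  # a CancellableStream
    for chunk in stream:
        print(chunk.decode('utf-8', errors='replace'), end='')
        break
    stream.close()  # cancel the underlying request instead of reading forever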
@@ -1,15 +1,13 @@
import logging
import os

-import six
-
from .. import auth, errors, utils
from ..constants import DEFAULT_DATA_CHUNK_SIZE

log = logging.getLogger(__name__)


-class ImageApiMixin(object):
+class ImageApiMixin:

    @utils.check_resource('image')
    def get_image(self, image, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
@@ -31,7 +29,7 @@ class ImageApiMixin(object):

        Example:

-            >>> image = cli.get_image("busybox:latest")
+            >>> image = client.api.get_image("busybox:latest")
            >>> f = open('/tmp/busybox-latest.tar', 'wb')
            >>> for chunk in image:
            >>>   f.write(chunk)
@@ -49,7 +47,7 @@ class ImageApiMixin(object):
            image (str): The image to show history for

        Returns:
-            (str): The history of the image
+            (list): The history of the image

        Raises:
            :py:class:`docker.errors.APIError`
@@ -81,10 +79,18 @@ class ImageApiMixin(object):
                If the server returns an error.
        """
        params = {
-            'filter': name,
            'only_ids': 1 if quiet else 0,
            'all': 1 if all else 0,
        }
+        if name:
+            if utils.version_lt(self._version, '1.25'):
+                # only use "filter" on API 1.24 and under, as it is deprecated
+                params['filter'] = name
+            else:
+                if filters:
+                    filters['reference'] = name
+                else:
+                    filters = {'reference': name}
        if filters:
            params['filters'] = utils.convert_filters(filters)
        res = self._result(self._get(self._url("/images/json"), params=params),
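For illustration, a minimal sketch of the caller-facing effect of the hunk above; behaviour depends on the negotiated API version, and the image name is a placeholder:

    import docker

    client = docker.APIClient()
    # On API >= 1.25 this is sent as filters={'reference': 'busybox'};
    # on API 1.24 and older it falls back to the deprecated 'filter' param.
    images = client.images(name='busybox')
    print([img.get('RepoTags') for img in images])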
@@ -122,7 +128,7 @@ class ImageApiMixin(object):

        params = _import_image_params(
            repository, tag, image,
-            src=(src if isinstance(src, six.string_types) else None),
+            src=(src if isinstance(src, str) else None),
            changes=changes
        )
        headers = {'Content-Type': 'application/tar'}
@@ -131,7 +137,7 @@ class ImageApiMixin(object):
            return self._result(
                self._post(u, data=None, params=params)
            )
-        elif isinstance(src, six.string_types):  # from file path
+        elif isinstance(src, str):  # from file path
            with open(src, 'rb') as f:
                return self._result(
                    self._post(
@@ -371,7 +377,8 @@ class ImageApiMixin(object):

        Example:

-            >>> for line in cli.pull('busybox', stream=True, decode=True):
+            >>> resp = client.api.pull('busybox', stream=True, decode=True)
+            ... for line in resp:
            ...     print(json.dumps(line, indent=4))
            {
                "status": "Pulling image (latest) from busybox",
@@ -450,7 +457,12 @@ class ImageApiMixin(object):
                If the server returns an error.

        Example:
-            >>> for line in cli.push('yourname/app', stream=True, decode=True):
+            >>> resp = client.api.push(
+            ...     'yourname/app',
+            ...     stream=True,
+            ...     decode=True,
+            ... )
+            ... for line in resp:
            ...   print(line)
            {'status': 'Pushing repository yourname/app (1 tags)'}
            {'status': 'Pushing','progressDetail': {}, 'id': '511136ea3c5a'}
@@ -501,13 +513,14 @@ class ImageApiMixin(object):
        res = self._delete(self._url("/images/{0}", image), params=params)
        return self._result(res, True)

-    def search(self, term):
+    def search(self, term, limit=None):
        """
        Search for images on Docker Hub. Similar to the ``docker search``
        command.

        Args:
            term (str): A term to search for.
+            limit (int): The maximum number of results to return.

        Returns:
            (list of dicts): The response of the search.
@@ -516,8 +529,12 @@ class ImageApiMixin(object):
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
+        params = {'term': term}
+        if limit is not None:
+            params['limit'] = limit
+
        return self._result(
-            self._get(self._url("/images/search"), params={'term': term}),
+            self._get(self._url("/images/search"), params=params),
            True
        )

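For illustration, a minimal sketch of the new ``limit`` argument added by the two hunks above; the search term is a placeholder:

    import docker

    client = docker.APIClient()
    results = client.search('busybox', limit=5)
    for entry in results:
        print(entry['name'], entry['star_count'])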
@@ -541,7 +558,7 @@ class ImageApiMixin(object):

        Example:

-            >>> client.tag('ubuntu', 'localhost:5000/ubuntu', 'latest',
+            >>> client.api.tag('ubuntu', 'localhost:5000/ubuntu', 'latest',
                           force=True)
        """
        params = {
@@ -558,7 +575,7 @@ class ImageApiMixin(object):
def is_file(src):
    try:
        return (
-            isinstance(src, six.string_types) and
+            isinstance(src, str) and
            os.path.isfile(src)
        )
    except TypeError:  # a data string will make isfile() raise a TypeError

@@ -1,10 +1,9 @@
-from ..errors import InvalidVersion
-from ..utils import check_resource, minimum_version
-from ..utils import version_lt
from .. import utils
+from ..errors import InvalidVersion
+from ..utils import check_resource, minimum_version, version_lt


-class NetworkApiMixin(object):
+class NetworkApiMixin:
    def networks(self, names=None, ids=None, filters=None):
        """
        List networks. Similar to the ``docker network ls`` command.
@@ -75,7 +74,7 @@ class NetworkApiMixin(object):
        Example:
            A network using the bridge driver:

-                >>> client.create_network("network1", driver="bridge")
+                >>> client.api.create_network("network1", driver="bridge")

            You can also create more advanced networks with custom IPAM
            configurations. For example, setting the subnet to
@@ -90,7 +89,7 @@ class NetworkApiMixin(object):
                >>> ipam_config = docker.types.IPAMConfig(
                    pool_configs=[ipam_pool]
                )
-                >>> docker_client.create_network("network1", driver="bridge",
+                >>> client.api.create_network("network1", driver="bridge",
                                                 ipam=ipam_config)
        """
        if options is not None and not isinstance(options, dict):
@@ -216,7 +215,8 @@ class NetworkApiMixin(object):
    def connect_container_to_network(self, container, net_id,
                                     ipv4_address=None, ipv6_address=None,
                                     aliases=None, links=None,
-                                     link_local_ips=None, driver_opt=None):
+                                     link_local_ips=None, driver_opt=None,
+                                     mac_address=None):
        """
        Connect a container to a network.

@@ -235,13 +235,16 @@ class NetworkApiMixin(object):
                network, using the IPv6 protocol. Defaults to ``None``.
            link_local_ips (:py:class:`list`): A list of link-local
                (IPv4/IPv6) addresses.
+            mac_address (str): The MAC address of this container on the
+                network. Defaults to ``None``.
        """
        data = {
            "Container": container,
            "EndpointConfig": self.create_endpoint_config(
                aliases=aliases, links=links, ipv4_address=ipv4_address,
                ipv6_address=ipv6_address, link_local_ips=link_local_ips,
-                driver_opt=driver_opt
+                driver_opt=driver_opt,
+                mac_address=mac_address
            ),
        }

@@ -1,9 +1,7 @@
-import six
-
from .. import auth, utils


-class PluginApiMixin(object):
+class PluginApiMixin:
    @utils.minimum_version('1.25')
    @utils.check_resource('name')
    def configure_plugin(self, name, options):
@@ -21,7 +19,7 @@ class PluginApiMixin(object):
        url = self._url('/plugins/{0}/set', name)
        data = options
        if isinstance(data, dict):
-            data = ['{0}={1}'.format(k, v) for k, v in six.iteritems(data)]
+            data = [f'{k}={v}' for k, v in data.items()]
        res = self._post_json(url, data=data)
        self._raise_for_status(res)
        return True
@@ -53,19 +51,20 @@ class PluginApiMixin(object):
        return True

    @utils.minimum_version('1.25')
-    def disable_plugin(self, name):
+    def disable_plugin(self, name, force=False):
        """
        Disable an installed plugin.

        Args:
            name (string): The name of the plugin. The ``:latest`` tag is
                optional, and is the default if omitted.
+            force (bool): Whether to enable the ``force`` query parameter.

        Returns:
            ``True`` if successful
        """
        url = self._url('/plugins/{0}/disable', name)
-        res = self._post(url)
+        res = self._post(url, params={'force': force})
        self._raise_for_status(res)
        return True

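For illustration, a minimal sketch of the new ``force`` flag; the plugin name is a placeholder:

    import docker

    client = docker.APIClient()
    # force=True is passed through as the ?force query parameter,
    # disabling the plugin even while it is still in use.
    client.disable_plugin('vieux/sshfs:latest', force=True)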
@@ -1,12 +1,9 @@
import base64

-import six
-
-from .. import errors
-from .. import utils
+from .. import errors, utils


-class SecretApiMixin(object):
+class SecretApiMixin:
    @utils.minimum_version('1.25')
    def create_secret(self, name, data, labels=None, driver=None):
        """
@@ -25,8 +22,7 @@ class SecretApiMixin(object):
            data = data.encode('utf-8')

        data = base64.b64encode(data)
-        if six.PY3:
-            data = data.decode('ascii')
+        data = data.decode('ascii')
        body = {
            'Data': data,
            'Name': name,
@@ -7,9 +7,7 @@ def _check_api_features(version, task_template, update_config, endpoint_spec,

    def raise_version_error(param, min_version):
        raise errors.InvalidVersion(
-            '{} is not supported in API version < {}'.format(
-                param, min_version
-            )
+            f'{param} is not supported in API version < {min_version}'
        )

    if update_config is not None:
@@ -45,7 +43,7 @@ def _check_api_features(version, task_template, update_config, endpoint_spec,
    if task_template is not None:
        if 'ForceUpdate' in task_template and utils.version_lt(
                version, '1.25'):
-                raise_version_error('force_update', '1.25')
+            raise_version_error('force_update', '1.25')

        if task_template.get('Placement'):
            if utils.version_lt(version, '1.30'):
@@ -113,7 +111,7 @@ def _merge_task_template(current, override):
    return merged


-class ServiceApiMixin(object):
+class ServiceApiMixin:
    @utils.minimum_version('1.24')
    def create_service(
            self, task_template, name=None, labels=None, mode=None,
@@ -262,7 +260,7 @@ class ServiceApiMixin(object):
        return True

    @utils.minimum_version('1.24')
-    def services(self, filters=None):
+    def services(self, filters=None, status=None):
        """
        List services.

@@ -270,6 +268,8 @@ class ServiceApiMixin(object):
            filters (dict): Filters to process on the nodes list. Valid
                filters: ``id``, ``name`` , ``label`` and ``mode``.
                Default: ``None``.
+            status (bool): Include the service task count of running and
+                desired tasks. Default: ``None``.

        Returns:
            A list of dictionaries containing data about each service.
@@ -281,6 +281,12 @@ class ServiceApiMixin(object):
        params = {
            'filters': utils.convert_filters(filters) if filters else None
        }
+        if status is not None:
+            if utils.version_lt(self._version, '1.41'):
+                raise errors.InvalidVersion(
+                    'status is not supported in API version < 1.41'
+                )
+            params['status'] = status
        url = self._url('/services')
        return self._result(self._get(url, params=params), True)

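For illustration, a minimal sketch of the new ``status`` argument; the ``ServiceStatus`` key is what the Engine API returns for this option on 1.41+, and the service name filter is a placeholder:

    import docker

    client = docker.APIClient()  # status requires API version >= 1.41
    for svc in client.services(filters={'name': 'web'}, status=True):
        print(svc['Spec']['Name'], svc.get('ServiceStatus'))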
@@ -1,14 +1,13 @@
+import http.client as http_client
import logging
-from six.moves import http_client

+from .. import errors, types, utils
from ..constants import DEFAULT_SWARM_ADDR_POOL, DEFAULT_SWARM_SUBNET_SIZE
-from .. import errors
-from .. import types
-from .. import utils

log = logging.getLogger(__name__)


-class SwarmApiMixin(object):
+class SwarmApiMixin:

    def create_swarm_spec(self, *args, **kwargs):
        """
@@ -58,10 +57,10 @@ class SwarmApiMixin(object):

        Example:

-            >>> spec = client.create_swarm_spec(
+            >>> spec = client.api.create_swarm_spec(
              snapshot_interval=5000, log_entries_for_slow_followers=1200
            )
-            >>> client.init_swarm(
+            >>> client.api.init_swarm(
              advertise_addr='eth0', listen_addr='0.0.0.0:5000',
              force_new_cluster=False, swarm_spec=spec
            )
@@ -85,7 +84,7 @@ class SwarmApiMixin(object):
    def init_swarm(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
                   force_new_cluster=False, swarm_spec=None,
                   default_addr_pool=None, subnet_size=None,
-                   data_path_addr=None):
+                   data_path_addr=None, data_path_port=None):
        """
        Initialize a new Swarm using the current connected engine as the first
        node.
@@ -118,6 +117,9 @@ class SwarmApiMixin(object):
                networks created from the default subnet pool. Default: None
            data_path_addr (string): Address or interface to use for data path
                traffic. For example, 192.168.1.1, or an interface, like eth0.
+            data_path_port (int): Port number to use for data path traffic.
+                Acceptable port range is 1024 to 49151. If set to ``None`` or
+                0, the default port 4789 will be used. Default: None

        Returns:
            (str): The ID of the created node.
@@ -166,6 +168,14 @@ class SwarmApiMixin(object):
                )
            data['DataPathAddr'] = data_path_addr

+        if data_path_port is not None:
+            if utils.version_lt(self._version, '1.40'):
+                raise errors.InvalidVersion(
+                    'Data path port is only available for '
+                    'API version >= 1.40'
+                )
+            data['DataPathPort'] = data_path_port
+
        response = self._post_json(url, data=data)
        return self._result(response, json=True)

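For illustration, a minimal sketch of ``data_path_port`` as added by the hunks above; the interface and port are placeholders:

    import docker

    client = docker.APIClient()  # data_path_port requires API version >= 1.40
    node_id = client.init_swarm(
        advertise_addr='eth0',
        data_path_port=7777,  # 1024-49151; None or 0 falls back to 4789
    )
    print(node_id)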
@@ -354,8 +364,8 @@ class SwarmApiMixin(object):

        Example:

-            >>> key = client.get_unlock_key()
-            >>> client.unlock_node(key)
+            >>> key = client.api.get_unlock_key()
+            >>> client.unlock_swarm(key)

        """
        if isinstance(key, dict):
@@ -396,7 +406,7 @@ class SwarmApiMixin(object):
                  'Role': 'manager',
                  'Labels': {'foo': 'bar'}
                }
-            >>> client.update_node(node_id='24ifsmvkjbyhk', version=8,
+            >>> client.api.update_node(node_id='24ifsmvkjbyhk', version=8,
                node_spec=node_spec)

        """

@@ -1,8 +1,7 @@
-from .. import errors
-from .. import utils
+from .. import errors, utils


-class VolumeApiMixin(object):
+class VolumeApiMixin:
    def volumes(self, filters=None):
        """
        List volumes currently registered by the docker daemon. Similar to the
@@ -21,7 +20,7 @@ class VolumeApiMixin(object):

        Example:

-            >>> cli.volumes()
+            >>> client.api.volumes()
            {u'Volumes': [{u'Driver': u'local',
              u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
              u'Name': u'foobar'},
@@ -56,15 +55,18 @@ class VolumeApiMixin(object):

        Example:

-            >>> volume = cli.create_volume(name='foobar', driver='local',
-                    driver_opts={'foo': 'bar', 'baz': 'false'},
-                    labels={"key": "value"})
-            >>> print(volume)
+            >>> volume = client.api.create_volume(
+            ...     name='foobar',
+            ...     driver='local',
+            ...     driver_opts={'foo': 'bar', 'baz': 'false'},
+            ...     labels={"key": "value"},
+            ... )
+            ... print(volume)
            {u'Driver': u'local',
-            u'Labels': {u'key': u'value'},
-            u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
-            u'Name': u'foobar',
-            u'Scope': u'local'}
+             u'Labels': {u'key': u'value'},
+             u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
+             u'Name': u'foobar',
+             u'Scope': u'local'}

        """
        url = self._url('/volumes/create')
@@ -104,7 +106,7 @@ class VolumeApiMixin(object):

        Example:

-            >>> cli.inspect_volume('foobar')
+            >>> client.api.inspect_volume('foobar')
            {u'Driver': u'local',
             u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
             u'Name': u'foobar'}

@@ -2,14 +2,11 @@ import base64
import json
import logging

-import six
-
-from . import credentials
-from . import errors
+from . import credentials, errors
from .utils import config

INDEX_NAME = 'docker.io'
-INDEX_URL = 'https://index.{0}/v1/'.format(INDEX_NAME)
+INDEX_URL = f'https://index.{INDEX_NAME}/v1/'
TOKEN_USERNAME = '<token>'

log = logging.getLogger(__name__)
@@ -18,21 +15,21 @@ log = logging.getLogger(__name__)
def resolve_repository_name(repo_name):
    if '://' in repo_name:
        raise errors.InvalidRepository(
-            'Repository name cannot contain a scheme ({0})'.format(repo_name)
+            f'Repository name cannot contain a scheme ({repo_name})'
        )

    index_name, remote_name = split_repo_name(repo_name)
    if index_name[0] == '-' or index_name[-1] == '-':
        raise errors.InvalidRepository(
-            'Invalid index name ({0}). Cannot begin or end with a'
-            ' hyphen.'.format(index_name)
+            f'Invalid index name ({index_name}). '
+            'Cannot begin or end with a hyphen.'
        )
    return resolve_index_name(index_name), remote_name


def resolve_index_name(index_name):
    index_name = convert_to_hostname(index_name)
-    if index_name == 'index.' + INDEX_NAME:
+    if index_name == f"index.{INDEX_NAME}":
        index_name = INDEX_NAME
    return index_name

@@ -98,12 +95,10 @@ class AuthConfig(dict):
        """

        conf = {}
-        for registry, entry in six.iteritems(entries):
+        for registry, entry in entries.items():
            if not isinstance(entry, dict):
                log.debug(
-                    'Config entry for key {0} is not auth config'.format(
-                        registry
-                    )
+                    f'Config entry for key {registry} is not auth config'
                )
                # We sometimes fall back to parsing the whole config as if it
                # was the auth config by itself, for legacy purposes. In that
@@ -111,17 +106,11 @@ class AuthConfig(dict):
                # keys is not formatted properly.
                if raise_on_error:
                    raise errors.InvalidConfigFile(
-                        'Invalid configuration for registry {0}'.format(
-                            registry
-                        )
+                        f'Invalid configuration for registry {registry}'
                    )
                return {}
            if 'identitytoken' in entry:
-                log.debug(
-                    'Found an IdentityToken entry for registry {0}'.format(
-                        registry
-                    )
-                )
+                log.debug(f'Found an IdentityToken entry for registry {registry}')
                conf[registry] = {
                    'IdentityToken': entry['identitytoken']
                }
@@ -132,16 +121,15 @@ class AuthConfig(dict):
                    # a valid value in the auths config.
                    # https://github.com/docker/compose/issues/3265
                    log.debug(
-                        'Auth data for {0} is absent. Client might be using a '
-                        'credentials store instead.'.format(registry)
+                        f'Auth data for {registry} is absent. '
+                        f'Client might be using a credentials store instead.'
                    )
                    conf[registry] = {}
                    continue

            username, password = decode_auth(entry['auth'])
            log.debug(
-                'Found entry (registry={0}, username={1})'
-                .format(repr(registry), repr(username))
+                f'Found entry (registry={registry!r}, username={username!r})'
            )

            conf[registry] = {
@@ -170,7 +158,7 @@ class AuthConfig(dict):
        try:
            with open(config_file) as f:
                config_dict = json.load(f)
-        except (IOError, KeyError, ValueError) as e:
+        except (OSError, KeyError, ValueError) as e:
            # Likely missing new Docker config file or it's in an
            # unknown format, continue to attempt to read old location
            # and format.
@@ -230,7 +218,7 @@ class AuthConfig(dict):
        store_name = self.get_credential_store(registry)
        if store_name is not None:
            log.debug(
-                'Using credentials store "{0}"'.format(store_name)
+                f'Using credentials store "{store_name}"'
            )
            cfg = self._resolve_authconfig_credstore(registry, store_name)
            if cfg is not None:
@@ -239,15 +227,15 @@ class AuthConfig(dict):

        # Default to the public index server
        registry = resolve_index_name(registry) if registry else INDEX_NAME
-        log.debug("Looking for auth entry for {0}".format(repr(registry)))
+        log.debug(f"Looking for auth entry for {repr(registry)}")

        if registry in self.auths:
-            log.debug("Found {0}".format(repr(registry)))
+            log.debug(f"Found {repr(registry)}")
            return self.auths[registry]

-        for key, conf in six.iteritems(self.auths):
+        for key, conf in self.auths.items():
            if resolve_index_name(key) == registry:
-                log.debug("Found {0}".format(repr(key)))
+                log.debug(f"Found {repr(key)}")
                return conf

        log.debug("No entry found")
@@ -258,7 +246,7 @@ class AuthConfig(dict):
            # The ecosystem is a little schizophrenic with index.docker.io VS
            # docker.io - in that case, it seems the full URL is necessary.
            registry = INDEX_URL
-        log.debug("Looking for auth entry for {0}".format(repr(registry)))
+        log.debug(f"Looking for auth entry for {repr(registry)}")
        store = self._get_store_instance(credstore_name)
        try:
            data = store.get(registry)
@@ -278,8 +266,8 @@ class AuthConfig(dict):
            return None
        except credentials.StoreError as e:
            raise errors.DockerException(
-                'Credentials store error: {0}'.format(repr(e))
-            )
+                f'Credentials store error: {repr(e)}'
+            ) from e

    def _get_store_instance(self, name):
        if name not in self._stores:
@@ -329,7 +317,7 @@ def convert_to_hostname(url):


def decode_auth(auth):
-    if isinstance(auth, six.string_types):
+    if isinstance(auth, str):
        auth = auth.encode('ascii')
    s = base64.b64decode(auth)
    login, pwd = s.split(b':', 1)
@@ -385,7 +373,6 @@ def _load_legacy_config(config_file):
        }}
    except Exception as e:
        log.debug(e)
-        pass

    log.debug("All parsing attempts failed - returning empty config")
    return {}

@@ -1,5 +1,5 @@
from .api.client import APIClient
-from .constants import (DEFAULT_TIMEOUT_SECONDS, DEFAULT_MAX_POOL_SIZE)
+from .constants import DEFAULT_MAX_POOL_SIZE, DEFAULT_TIMEOUT_SECONDS
from .models.configs import ConfigCollection
from .models.containers import ContainerCollection
from .models.images import ImageCollection

@@ -13,7 +13,7 @@ from .models.volumes import VolumeCollection
from .utils import kwargs_from_env


-class DockerClient(object):
+class DockerClient:
    """
    A client for communicating with a Docker server.

@@ -71,8 +71,6 @@ class DockerClient(object):
        timeout (int): Default timeout for API calls, in seconds.
        max_pool_size (int): The maximum number of connections
            to save in the pool.
-        ssl_version (int): A valid `SSL version`_.
-        assert_hostname (bool): Verify the hostname of the server.
        environment (dict): The environment to read environment variables
            from. Default: the value of ``os.environ``
        credstore_env (dict): Override environment variables when calling
@@ -212,7 +210,7 @@ class DockerClient(object):
    close.__doc__ = APIClient.close.__doc__

    def __getattr__(self, name):
-        s = ["'DockerClient' object has no attribute '{}'".format(name)]
+        s = [f"'DockerClient' object has no attribute '{name}'"]
        # If a user calls a method on APIClient, they
        if hasattr(APIClient, name):
            s.append("In Docker SDK for Python 2.0, this method is now on the "

@@ -1,8 +1,9 @@
import sys
-from .version import version

-DEFAULT_DOCKER_API_VERSION = '1.39'
-MINIMUM_DOCKER_API_VERSION = '1.21'
+from .version import __version__
+
+DEFAULT_DOCKER_API_VERSION = '1.45'
+MINIMUM_DOCKER_API_VERSION = '1.24'
DEFAULT_TIMEOUT_SECONDS = 60
STREAM_HEADER_SIZE_BYTES = 8
CONTAINER_LIMITS_KEYS = [
@@ -28,7 +29,7 @@ INSECURE_REGISTRY_DEPRECATION_WARNING = \
IS_WINDOWS_PLATFORM = (sys.platform == 'win32')
WINDOWS_LONGPATH_PREFIX = '\\\\?\\'

-DEFAULT_USER_AGENT = "docker-sdk-python/{0}".format(version)
+DEFAULT_USER_AGENT = f"docker-sdk-python/{__version__}"
DEFAULT_NUM_POOLS = 25

# The OpenSSH server default value for MaxSessions is 10 which means we can
@@ -1,3 +1,2 @@
-# flake8: noqa
-from .context import Context
from .api import ContextAPI
+from .context import Context

@@ -2,14 +2,17 @@ import json
import os

from docker import errors
-from docker.context.config import get_meta_dir
-from docker.context.config import METAFILE
-from docker.context.config import get_current_context_name
-from docker.context.config import write_context_name_to_docker_config
-from docker.context import Context
+
+from .config import (
+    METAFILE,
+    get_current_context_name,
+    get_meta_dir,
+    write_context_name_to_docker_config,
+)
+from .context import Context


-class ContextAPI(object):
+class ContextAPI:
    """Context API.
    Contains methods for context management:
    create, list, remove, get, inspect.
@@ -109,12 +112,12 @@ class ContextAPI(object):
                if filename == METAFILE:
                    try:
                        data = json.load(
-                            open(os.path.join(dirname, filename), "r"))
+                            open(os.path.join(dirname, filename)))
                        names.append(data["Name"])
                    except Exception as e:
                        raise errors.ContextException(
-                            "Failed to load metafile {}: {}".format(
-                                filename, e))
+                            f"Failed to load metafile {filename}: {e}",
+                        ) from e

        contexts = [cls.DEFAULT_CONTEXT]
        for name in names:
@@ -138,7 +141,7 @@ class ContextAPI(object):
        err = write_context_name_to_docker_config(name)
        if err:
            raise errors.ContextException(
-                'Failed to set current context: {}'.format(err))
+                f'Failed to set current context: {err}')

    @classmethod
    def remove_context(cls, name):

@@ -1,10 +1,9 @@
-import os
-import json
import hashlib
+import json
+import os

from docker import utils
-from docker.constants import IS_WINDOWS_PLATFORM
-from docker.constants import DEFAULT_UNIX_SOCKET
+from docker.constants import DEFAULT_UNIX_SOCKET, IS_WINDOWS_PLATFORM
from docker.utils.config import find_config_file

METAFILE = "meta.json"
@@ -15,7 +14,7 @@ def get_current_context_name():
    docker_cfg_path = find_config_file()
    if docker_cfg_path:
        try:
-            with open(docker_cfg_path, "r") as f:
+            with open(docker_cfg_path) as f:
                name = json.load(f).get("currentContext", "default")
        except Exception:
            return "default"
@@ -29,7 +28,7 @@ def write_context_name_to_docker_config(name=None):
    config = {}
    if docker_cfg_path:
        try:
-            with open(docker_cfg_path, "r") as f:
+            with open(docker_cfg_path) as f:
                config = json.load(f)
        except Exception as e:
            return e
@@ -77,5 +76,6 @@ def get_context_host(path=None, tls=False):
    host = utils.parse_host(path, IS_WINDOWS_PLATFORM, tls)
    if host == DEFAULT_UNIX_SOCKET:
        # remove http+ from default docker socket url
-        return host.strip("http+")
+        if host.startswith("http+"):
+            host = host[5:]
    return host

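The replaced ``strip("http+")`` call was a correctness fix: ``str.strip`` treats its argument as a set of characters to remove from both ends, not as a prefix. A short illustration of why the explicit prefix check is safer:

    # str.strip eats any leading/trailing chars from the set {h, t, p, +},
    # so it can consume more than the "http+" prefix.
    print('http+http://x'.strip('http+'))   # '://x' -- too much removed
    url = 'http+unix:///var/run/docker.sock'
    if url.startswith('http+'):
        url = url[5:]                       # remove exactly the prefix
    print(url)                              # 'unix:///var/run/docker.sock'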
@@ -1,16 +1,21 @@
-import os
-import json
+import json
+import os
from shutil import copyfile, rmtree
-from docker.tls import TLSConfig
+
from docker.errors import ContextException
-from docker.context.config import get_meta_dir
-from docker.context.config import get_meta_file
-from docker.context.config import get_tls_dir
-from docker.context.config import get_context_host
+from docker.tls import TLSConfig
+
+from .config import (
+    get_context_host,
+    get_meta_dir,
+    get_meta_file,
+    get_tls_dir,
+)


class Context:
    """A context."""

    def __init__(self, name, orchestrator=None, host=None, endpoints=None,
                 tls=False):
        if not name:
@@ -41,8 +46,9 @@ class Context:
            for k, v in endpoints.items():
                if not isinstance(v, dict):
                    # unknown format
-                    raise ContextException("""Unknown endpoint format for
-                    context {}: {}""".format(name, v))
+                    raise ContextException(
+                        f"Unknown endpoint format for context {name}: {v}",
+                    )

                self.endpoints[k] = v
                if k != "docker":
@@ -93,10 +99,11 @@ class Context:
        try:
            with open(meta_file) as f:
                metadata = json.load(f)
-        except (IOError, KeyError, ValueError) as e:
+        except (OSError, KeyError, ValueError) as e:
            # unknown format
-            raise Exception("""Detected corrupted meta file for
-                context {} : {}""".format(name, e))
+            raise Exception(
+                f"Detected corrupted meta file for context {name} : {e}"
+            ) from e

        # for docker endpoints, set defaults for
        # Host and SkipTLSVerify fields
@@ -127,8 +134,12 @@ class Context:
                elif filename.startswith("key"):
                    key = os.path.join(tls_dir, endpoint, filename)
            if all([ca_cert, cert, key]):
+                verify = None
+                if endpoint == "docker" and not self.endpoints["docker"].get(
+                        "SkipTLSVerify", False):
+                    verify = True
                certs[endpoint] = TLSConfig(
-                    client_cert=(cert, key), ca_cert=ca_cert)
+                    client_cert=(cert, key), ca_cert=ca_cert, verify=verify)
        self.tls_cfg = certs
        self.tls_path = tls_dir

@@ -166,7 +177,7 @@ class Context:
            rmtree(self.tls_path)

    def __repr__(self):
-        return "<%s: '%s'>" % (self.__class__.__name__, self.name)
+        return f"<{self.__class__.__name__}: '{self.name}'>"

    def __str__(self):
        return json.dumps(self.__call__(), indent=2)

@@ -1,4 +1,8 @@
-# flake8: noqa
+from .constants import (
+    DEFAULT_LINUX_STORE,
+    DEFAULT_OSX_STORE,
+    DEFAULT_WIN32_STORE,
+    PROGRAM_PREFIX,
+)
+from .errors import CredentialsNotFound, StoreError
from .store import Store
-from .errors import StoreError, CredentialsNotFound
-from .constants import *

@@ -13,13 +13,5 @@ class InitializationError(StoreError):
def process_store_error(cpe, program):
    message = cpe.output.decode('utf-8')
    if 'credentials not found in native keychain' in message:
-        return CredentialsNotFound(
-            'No matching credentials in {}'.format(
-                program
-            )
-        )
-    return StoreError(
-        'Credentials store {} exited with "{}".'.format(
-            program, cpe.output.decode('utf-8').strip()
-        )
-    )
+        return CredentialsNotFound(f'No matching credentials in {program}')
+    return StoreError(f'Credentials store {program} exited with "{message}".')

@@ -1,36 +1,33 @@
import errno
import json
+import shutil
import subprocess
+import warnings

-import six
-
-from . import constants
-from . import errors
+from . import constants, errors
from .utils import create_environment_dict
-from .utils import find_executable


-class Store(object):
+class Store:
    def __init__(self, program, environment=None):
        """ Create a store object that acts as an interface to
            perform the basic operations for storing, retrieving
            and erasing credentials using `program`.
        """
        self.program = constants.PROGRAM_PREFIX + program
-        self.exe = find_executable(self.program)
+        self.exe = shutil.which(self.program)
        self.environment = environment
        if self.exe is None:
-            raise errors.InitializationError(
-                '{} not installed or not available in PATH'.format(
-                    self.program
-                )
+            warnings.warn(
+                f'{self.program} not installed or not available in PATH',
+                stacklevel=1,
            )

    def get(self, server):
        """ Retrieve credentials for `server`. If no credentials are found,
            a `StoreError` will be raised.
        """
-        if not isinstance(server, six.binary_type):
+        if not isinstance(server, bytes):
            server = server.encode('utf-8')
        data = self._execute('get', server)
        result = json.loads(data.decode('utf-8'))
@@ -41,7 +38,7 @@ class Store(object):
        # raise CredentialsNotFound
        if result['Username'] == '' and result['Secret'] == '':
            raise errors.CredentialsNotFound(
-                'No matching credentials in {}'.format(self.program)
+                f'No matching credentials in {self.program}'
            )

        return result
@@ -61,7 +58,7 @@ class Store(object):
        """ Erase credentials for `server`. Raises a `StoreError` if an error
            occurs.
        """
-        if not isinstance(server, six.binary_type):
+        if not isinstance(server, bytes):
            server = server.encode('utf-8')
        self._execute('erase', server)

@@ -72,36 +69,25 @@ class Store(object):
        return json.loads(data.decode('utf-8'))

    def _execute(self, subcmd, data_input):
+        if self.exe is None:
+            raise errors.StoreError(
+                f'{self.program} not installed or not available in PATH'
+            )
        output = None
        env = create_environment_dict(self.environment)
        try:
-            if six.PY3:
-                output = subprocess.check_output(
-                    [self.exe, subcmd], input=data_input, env=env,
-                )
-            else:
-                process = subprocess.Popen(
-                    [self.exe, subcmd], stdin=subprocess.PIPE,
-                    stdout=subprocess.PIPE, env=env,
-                )
-                output, _ = process.communicate(data_input)
-                if process.returncode != 0:
-                    raise subprocess.CalledProcessError(
-                        returncode=process.returncode, cmd='', output=output
-                    )
+            output = subprocess.check_output(
+                [self.exe, subcmd], input=data_input, env=env,
+            )
        except subprocess.CalledProcessError as e:
-            raise errors.process_store_error(e, self.program)
+            raise errors.process_store_error(e, self.program) from e
        except OSError as e:
            if e.errno == errno.ENOENT:
                raise errors.StoreError(
-                    '{} not installed or not available in PATH'.format(
-                        self.program
-                    )
-                )
+                    f'{self.program} not installed or not available in PATH'
+                ) from e
            else:
                raise errors.StoreError(
-                    'Unexpected OS error "{}", errno={}'.format(
-                        e.strerror, e.errno
-                    )
-                )
+                    f'Unexpected OS error "{e.strerror}", errno={e.errno}'
+                ) from e
        return output

@@ -1,32 +1,4 @@
-import distutils.spawn
import os
-import sys
-
-
-def find_executable(executable, path=None):
-    """
-    As distutils.spawn.find_executable, but on Windows, look up
-    every extension declared in PATHEXT instead of just `.exe`
-    """
-    if sys.platform != 'win32':
-        return distutils.spawn.find_executable(executable, path)
-
-    if path is None:
-        path = os.environ['PATH']
-
-    paths = path.split(os.pathsep)
-    extensions = os.environ.get('PATHEXT', '.exe').split(os.pathsep)
-    base, ext = os.path.splitext(executable)
-
-    if not os.path.isfile(executable):
-        for p in paths:
-            for ext in extensions:
-                f = os.path.join(p, base + ext)
-                if os.path.isfile(f):
-                    return f
-        return None
-    else:
-        return executable


def create_environment_dict(overrides):
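The removed Windows-aware ``find_executable`` helper is covered by the standard library: ``shutil.which`` already consults ``PATHEXT`` on Windows. A minimal sketch of the replacement used in ``Store.__init__``; the helper program name is a placeholder:

    import shutil

    exe = shutil.which('docker-credential-desktop')
    if exe is None:
        print('credential helper not found on PATH')
    else:
        print('using', exe)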
@@ -1,5 +1,14 @@
import requests

+_image_not_found_explanation_fragments = frozenset(
+    fragment.lower() for fragment in [
+        'no such image',
+        'not found: does not exist or no pull access',
+        'repository does not exist',
+        'was found but does not match the specified platform',
+    ]
+)
+

class DockerException(Exception):
    """
@@ -18,17 +27,16 @@ def create_api_error_from_http_exception(e):
    try:
        explanation = response.json()['message']
    except ValueError:
-        explanation = (response.content or '').strip()
+        explanation = (response.text or '').strip()
    cls = APIError
    if response.status_code == 404:
-        if explanation and ('No such image' in str(explanation) or
-                            'not found: does not exist or no pull access'
-                            in str(explanation) or
-                            'repository does not exist' in str(explanation)):
+        explanation_msg = (explanation or '').lower()
+        if any(fragment in explanation_msg
+               for fragment in _image_not_found_explanation_fragments):
            cls = ImageNotFound
        else:
            cls = NotFound
-    raise cls(e, response=response, explanation=explanation)
+    raise cls(e, response=response, explanation=explanation) from e


class APIError(requests.exceptions.HTTPError, DockerException):
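For illustration, the matching strategy the two hunks above introduce, reduced to a standalone snippet; the fragments and message shown are abbreviated placeholders:

    # Lower-case the explanation once, then test substring membership
    # against the precomputed fragment set.
    fragments = frozenset(f.lower() for f in [
        'no such image',
        'repository does not exist',
    ])
    explanation = 'pull access denied, repository does not exist'
    print(any(fragment in explanation.lower() for fragment in fragments))  # True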
@@ -38,25 +46,27 @@ class APIError(requests.exceptions.HTTPError, DockerException):
    def __init__(self, message, response=None, explanation=None):
        # requests 1.2 supports response as a keyword argument, but
        # requests 1.1 doesn't
-        super(APIError, self).__init__(message)
+        super().__init__(message)
        self.response = response
        self.explanation = explanation

    def __str__(self):
-        message = super(APIError, self).__str__()
+        message = super().__str__()

        if self.is_client_error():
-            message = '{0} Client Error for {1}: {2}'.format(
-                self.response.status_code, self.response.url,
-                self.response.reason)
+            message = (
+                f'{self.response.status_code} Client Error for '
+                f'{self.response.url}: {self.response.reason}'
+            )

        elif self.is_server_error():
-            message = '{0} Server Error for {1}: {2}'.format(
-                self.response.status_code, self.response.url,
-                self.response.reason)
+            message = (
+                f'{self.response.status_code} Server Error for '
+                f'{self.response.url}: {self.response.reason}'
+            )

        if self.explanation:
-            message = '{0} ("{1}")'.format(message, self.explanation)
+            message = f'{message} ("{self.explanation}")'

        return message

@@ -133,11 +143,11 @@ class ContainerError(DockerException):
        self.image = image
        self.stderr = stderr

-        err = ": {}".format(stderr) if stderr is not None else ""
-        msg = ("Command '{}' in image '{}' returned non-zero exit "
-               "status {}{}").format(command, image, exit_status, err)
-
-        super(ContainerError, self).__init__(msg)
+        err = f": {stderr}" if stderr is not None else ""
+        super().__init__(
+            f"Command '{command}' in image '{image}' "
+            f"returned non-zero exit status {exit_status}{err}"
+        )


class StreamParseError(RuntimeError):
@@ -147,7 +157,7 @@ class StreamParseError(RuntimeError):

class BuildError(DockerException):
    def __init__(self, reason, build_log):
-        super(BuildError, self).__init__(reason)
+        super().__init__(reason)
        self.msg = reason
        self.build_log = build_log

@@ -157,8 +167,8 @@ class ImageLoadError(DockerException):


def create_unexpected_kwargs_error(name, kwargs):
-    quoted_kwargs = ["'{}'".format(k) for k in sorted(kwargs)]
-    text = ["{}() ".format(name)]
+    quoted_kwargs = [f"'{k}'" for k in sorted(kwargs)]
+    text = [f"{name}() "]
    if len(quoted_kwargs) == 1:
        text.append("got an unexpected keyword argument ")
    else:
@@ -172,7 +182,7 @@ class MissingContextParameter(DockerException):
        self.param = param

    def __str__(self):
-        return ("missing parameter: {}".format(self.param))
+        return (f"missing parameter: {self.param}")


class ContextAlreadyExists(DockerException):
@@ -180,7 +190,7 @@ class ContextAlreadyExists(DockerException):
        self.name = name

    def __str__(self):
-        return ("context {} already exists".format(self.name))
+        return (f"context {self.name} already exists")


class ContextException(DockerException):
@@ -196,4 +206,4 @@ class ContextNotFound(DockerException):
        self.name = name

    def __str__(self):
-        return ("context '{}' not found".format(self.name))
+        return (f"context '{self.name}' not found")

@@ -1,5 +1,5 @@
from ..api import APIClient
-from .resource import Model, Collection
+from .resource import Collection, Model


class Config(Model):
@@ -7,7 +7,7 @@ class Config(Model):
    id_attribute = 'ID'

    def __repr__(self):
-        return "<%s: '%s'>" % (self.__class__.__name__, self.name)
+        return f"<{self.__class__.__name__}: '{self.name}'>"

    @property
    def name(self):
@@ -30,6 +30,7 @@ class ConfigCollection(Collection):

    def create(self, **kwargs):
        obj = self.client.api.create_config(**kwargs)
+        obj.setdefault("Spec", {})["Name"] = kwargs.get("name")
        return self.prepare_model(obj)
    create.__doc__ = APIClient.create_config.__doc__

@@ -5,10 +5,13 @@ from collections import namedtuple
from ..api import APIClient
from ..constants import DEFAULT_DATA_CHUNK_SIZE
from ..errors import (
-    ContainerError, DockerException, ImageNotFound,
-    NotFound, create_unexpected_kwargs_error
+    ContainerError,
+    DockerException,
+    ImageNotFound,
+    NotFound,
+    create_unexpected_kwargs_error,
)
-from ..types import HostConfig
+from ..types import HostConfig, NetworkingConfig
from ..utils import version_gte
from .images import Image
from .resource import Collection, Model
@@ -21,6 +24,7 @@ class Container(Model):
    query the Docker daemon for the current properties, causing
    :py:attr:`attrs` to be refreshed.
    """
+
    @property
    def name(self):
        """
@@ -47,11 +51,11 @@ class Container(Model):
        try:
            result = self.attrs['Config'].get('Labels')
            return result or {}
-        except KeyError:
+        except KeyError as ke:
            raise DockerException(
                'Label data is not available for sparse objects. Call reload()'
                ' to retrieve all information'
-            )
+            ) from ke

    @property
    def status(self):
@@ -62,6 +66,15 @@ class Container(Model):
            return self.attrs['State']['Status']
        return self.attrs['State']

+    @property
+    def health(self):
+        """
+        The healthcheck status of the container.
+
+        For example, ``healthy``, or ``unhealthy``.
+        """
+        return self.attrs.get('State', {}).get('Health', {}).get('Status', 'unknown')
+
    @property
    def ports(self):
        """
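For illustration, a minimal sketch of the new ``health`` property; the container name is a placeholder:

    import docker

    client = docker.from_env()
    container = client.containers.get('my-container')
    print(container.health)  # e.g. 'healthy', 'unhealthy', 'starting',
                             # or 'unknown' if no healthcheck is configured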
@@ -121,6 +134,7 @@ class Container(Model):
            tag (str): The tag to push
            message (str): A commit message
            author (str): The name of the author
+            pause (bool): Whether to pause the container before committing
            changes (str): Dockerfile instructions to apply while committing
            conf (dict): The configuration for the container. See the
                `Engine API documentation
@@ -141,7 +155,8 @@ class Container(Model):
        Inspect changes on a container's filesystem.

        Returns:
-            (str)
+            (list) A list of dictionaries containing the attributes `Path`
+                and `Kind`.

        Raises:
            :py:class:`docker.errors.APIError`
@@ -166,7 +181,8 @@ class Container(Model):
            user (str): User to execute command as. Default: root
            detach (bool): If true, detach from the exec command.
                Default: False
-            stream (bool): Stream response data. Default: False
+            stream (bool): Stream response data. Ignored if ``detach`` is true.
+                Default: False
            socket (bool): Return the connection socket to allow custom
                read/write operations. Default: False
            environment (dict or list): A dictionary or a list of strings in
@ -290,14 +306,15 @@ class Container(Model):
|
|||
tail (str or int): Output specified number of lines at the end of
|
||||
logs. Either an integer of number of lines or the string
|
||||
``all``. Default ``all``
|
||||
since (datetime or int): Show logs since a given datetime or
|
||||
integer epoch (in seconds)
|
||||
since (datetime, int, or float): Show logs since a given datetime,
|
||||
integer epoch (in seconds) or float (in nanoseconds)
|
||||
follow (bool): Follow log output. Default ``False``
|
||||
until (datetime or int): Show logs that occurred before the given
|
||||
datetime or integer epoch (in seconds)
|
||||
until (datetime, int, or float): Show logs that occurred before
|
||||
the given datetime, integer epoch (in seconds), or
|
||||
float (in nanoseconds)
|
||||
|
||||
Returns:
|
||||
(generator or str): Logs from the container.
|
||||
(generator of bytes or bytes): Logs from the container.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
|
@ -323,7 +340,7 @@ class Container(Model):
|
|||
Args:
|
||||
path (str): Path inside the container where the file(s) will be
|
||||
extracted. Must exist.
|
||||
data (bytes): tar data to be extracted
|
||||
data (bytes or stream): tar data to be extracted
|
||||
|
||||
Returns:
|
||||
(bool): True if the call succeeds.
|
||||
|
@ -553,6 +570,11 @@ class ContainerCollection(Collection):
|
|||
``["SYS_ADMIN", "MKNOD"]``.
|
||||
cap_drop (list of str): Drop kernel capabilities.
|
||||
cgroup_parent (str): Override the default parent cgroup.
|
||||
cgroupns (str): Override the default cgroup namespace mode for the
|
||||
container. One of:
|
||||
- ``private`` the container runs in its own private cgroup
|
||||
namespace.
|
||||
- ``host`` use the host system's cgroup namespace.
|
||||
cpu_count (int): Number of usable CPUs (Windows only).
|
||||
cpu_percent (int): Usable percentage of the available CPUs
|
||||
(Windows only).
|
||||
|
@ -600,7 +622,28 @@ class ContainerCollection(Collection):
|
|||
group_add (:py:class:`list`): List of additional group names and/or
|
||||
IDs that the container process will run as.
|
||||
healthcheck (dict): Specify a test to perform to check that the
|
||||
container is healthy.
|
||||
container is healthy. The dict takes the following keys:
|
||||
|
||||
- test (:py:class:`list` or str): Test to perform to determine
|
||||
container health. Possible values:
|
||||
|
||||
- Empty list: Inherit healthcheck from parent image
|
||||
- ``["NONE"]``: Disable healthcheck
|
||||
- ``["CMD", args...]``: exec arguments directly.
|
||||
- ``["CMD-SHELL", command]``: Run command in the system's
|
||||
default shell.
|
||||
|
||||
If a string is provided, it will be used as a ``CMD-SHELL``
|
||||
command.
|
||||
- interval (int): The time to wait between checks in
|
||||
nanoseconds. It should be 0 or at least 1000000 (1 ms).
|
||||
- timeout (int): The time to wait before considering the check
|
||||
to have hung. It should be 0 or at least 1000000 (1 ms).
|
||||
- retries (int): The number of consecutive failures needed to
|
||||
consider a container as unhealthy.
|
||||
- start_period (int): Start period for the container to
|
||||
initialize before starting health-retries countdown in
|
||||
nanoseconds. It should be 0 or at least 1000000 (1 ms).
|
||||
hostname (str): Optional hostname for the container.
|
||||
init (bool): Run an init inside the container that forwards
|
||||
signals and reaps processes
|
||||
|
@ -644,7 +687,7 @@ class ContainerCollection(Collection):
|
|||
network_mode (str): One of:
|
||||
|
||||
- ``bridge`` Create a new network stack for the container on
|
||||
on the bridge network.
|
||||
the bridge network.
|
||||
- ``none`` No networking for this container.
|
||||
- ``container:<name|id>`` Reuse another container's network
|
||||
stack.
|
||||
|
@ -652,6 +695,14 @@ class ContainerCollection(Collection):
|
|||
This mode is incompatible with ``ports``.
|
||||
|
||||
Incompatible with ``network``.
|
||||
networking_config (Dict[str, EndpointConfig]):
|
||||
Dictionary of EndpointConfig objects for each container network.
|
||||
The key is the name of the network.
|
||||
Defaults to ``None``.
|
||||
|
||||
Used in conjuction with ``network``.
|
||||
|
||||
Incompatible with ``network_mode``.
|
||||
oom_kill_disable (bool): Whether to disable OOM killer.
|
||||
oom_score_adj (int): An integer value containing the score given
|
||||
to the container in order to tune OOM killer preferences.
|
||||
|
@ -761,6 +812,15 @@ class ContainerCollection(Collection):
|
|||
{'/home/user1/': {'bind': '/mnt/vol2', 'mode': 'rw'},
|
||||
'/var/www': {'bind': '/mnt/vol1', 'mode': 'ro'}}
|
||||
|
||||
Or a list of strings which each one of its elements specifies a
|
||||
mount volume.
|
||||
|
||||
For example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
['/home/user1/:/mnt/vol2','/var/www:/mnt/vol1']
|
||||
|
||||
volumes_from (:py:class:`list`): List of container names or IDs to
|
||||
get volumes from.
|
||||
working_dir (str): Path to the working directory.
|
||||
|
@ -792,7 +852,7 @@ class ContainerCollection(Collection):
|
|||
image = image.id
|
||||
stream = kwargs.pop('stream', False)
|
||||
detach = kwargs.pop('detach', False)
|
||||
platform = kwargs.pop('platform', None)
|
||||
platform = kwargs.get('platform', None)
|
||||
|
||||
if detach and remove:
|
||||
if version_gte(self.client.api._version, '1.25'):
|
||||
|
@ -807,6 +867,12 @@ class ContainerCollection(Collection):
|
|||
'together.'
|
||||
)
|
||||
|
||||
if kwargs.get('networking_config') and not kwargs.get('network'):
|
||||
raise RuntimeError(
|
||||
'The option "networking_config" can not be used '
|
||||
'without "network".'
|
||||
)
|
||||
|
||||
try:
|
||||
container = self.create(image=image, command=command,
|
||||
detach=detach, **kwargs)
|
||||
|
@ -841,9 +907,9 @@ class ContainerCollection(Collection):
|
|||
container, exit_status, command, image, out
|
||||
)
|
||||
|
||||
return out if stream or out is None else b''.join(
|
||||
[line for line in out]
|
||||
)
|
||||
if stream or out is None:
|
||||
return out
|
||||
return b''.join(out)
|
||||
|
||||
def create(self, image, command=None, **kwargs):
|
||||
"""
|
||||
|
@ -959,6 +1025,7 @@ class ContainerCollection(Collection):
|
|||
|
||||
def prune(self, filters=None):
|
||||
return self.client.api.prune_containers(filters=filters)
|
||||
|
||||
prune.__doc__ = APIClient.prune_containers.__doc__
|
||||
|
||||
|
||||
|
@ -976,6 +1043,7 @@ RUN_CREATE_KWARGS = [
|
|||
'mac_address',
|
||||
'name',
|
||||
'network_disabled',
|
||||
'platform',
|
||||
'stdin_open',
|
||||
'stop_signal',
|
||||
'tty',
|
||||
|
@ -992,6 +1060,7 @@ RUN_HOST_CONFIG_KWARGS = [
|
|||
'cap_add',
|
||||
'cap_drop',
|
||||
'cgroup_parent',
|
||||
'cgroupns',
|
||||
'cpu_count',
|
||||
'cpu_percent',
|
||||
'cpu_period',
|
||||
|
@ -1075,8 +1144,17 @@ def _create_container_args(kwargs):
|
|||
host_config_kwargs['binds'] = volumes
|
||||
|
||||
network = kwargs.pop('network', None)
|
||||
networking_config = kwargs.pop('networking_config', None)
|
||||
if network:
|
||||
create_kwargs['networking_config'] = {network: None}
|
||||
if networking_config:
|
||||
# Sanity check: check if the network is defined in the
|
||||
# networking config dict, otherwise switch to None
|
||||
if network not in networking_config:
|
||||
networking_config = None
|
||||
|
||||
create_kwargs['networking_config'] = NetworkingConfig(
|
||||
networking_config
|
||||
) if networking_config else {network: None}
|
||||
host_config_kwargs['network_mode'] = network
|
||||
|
||||
# All kwargs should have been consumed by this point, so raise
|
||||
|
@ -1109,8 +1187,10 @@ def _host_volume_from_bind(bind):
|
|||
bits = rest.split(':', 1)
|
||||
if len(bits) == 1 or bits[1] in ('ro', 'rw'):
|
||||
return drive + bits[0]
|
||||
elif bits[1].endswith(':ro') or bits[1].endswith(':rw'):
|
||||
return bits[1][:-3]
|
||||
else:
|
||||
return bits[1].rstrip(':ro').rstrip(':rw')
|
||||
return bits[1]
|
||||
|
||||
|
||||
ExecResult = namedtuple('ExecResult', 'exit_code,output')
|
||||
|
|
|
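The new `networking_config` plumbing above pairs per-network endpoint settings with the `network` argument: the `EndpointConfig` dict must be keyed by the same network name, or the sanity check in `_create_container_args` discards it. A hedged sketch of a caller using it (the network name, alias, and address are illustrative, and the network must already exist):

```python
import docker
from docker.types import EndpointConfig

client = docker.from_env()

# EndpointConfig takes the API version first; the dict key below must
# match the `network` argument passed to run().
endpoint = EndpointConfig(
    version=client.api._version,
    aliases=["web"],
    ipv4_address="10.10.0.10",  # assumes 'appnet' uses a 10.10.0.0/24 subnet
)

container = client.containers.run(
    "nginx:alpine",
    detach=True,
    network="appnet",  # hypothetical pre-created user-defined network
    networking_config={"appnet": endpoint},
)
```
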
@@ -2,8 +2,6 @@ import itertools
 import re
 import warnings

-import six
-
 from ..api import APIClient
 from ..constants import DEFAULT_DATA_CHUNK_SIZE
 from ..errors import BuildError, ImageLoadError, InvalidArgument
@@ -17,7 +15,8 @@ class Image(Model):
     An image on the server.
     """
     def __repr__(self):
-        return "<%s: '%s'>" % (self.__class__.__name__, "', '".join(self.tags))
+        tag_str = "', '".join(self.tags)
+        return f"<{self.__class__.__name__}: '{tag_str}'>"

     @property
     def labels(self):
@@ -30,12 +29,12 @@ class Image(Model):
     @property
     def short_id(self):
         """
-        The ID of the image truncated to 10 characters, plus the ``sha256:``
+        The ID of the image truncated to 12 characters, plus the ``sha256:``
         prefix.
         """
         if self.id.startswith('sha256:'):
-            return self.id[:17]
-        return self.id[:10]
+            return self.id[:19]
+        return self.id[:12]

     @property
     def tags(self):
@@ -52,7 +51,7 @@ class Image(Model):
         Show the history of an image.

         Returns:
-            (str): The history of the image.
+            (list): The history of the image.

         Raises:
             :py:class:`docker.errors.APIError`
@@ -60,6 +59,24 @@ class Image(Model):
         """
         return self.client.api.history(self.id)

+    def remove(self, force=False, noprune=False):
+        """
+        Remove this image.
+
+        Args:
+            force (bool): Force removal of the image
+            noprune (bool): Do not delete untagged parents
+
+        Raises:
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
+        """
+        return self.client.api.remove_image(
+            self.id,
+            force=force,
+            noprune=noprune,
+        )
+
     def save(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE, named=False):
         """
         Get a tarball of an image. Similar to the ``docker save`` command.
@@ -84,19 +101,19 @@ class Image(Model):

         Example:

-            >>> image = cli.get_image("busybox:latest")
+            >>> image = cli.images.get("busybox:latest")
             >>> f = open('/tmp/busybox-latest.tar', 'wb')
-            >>> for chunk in image:
+            >>> for chunk in image.save():
             >>>   f.write(chunk)
             >>> f.close()
         """
         img = self.id
         if named:
             img = self.tags[0] if self.tags else img
-            if isinstance(named, six.string_types):
+            if isinstance(named, str):
                 if named not in self.tags:
                     raise InvalidArgument(
-                        "{} is not a valid tag for this image".format(named)
+                        f"{named} is not a valid tag for this image"
                     )
                 img = named

@@ -127,7 +144,7 @@ class RegistryData(Model):
     Image metadata stored on the registry, including available platforms.
     """
     def __init__(self, image_name, *args, **kwargs):
-        super(RegistryData, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
         self.image_name = image_name

     @property
@@ -140,10 +157,10 @@ class RegistryData(Model):
     @property
     def short_id(self):
         """
-        The ID of the image truncated to 10 characters, plus the ``sha256:``
+        The ID of the image truncated to 12 characters, plus the ``sha256:``
         prefix.
         """
-        return self.id[:17]
+        return self.id[:19]

     def pull(self, platform=None):
         """
@@ -180,7 +197,7 @@ class RegistryData(Model):
         parts = platform.split('/')
         if len(parts) > 3 or len(parts) < 1:
             raise InvalidArgument(
-                '"{0}" is not a valid platform descriptor'.format(platform)
+                f'"{platform}" is not a valid platform descriptor'
             )
         platform = {'os': parts[0]}
         if len(parts) > 2:
@@ -205,10 +222,10 @@ class ImageCollection(Collection):
         Build an image and return it. Similar to the ``docker build``
         command. Either ``path`` or ``fileobj`` must be set.

-        If you have a tar file for the Docker build context (including a
-        Dockerfile) already, pass a readable file-like object to ``fileobj``
-        and also pass ``custom_context=True``. If the stream is compressed
-        also, set ``encoding`` to the correct value (e.g ``gzip``).
+        If you already have a tar file for the Docker build context (including
+        a Dockerfile), pass a readable file-like object to ``fileobj``
+        and also pass ``custom_context=True``. If the stream is also
+        compressed, set ``encoding`` to the correct value (e.g. ``gzip``).

         If you want to get the raw output of the build, use the
         :py:meth:`~docker.api.build.BuildApiMixin.build` method in the
@@ -265,7 +282,7 @@ class ImageCollection(Collection):

         Returns:
             (tuple): The first item is the :py:class:`Image` object for the
-                image that was build. The second item is a generator of the
+                image that was built. The second item is a generator of the
                 build logs as JSON-decoded objects.

         Raises:
@@ -277,7 +294,7 @@ class ImageCollection(Collection):
                 If neither ``path`` nor ``fileobj`` is specified.
         """
         resp = self.client.api.build(**kwargs)
-        if isinstance(resp, six.string_types):
+        if isinstance(resp, str):
             return self.get(resp)
         last_event = None
         image_id = None
@@ -390,8 +407,8 @@ class ImageCollection(Collection):
             if match:
                 image_id = match.group(2)
                 images.append(image_id)
-            if 'error' in chunk:
-                raise ImageLoadError(chunk['error'])
+            if 'errorDetail' in chunk:
+                raise ImageLoadError(chunk['errorDetail']['message'])

         return [self.get(i) for i in images]

@@ -439,7 +456,8 @@ class ImageCollection(Collection):
         if 'stream' in kwargs:
             warnings.warn(
                 '`stream` is not a valid parameter for this method'
-                ' and will be overridden'
+                ' and will be overridden',
+                stacklevel=1,
             )
             del kwargs['stream']

@@ -452,9 +470,8 @@ class ImageCollection(Collection):
             # to be pulled.
             pass
         if not all_tags:
-            return self.get('{0}{2}{1}'.format(
-                repository, tag, '@' if tag.startswith('sha256:') else ':'
-            ))
+            sep = '@' if tag.startswith('sha256:') else ':'
+            return self.get(f'{repository}{sep}{tag}')
         return self.list(repository)

     def push(self, repository, tag=None, **kwargs):

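The corrected `save()` example above also covers the `named` flag documented in this diff. A short sketch of saving a tagged image to a tarball (the tag and output path are illustrative):

```python
import docker

client = docker.from_env()
image = client.images.get("busybox:latest")

# named=True embeds the first tag in the tarball manifest, so a later
# `docker load` restores the name instead of a bare image ID.
with open("/tmp/busybox-latest.tar", "wb") as f:
    for chunk in image.save(named=True):
        f.write(chunk)
```
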
@@ -1,7 +1,7 @@
 from ..api import APIClient
 from ..utils import version_gte
 from .containers import Container
-from .resource import Model, Collection
+from .resource import Collection, Model


 class Network(Model):
@@ -184,7 +184,7 @@ class NetworkCollection(Collection):

     def list(self, *args, **kwargs):
         """
-        List networks. Similar to the ``docker networks ls`` command.
+        List networks. Similar to the ``docker network ls`` command.

         Args:
             names (:py:class:`list`): List of names to filter by.

@@ -1,4 +1,4 @@
-from .resource import Model, Collection
+from .resource import Collection, Model


 class Node(Model):

@@ -7,7 +7,7 @@ class Plugin(Model):
     A plugin on the server.
     """
     def __repr__(self):
-        return "<%s: '%s'>" % (self.__class__.__name__, self.name)
+        return f"<{self.__class__.__name__}: '{self.name}'>"

     @property
     def name(self):
@@ -44,16 +44,19 @@ class Plugin(Model):
         self.client.api.configure_plugin(self.name, options)
         self.reload()

-    def disable(self):
+    def disable(self, force=False):
         """
         Disable the plugin.

+        Args:
+            force (bool): Force disable. Default: False
+
         Raises:
             :py:class:`docker.errors.APIError`
                 If the server returns an error.
         """

-        self.client.api.disable_plugin(self.name)
+        self.client.api.disable_plugin(self.name, force)
         self.reload()

     def enable(self, timeout=0):
@@ -117,8 +120,11 @@ class Plugin(Model):
         if remote is None:
             remote = self.name
         privileges = self.client.api.plugin_privileges(remote)
-        for d in self.client.api.upgrade_plugin(self.name, remote, privileges):
-            yield d
+        yield from self.client.api.upgrade_plugin(
+            self.name,
+            remote,
+            privileges,
+        )
         self.reload()
@@ -181,7 +187,7 @@ class PluginCollection(Collection):
         """
         privileges = self.client.api.plugin_privileges(remote_name)
         it = self.client.api.pull_plugin(remote_name, privileges, local_name)
-        for data in it:
+        for _data in it:
             pass
         return self.get(local_name or remote_name)

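`disable()` gaining a `force` flag mirrors `docker plugin disable --force`. A sketch of using it (the plugin name is illustrative, and the plugin must already be installed):

```python
import docker

client = docker.from_env()

plugin = client.plugins.get("vieux/sshfs:latest")  # hypothetical plugin
# force=True disables the plugin even while it is still in use.
plugin.disable(force=True)
```
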
@@ -1,5 +1,4 @@
-
-class Model(object):
+class Model:
     """
     A base class for representing a single object on the server.
     """
@@ -18,13 +17,13 @@ class Model:
         self.attrs = {}

     def __repr__(self):
-        return "<%s: %s>" % (self.__class__.__name__, self.short_id)
+        return f"<{self.__class__.__name__}: {self.short_id}>"

     def __eq__(self, other):
         return isinstance(other, self.__class__) and self.id == other.id

     def __hash__(self):
-        return hash("%s:%s" % (self.__class__.__name__, self.id))
+        return hash(f"{self.__class__.__name__}:{self.id}")

     @property
     def id(self):
@@ -36,9 +35,9 @@ class Model:
     @property
     def short_id(self):
         """
-        The ID of the object, truncated to 10 characters.
+        The ID of the object, truncated to 12 characters.
         """
-        return self.id[:10]
+        return self.id[:12]

     def reload(self):
         """
@@ -49,7 +48,7 @@ class Model:
         self.attrs = new_model.attrs


-class Collection(object):
+class Collection:
     """
     A base class for representing all objects of a particular type on the
     server.
@@ -65,9 +64,10 @@ class Collection:

     def __call__(self, *args, **kwargs):
         raise TypeError(
-            "'{}' object is not callable. You might be trying to use the old "
-            "(pre-2.0) API - use docker.APIClient if so."
-            .format(self.__class__.__name__))
+            f"'{self.__class__.__name__}' object is not callable. "
+            "You might be trying to use the old (pre-2.0) API - "
+            "use docker.APIClient if so."
+        )

     def list(self):
         raise NotImplementedError
@@ -89,5 +89,4 @@ class Collection:
         elif isinstance(attrs, dict):
             return self.model(attrs=attrs, client=self.client, collection=self)
         else:
-            raise Exception("Can't create %s from %s" %
-                            (self.model.__name__, attrs))
+            raise Exception(f"Can't create {self.model.__name__} from {attrs}")

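The truncation change above brings `short_id` in line with the 12-character short IDs the Docker CLI prints; for images, keeping the ``sha256:`` prefix means slicing to 19 characters total (7 for the prefix plus 12 hex digits). A quick illustration with made-up IDs:

```python
# Made-up digest, for illustration only.
image_id = "sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
print(image_id[:19])  # sha256:0123456789ab  (Image.short_id)

container_id = "4f2a26d21e8dd8f8c8e39c94d9f1e3a2b5c6d7e8f9a0b1c2d3e4f5a6b7c8d9e0"
print(container_id[:12])  # 4f2a26d21e8d  (Model.short_id)
```
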
@@ -1,5 +1,5 @@
 from ..api import APIClient
-from .resource import Model, Collection
+from .resource import Collection, Model


 class Secret(Model):
@@ -7,7 +7,7 @@ class Secret(Model):
     id_attribute = 'ID'

     def __repr__(self):
-        return "<%s: '%s'>" % (self.__class__.__name__, self.name)
+        return f"<{self.__class__.__name__}: '{self.name}'>"

     @property
     def name(self):
@@ -30,6 +30,7 @@ class SecretCollection(Collection):

     def create(self, **kwargs):
         obj = self.client.api.create_secret(**kwargs)
+        obj.setdefault("Spec", {})["Name"] = kwargs.get("name")
         return self.prepare_model(obj)
     create.__doc__ = APIClient.create_secret.__doc__

@@ -1,7 +1,9 @@
 import copy
-from docker.errors import create_unexpected_kwargs_error, InvalidArgument
-from docker.types import TaskTemplate, ContainerSpec, Placement, ServiceMode
-from .resource import Model, Collection
+
+from docker.errors import InvalidArgument, create_unexpected_kwargs_error
+from docker.types import ContainerSpec, Placement, ServiceMode, TaskTemplate
+
+from .resource import Collection, Model


 class Service(Model):
@@ -157,6 +159,8 @@ class ServiceCollection(Collection):
                 constraints.
             preferences (list of tuple): :py:class:`~docker.types.Placement`
                 preferences.
+            maxreplicas (int): :py:class:`~docker.types.Placement` maxreplicas
+                or (int) representing maximum number of replicas per node.
             platforms (list of tuple): A list of platform constraints
                 expressed as ``(arch, os)`` tuples.
             container_labels (dict): Labels to apply to the container.
@@ -211,6 +215,12 @@ class ServiceCollection(Collection):
                 to the service.
             privileges (Privileges): Security options for the service's
                 containers.
+            cap_add (:py:class:`list`): A list of kernel capabilities to add to
+                the default set for the container.
+            cap_drop (:py:class:`list`): A list of kernel capabilities to drop
+                from the default set for the container.
+            sysctls (:py:class:`dict`): A dict of sysctl values to add to the
+                container

         Returns:
             :py:class:`Service`: The created service.
@@ -258,6 +268,8 @@ class ServiceCollection(Collection):
             filters (dict): Filters to process on the nodes list. Valid
                 filters: ``id``, ``name``, ``label`` and ``mode``.
                 Default: ``None``.
+            status (bool): Include the service task count of running and
+                desired tasks. Default: ``None``.

         Returns:
             list of :py:class:`Service`: The services.
@@ -275,6 +287,8 @@ class ServiceCollection(Collection):
 # kwargs to copy straight over to ContainerSpec
 CONTAINER_SPEC_KWARGS = [
     'args',
+    'cap_add',
+    'cap_drop',
     'command',
     'configs',
     'dns_config',
@@ -297,6 +311,7 @@ CONTAINER_SPEC_KWARGS = [
     'tty',
     'user',
     'workdir',
+    'sysctls',
 ]

 # kwargs to copy straight over to TaskTemplate
@@ -312,6 +327,7 @@ CREATE_SERVICE_KWARGS = [
     'labels',
     'mode',
     'update_config',
+    'rollback_config',
     'endpoint_spec',
 ]

@@ -319,6 +335,7 @@ PLACEMENT_KWARGS = [
     'constraints',
     'preferences',
     'platforms',
+    'maxreplicas',
 ]

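The new `maxreplicas`, `cap_add`/`cap_drop`, and `sysctls` kwargs flow straight into `Placement` and `ContainerSpec` via the kwarg lists above. A hedged sketch of a service create call using them (requires a swarm manager; names and values are illustrative):

```python
import docker

client = docker.from_env()

service = client.services.create(
    image="nginx:alpine",
    name="web",                              # illustrative name
    maxreplicas=2,                           # at most two replicas per node
    cap_add=["NET_ADMIN"],
    cap_drop=["CHOWN"],
    sysctls={"net.core.somaxconn": "1024"},
)
print(service.id)
```
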
@@ -1,5 +1,6 @@
 from docker.api import APIClient
 from docker.errors import APIError
+
 from .resource import Model


@@ -11,7 +12,7 @@ class Swarm(Model):
     id_attribute = 'ID'

     def __init__(self, *args, **kwargs):
-        super(Swarm, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
         if self.client:
             try:
                 self.reload()
@@ -35,7 +36,8 @@ class Swarm(Model):

     def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
              force_new_cluster=False, default_addr_pool=None,
-             subnet_size=None, data_path_addr=None, **kwargs):
+             subnet_size=None, data_path_addr=None, data_path_port=None,
+             **kwargs):
         """
         Initialize a new swarm on this Engine.

@@ -65,6 +67,9 @@ class Swarm(Model):
                 networks created from the default subnet pool. Default: None
             data_path_addr (string): Address or interface to use for data path
                 traffic. For example, 192.168.1.1, or an interface, like eth0.
+            data_path_port (int): Port number to use for data path traffic.
+                Acceptable port range is 1024 to 49151. If set to ``None`` or
+                0, the default port 4789 will be used. Default: None
             task_history_retention_limit (int): Maximum number of tasks
                 history stored.
             snapshot_interval (int): Number of logs entries between snapshot.
@@ -121,6 +126,7 @@ class Swarm(Model):
             'default_addr_pool': default_addr_pool,
             'subnet_size': subnet_size,
             'data_path_addr': data_path_addr,
+            'data_path_port': data_path_port,
         }
         init_kwargs['swarm_spec'] = self.client.api.create_swarm_spec(**kwargs)
         node_id = self.client.api.init_swarm(**init_kwargs)

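With `data_path_port` threaded through `init()`, the VXLAN data-plane port can be moved off the default 4789 at swarm creation time. A sketch (addresses are illustrative):

```python
import docker

client = docker.from_env()

# data_path_port must be in 1024-49151; None or 0 falls back to 4789.
client.swarm.init(
    advertise_addr="192.168.1.10",
    listen_addr="0.0.0.0:2377",
    data_path_port=4790,
)
```
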
@@ -1,5 +1,5 @@
 from ..api import APIClient
-from .resource import Model, Collection
+from .resource import Collection, Model


 class Volume(Model):

@@ -1,67 +1,30 @@
 import os
-import ssl

 from . import errors
-from .transport import SSLHTTPAdapter


-class TLSConfig(object):
+class TLSConfig:
     """
     TLS configuration.

     Args:
         client_cert (tuple of str): Path to client cert, path to client key.
         ca_cert (str): Path to CA cert file.
-        verify (bool or str): This can be ``False`` or a path to a CA cert
-            file.
-        ssl_version (int): A valid `SSL version`_.
-        assert_hostname (bool): Verify the hostname of the server.
-
-    .. _`SSL version`:
-        https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
+        verify (bool or str): This can be a bool or a path to a CA cert
+            file to verify against. If ``True``, verify using ca_cert;
+            if ``False`` or not specified, do not verify.
     """
     cert = None
     ca_cert = None
     verify = None
-    ssl_version = None

-    def __init__(self, client_cert=None, ca_cert=None, verify=None,
-                 ssl_version=None, assert_hostname=None,
-                 assert_fingerprint=None):
+    def __init__(self, client_cert=None, ca_cert=None, verify=None):
         # Argument compatibility/mapping with
         # https://docs.docker.com/engine/articles/https/
         # This diverges from the Docker CLI in that users can specify 'tls'
         # here, but also disable any public/default CA pool verification by
         # leaving verify=False

-        self.assert_hostname = assert_hostname
-        self.assert_fingerprint = assert_fingerprint
-
-        # TODO(dperny): according to the python docs, PROTOCOL_TLSvWhatever is
-        # depcreated, and it's recommended to use OPT_NO_TLSvWhatever instead
-        # to exclude versions. But I think that might require a bigger
-        # architectural change, so I've opted not to pursue it at this time
-
-        # If the user provides an SSL version, we should use their preference
-        if ssl_version:
-            self.ssl_version = ssl_version
-        else:
-            # If the user provides no ssl version, we should default to
-            # TLSv1_2. This option is the most secure, and will work for the
-            # majority of users with reasonably up-to-date software. However,
-            # before doing so, detect openssl version to ensure we can support
-            # it.
-            if ssl.OPENSSL_VERSION_INFO[:3] >= (1, 0, 1) and hasattr(
-                    ssl, 'PROTOCOL_TLSv1_2'):
-                # If the OpenSSL version is high enough to support TLSv1_2,
-                # then we should use it.
-                self.ssl_version = getattr(ssl, 'PROTOCOL_TLSv1_2')
-            else:
-                # Otherwise, TLS v1.0 seems to be the safest default;
-                # SSLv23 fails in mysterious ways:
-                # https://github.com/docker/docker-py/issues/963
-                self.ssl_version = ssl.PROTOCOL_TLSv1
-
         # "client_cert" must have both or neither cert/key files. In
         # either case, Alert the user when both are expected, but any are
         # missing.
@@ -73,7 +36,7 @@ class TLSConfig:
                 raise errors.TLSParameterError(
                     'client_cert must be a tuple of'
                     ' (client certificate, key file)'
-                )
+                ) from None

         if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or
                                           not os.path.isfile(tls_key)):
@@ -95,8 +58,6 @@ class TLSConfig:
         """
         Configure a client with these TLS options.
         """
-        client.ssl_version = self.ssl_version
-
         if self.verify and self.ca_cert:
             client.verify = self.ca_cert
         else:
@@ -104,9 +65,3 @@ class TLSConfig:

         if self.cert:
             client.cert = self.cert
-
-        client.mount('https://', SSLHTTPAdapter(
-            ssl_version=self.ssl_version,
-            assert_hostname=self.assert_hostname,
-            assert_fingerprint=self.assert_fingerprint,
-        ))

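After this simplification, `TLSConfig` no longer pins protocol versions or hostname assertions; it just points the HTTP client at the certificate material and lets the runtime's TLS stack negotiate. A sketch of the remaining surface (paths and host are illustrative):

```python
import docker

tls_config = docker.tls.TLSConfig(
    client_cert=("/certs/cert.pem", "/certs/key.pem"),  # illustrative paths
    ca_cert="/certs/ca.pem",
    verify=True,
)

client = docker.DockerClient(
    base_url="tcp://docker.example.com:2376",  # illustrative daemon host
    tls=tls_config,
)
```
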
@@ -1,6 +1,5 @@
 # flake8: noqa
 from .unixconn import UnixHTTPAdapter
-from .ssladapter import SSLHTTPAdapter

 try:
     from .npipeconn import NpipeHTTPAdapter
     from .npipesocket import NpipeSocket

@@ -3,6 +3,11 @@ import requests.adapters

 class BaseHTTPAdapter(requests.adapters.HTTPAdapter):
     def close(self):
-        super(BaseHTTPAdapter, self).close()
+        super().close()
         if hasattr(self, 'pools'):
             self.pools.clear()
+
+    # Fix for requests 2.32.2+:
+    # https://github.com/psf/requests/commit/c98e4d133ef29c46a9b68cd783087218a8075e05
+    def get_connection_with_tls_context(self, request, verify, proxies=None, cert=None):
+        return self.get_connection(request.url, proxies)

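requests 2.32.2 began resolving HTTPS connections through `get_connection_with_tls_context`, which bypasses the URL-keyed pooling these custom-scheme adapters rely on; delegating back to `get_connection` restores the old lookup path. A minimal sketch of the same compatibility shim for any custom adapter (the class name is illustrative):

```python
import requests
import requests.adapters


class PassthroughAdapter(requests.adapters.HTTPAdapter):
    """Illustrative adapter showing the requests 2.32+ shim."""

    def get_connection_with_tls_context(self, request, verify, proxies=None, cert=None):
        # Fall back to URL-based connection lookup, as docker-py's
        # BaseHTTPAdapter does above.
        return self.get_connection(request.url, proxies)


session = requests.Session()
session.mount("http://", PassthroughAdapter())
```
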
@@ -1,26 +1,19 @@
-import six
+import queue
+
 import requests.adapters
+import urllib3
+import urllib3.connection

-from docker.transport.basehttpadapter import BaseHTTPAdapter
 from .. import constants
+from .basehttpadapter import BaseHTTPAdapter
 from .npipesocket import NpipeSocket

-if six.PY3:
-    import http.client as httplib
-else:
-    import httplib
-
-try:
-    import requests.packages.urllib3 as urllib3
-except ImportError:
-    import urllib3
-
 RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer


-class NpipeHTTPConnection(httplib.HTTPConnection, object):
+class NpipeHTTPConnection(urllib3.connection.HTTPConnection):
     def __init__(self, npipe_path, timeout=60):
-        super(NpipeHTTPConnection, self).__init__(
+        super().__init__(
             'localhost', timeout=timeout
         )
         self.npipe_path = npipe_path
@@ -35,7 +28,7 @@ class NpipeHTTPConnection(urllib3.connection.HTTPConnection):

 class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
     def __init__(self, npipe_path, timeout=60, maxsize=10):
-        super(NpipeHTTPConnectionPool, self).__init__(
+        super().__init__(
             'localhost', timeout=timeout, maxsize=maxsize
         )
         self.npipe_path = npipe_path
@@ -53,18 +46,17 @@ class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
         conn = None
         try:
             conn = self.pool.get(block=self.block, timeout=timeout)
-        except AttributeError:  # self.pool is None
-            raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")
+        except AttributeError as ae:  # self.pool is None
+            raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.") from ae

-        except six.moves.queue.Empty:
+        except queue.Empty:
             if self.block:
                 raise urllib3.exceptions.EmptyPoolError(
                     self,
                     "Pool reached maximum size and no more "
                     "connections are allowed."
-                )
-            pass  # Oh well, we'll create a new connection then
+                ) from None
+            # Oh well, we'll create a new connection then

         return conn or self._new_conn()

@@ -85,7 +77,7 @@ class NpipeHTTPAdapter(BaseHTTPAdapter):
         self.pools = RecentlyUsedContainer(
             pool_connections, dispose_func=lambda p: p.close()
         )
-        super(NpipeHTTPAdapter, self).__init__()
+        super().__init__()

     def get_connection(self, url, proxies=None):
         with self.pools.lock:

@@ -1,8 +1,10 @@
 import functools
-import time
 import io
+import time

-import six
+import pywintypes
+import win32api
+import win32event
 import win32file
 import win32pipe

@@ -24,7 +26,7 @@ def check_closed(f):
     return wrapped


-class NpipeSocket(object):
+class NpipeSocket:
     """ Partial implementation of the socket API over windows named pipes.
     This implementation is only designed to be used as a client socket,
     and server-specific methods (bind, listen, accept...) are not
@@ -55,7 +57,9 @@ class NpipeSocket:
                 0,
                 None,
                 win32file.OPEN_EXISTING,
-                cSECURITY_ANONYMOUS | cSECURITY_SQOS_PRESENT,
+                (cSECURITY_ANONYMOUS
+                 | cSECURITY_SQOS_PRESENT
+                 | win32file.FILE_FLAG_OVERLAPPED),
                 0
             )
         except win32pipe.error as e:
@@ -128,29 +132,41 @@ class NpipeSocket:

     @check_closed
     def recv_into(self, buf, nbytes=0):
-        if six.PY2:
-            return self._recv_into_py2(buf, nbytes)
-
         readbuf = buf
         if not isinstance(buf, memoryview):
             readbuf = memoryview(buf)

-        err, data = win32file.ReadFile(
-            self._handle,
-            readbuf[:nbytes] if nbytes else readbuf
-        )
-        return len(data)
-
-    def _recv_into_py2(self, buf, nbytes):
-        err, data = win32file.ReadFile(self._handle, nbytes or len(buf))
-        n = len(data)
-        buf[:n] = data
-        return n
+        event = win32event.CreateEvent(None, True, True, None)
+        try:
+            overlapped = pywintypes.OVERLAPPED()
+            overlapped.hEvent = event
+            err, data = win32file.ReadFile(
+                self._handle,
+                readbuf[:nbytes] if nbytes else readbuf,
+                overlapped
+            )
+            wait_result = win32event.WaitForSingleObject(event, self._timeout)
+            if wait_result == win32event.WAIT_TIMEOUT:
+                win32file.CancelIo(self._handle)
+                raise TimeoutError
+            return win32file.GetOverlappedResult(self._handle, overlapped, 0)
+        finally:
+            win32api.CloseHandle(event)

     @check_closed
     def send(self, string, flags=0):
-        err, nbytes = win32file.WriteFile(self._handle, string)
-        return nbytes
+        event = win32event.CreateEvent(None, True, True, None)
+        try:
+            overlapped = pywintypes.OVERLAPPED()
+            overlapped.hEvent = event
+            win32file.WriteFile(self._handle, string, overlapped)
+            wait_result = win32event.WaitForSingleObject(event, self._timeout)
+            if wait_result == win32event.WAIT_TIMEOUT:
+                win32file.CancelIo(self._handle)
+                raise TimeoutError
+            return win32file.GetOverlappedResult(self._handle, overlapped, 0)
+        finally:
+            win32api.CloseHandle(event)

     @check_closed
     def sendall(self, string, flags=0):
@@ -169,15 +185,12 @@ class NpipeSocket:
     def settimeout(self, value):
         if value is None:
             # Blocking mode
-            self._timeout = win32pipe.NMPWAIT_WAIT_FOREVER
+            self._timeout = win32event.INFINITE
         elif not isinstance(value, (float, int)) or value < 0:
             raise ValueError('Timeout value out of range')
-        elif value == 0:
-            # Non-blocking mode
-            self._timeout = win32pipe.NMPWAIT_NO_WAIT
         else:
             # Timeout mode - Value converted to milliseconds
-            self._timeout = value * 1000
+            self._timeout = int(value * 1000)

     def gettimeout(self):
         return self._timeout
@@ -195,7 +208,7 @@ class NpipeFileIOBase(io.RawIOBase):
         self.sock = npipe_socket

     def close(self):
-        super(NpipeFileIOBase, self).close()
+        super().close()
         self.sock = None

     def fileno(self):

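Note the timeout plumbing in this rewrite: `settimeout` now stores whole milliseconds (or `win32event.INFINITE`) so `WaitForSingleObject` can cancel stalled overlapped reads and writes. A pure-Python sketch of the same unit handling, with the Windows constant stubbed in for illustration:

```python
INFINITE = 0xFFFFFFFF  # stand-in for win32event.INFINITE


def to_win32_timeout(value):
    """Mirror NpipeSocket.settimeout's conversion rules (sketch)."""
    if value is None:
        return INFINITE          # blocking mode
    if not isinstance(value, (float, int)) or value < 0:
        raise ValueError('Timeout value out of range')
    return int(value * 1000)     # seconds -> whole milliseconds


print(to_win32_timeout(2.5))               # 2500
print(to_win32_timeout(None) == INFINITE)  # True
```
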
@@ -1,53 +1,64 @@
-import paramiko
-import requests.adapters
-import six
 import logging
 import os
+import queue
 import signal
 import socket
 import subprocess
+import urllib.parse
+
+import paramiko
+import requests.adapters
+import urllib3
+import urllib3.connection

-from docker.transport.basehttpadapter import BaseHTTPAdapter
 from .. import constants
-
-if six.PY3:
-    import http.client as httplib
-else:
-    import httplib
-
-try:
-    import requests.packages.urllib3 as urllib3
-except ImportError:
-    import urllib3
+from .basehttpadapter import BaseHTTPAdapter

 RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer


 class SSHSocket(socket.socket):
     def __init__(self, host):
-        super(SSHSocket, self).__init__(
+        super().__init__(
             socket.AF_INET, socket.SOCK_STREAM)
         self.host = host
         self.port = None
-        if ':' in host:
-            self.host, self.port = host.split(':')
+        self.user = None
+        if ':' in self.host:
+            self.host, self.port = self.host.split(':')
+        if '@' in self.host:
+            self.user, self.host = self.host.split('@')

         self.proc = None

     def connect(self, **kwargs):
-        port = '' if not self.port else '-p {}'.format(self.port)
-        args = [
-            'ssh',
-            '-q',
-            self.host,
-            port,
-            'docker system dial-stdio'
-        ]
+        args = ['ssh']
+        if self.user:
+            args = args + ['-l', self.user]
+
+        if self.port:
+            args = args + ['-p', self.port]
+
+        args = args + ['--', self.host, 'docker system dial-stdio']
+
+        preexec_func = None
+        if not constants.IS_WINDOWS_PLATFORM:
+            def f():
+                signal.signal(signal.SIGINT, signal.SIG_IGN)
+            preexec_func = f
+
+        env = dict(os.environ)
+
+        # drop LD_LIBRARY_PATH and SSL_CERT_FILE
+        env.pop('LD_LIBRARY_PATH', None)
+        env.pop('SSL_CERT_FILE', None)
+
         self.proc = subprocess.Popen(
-            ' '.join(args),
-            shell=True,
+            args,
+            env=env,
             stdout=subprocess.PIPE,
             stdin=subprocess.PIPE,
-            preexec_fn=lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))
+            preexec_fn=preexec_func)

     def _write(self, data):
         if not self.proc or self.proc.stdin.closed:
@@ -72,8 +83,7 @@ class SSHSocket(socket.socket):
     def makefile(self, mode):
         if not self.proc:
             self.connect()
-        if six.PY3:
-            self.proc.stdout.channel = self
+        self.proc.stdout.channel = self

         return self.proc.stdout

@@ -85,9 +95,9 @@ class SSHSocket(socket.socket):
         self.proc.terminate()


-class SSHConnection(httplib.HTTPConnection, object):
+class SSHConnection(urllib3.connection.HTTPConnection):
     def __init__(self, ssh_transport=None, timeout=60, host=None):
-        super(SSHConnection, self).__init__(
+        super().__init__(
             'localhost', timeout=timeout
         )
         self.ssh_transport = ssh_transport
@@ -111,7 +121,7 @@ class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
     scheme = 'ssh'

     def __init__(self, ssh_client=None, timeout=60, maxsize=10, host=None):
-        super(SSHConnectionPool, self).__init__(
+        super().__init__(
             'localhost', timeout=timeout, maxsize=maxsize
         )
         self.ssh_transport = None
@@ -119,9 +129,6 @@ class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
         if ssh_client:
             self.ssh_transport = ssh_client.get_transport()
         self.ssh_host = host
-        self.ssh_port = None
-        if ':' in host:
-            self.ssh_host, self.ssh_port = host.split(':')

     def _new_conn(self):
         return SSHConnection(self.ssh_transport, self.timeout, self.ssh_host)
@@ -134,17 +141,17 @@ class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
         try:
             conn = self.pool.get(block=self.block, timeout=timeout)

-        except AttributeError:  # self.pool is None
-            raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")
+        except AttributeError as ae:  # self.pool is None
+            raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.") from ae

-        except six.moves.queue.Empty:
+        except queue.Empty:
             if self.block:
                 raise urllib3.exceptions.EmptyPoolError(
                     self,
                     "Pool reached maximum size and no more "
                     "connections are allowed."
-                )
-            pass  # Oh well, we'll create a new connection then
+                ) from None
+            # Oh well, we'll create a new connection then

         return conn or self._new_conn()

@@ -158,24 +165,27 @@ class SSHHTTPAdapter(BaseHTTPAdapter):
     def __init__(self, base_url, timeout=60,
                  pool_connections=constants.DEFAULT_NUM_POOLS,
                  max_pool_size=constants.DEFAULT_MAX_POOL_SIZE,
-                 shell_out=True):
+                 shell_out=False):
         self.ssh_client = None
         if not shell_out:
             self._create_paramiko_client(base_url)
             self._connect()

-        self.ssh_host = base_url.lstrip('ssh://')
+        self.ssh_host = base_url
+        if base_url.startswith('ssh://'):
+            self.ssh_host = base_url[len('ssh://'):]

         self.timeout = timeout
         self.max_pool_size = max_pool_size
         self.pools = RecentlyUsedContainer(
             pool_connections, dispose_func=lambda p: p.close()
         )
-        super(SSHHTTPAdapter, self).__init__()
+        super().__init__()

     def _create_paramiko_client(self, base_url):
         logging.getLogger("paramiko").setLevel(logging.WARNING)
         self.ssh_client = paramiko.SSHClient()
-        base_url = six.moves.urllib_parse.urlparse(base_url)
+        base_url = urllib.parse.urlparse(base_url)
         self.ssh_params = {
             "hostname": base_url.hostname,
             "port": base_url.port,
@@ -187,20 +197,21 @@ class SSHHTTPAdapter(BaseHTTPAdapter):
             with open(ssh_config_file) as f:
                 conf.parse(f)
             host_config = conf.lookup(base_url.hostname)
-            self.ssh_conf = host_config
             if 'proxycommand' in host_config:
                 self.ssh_params["sock"] = paramiko.ProxyCommand(
-                    self.ssh_conf['proxycommand']
+                    host_config['proxycommand']
                 )
             if 'hostname' in host_config:
                 self.ssh_params['hostname'] = host_config['hostname']
             if base_url.port is None and 'port' in host_config:
-                self.ssh_params['port'] = self.ssh_conf['port']
+                self.ssh_params['port'] = host_config['port']
             if base_url.username is None and 'user' in host_config:
-                self.ssh_params['username'] = self.ssh_conf['user']
+                self.ssh_params['username'] = host_config['user']
+            if 'identityfile' in host_config:
+                self.ssh_params['key_filename'] = host_config['identityfile']

         self.ssh_client.load_system_host_keys()
-        self.ssh_client.set_missing_host_key_policy(paramiko.WarningPolicy())
+        self.ssh_client.set_missing_host_key_policy(paramiko.RejectPolicy())

     def _connect(self):
         if self.ssh_client:
@@ -234,6 +245,6 @@ class SSHHTTPAdapter(BaseHTTPAdapter):
         return pool

     def close(self):
-        super(SSHHTTPAdapter, self).close()
+        super().close()
         if self.ssh_client:
             self.ssh_client.close()

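The rewritten `connect()` builds an argv list instead of a shell string, so a host like `user@host:2222` can no longer smuggle shell metacharacters into the spawned `ssh` process. A standalone sketch of the same argv assembly:

```python
def build_ssh_args(host, user=None, port=None):
    """Mirror SSHSocket.connect's argv construction (sketch)."""
    args = ['ssh']
    if user:
        args += ['-l', user]
    if port:
        args += ['-p', str(port)]
    # '--' stops option parsing before the (untrusted) host name.
    return args + ['--', host, 'docker system dial-stdio']


print(build_ssh_args('example.com', user='deploy', port=2222))
# ['ssh', '-l', 'deploy', '-p', '2222', '--', 'example.com',
#  'docker system dial-stdio']
```
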
@@ -1,73 +0,0 @@
-""" Resolves OpenSSL issues in some servers:
-      https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/
-      https://github.com/kennethreitz/requests/pull/799
-"""
-import sys
-
-from distutils.version import StrictVersion
-from requests.adapters import HTTPAdapter
-
-from docker.transport.basehttpadapter import BaseHTTPAdapter
-
-try:
-    import requests.packages.urllib3 as urllib3
-except ImportError:
-    import urllib3
-
-
-PoolManager = urllib3.poolmanager.PoolManager
-
-# Monkey-patching match_hostname with a version that supports
-# IP-address checking. Not necessary for Python 3.5 and above
-if sys.version_info[0] < 3 or sys.version_info[1] < 5:
-    from backports.ssl_match_hostname import match_hostname
-    urllib3.connection.match_hostname = match_hostname
-
-
-class SSLHTTPAdapter(BaseHTTPAdapter):
-    '''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
-
-    __attrs__ = HTTPAdapter.__attrs__ + ['assert_fingerprint',
-                                         'assert_hostname',
-                                         'ssl_version']
-
-    def __init__(self, ssl_version=None, assert_hostname=None,
-                 assert_fingerprint=None, **kwargs):
-        self.ssl_version = ssl_version
-        self.assert_hostname = assert_hostname
-        self.assert_fingerprint = assert_fingerprint
-        super(SSLHTTPAdapter, self).__init__(**kwargs)
-
-    def init_poolmanager(self, connections, maxsize, block=False):
-        kwargs = {
-            'num_pools': connections,
-            'maxsize': maxsize,
-            'block': block,
-            'assert_hostname': self.assert_hostname,
-            'assert_fingerprint': self.assert_fingerprint,
-        }
-        if self.ssl_version and self.can_override_ssl_version():
-            kwargs['ssl_version'] = self.ssl_version
-
-        self.poolmanager = PoolManager(**kwargs)
-
-    def get_connection(self, *args, **kwargs):
-        """
-        Ensure assert_hostname is set correctly on our pool
-
-        We already take care of a normal poolmanager via init_poolmanager
-
-        But we still need to take care of when there is a proxy poolmanager
-        """
-        conn = super(SSLHTTPAdapter, self).get_connection(*args, **kwargs)
-        if conn.assert_hostname != self.assert_hostname:
-            conn.assert_hostname = self.assert_hostname
-        return conn
-
-    def can_override_ssl_version(self):
-        urllib_ver = urllib3.__version__.split('-')[0]
-        if urllib_ver is None:
-            return False
-        if urllib_ver == 'dev':
-            return True
-        return StrictVersion(urllib_ver) > StrictVersion('1.5')

@@ -1,41 +1,24 @@
-import six
-import requests.adapters
 import socket
-from six.moves import http_client as httplib

-from docker.transport.basehttpadapter import BaseHTTPAdapter
+import requests.adapters
+import urllib3
+import urllib3.connection
+
 from .. import constants
-
-try:
-    import requests.packages.urllib3 as urllib3
-except ImportError:
-    import urllib3
+from .basehttpadapter import BaseHTTPAdapter

 RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer


-class UnixHTTPResponse(httplib.HTTPResponse, object):
-    def __init__(self, sock, *args, **kwargs):
-        disable_buffering = kwargs.pop('disable_buffering', False)
-        if six.PY2:
-            # FIXME: We may need to disable buffering on Py3 as well,
-            # but there's no clear way to do it at the moment. See:
-            # https://github.com/docker/docker-py/issues/1799
-            kwargs['buffering'] = not disable_buffering
-        super(UnixHTTPResponse, self).__init__(sock, *args, **kwargs)
-
-
-class UnixHTTPConnection(httplib.HTTPConnection, object):
+class UnixHTTPConnection(urllib3.connection.HTTPConnection):

     def __init__(self, base_url, unix_socket, timeout=60):
-        super(UnixHTTPConnection, self).__init__(
+        super().__init__(
             'localhost', timeout=timeout
         )
         self.base_url = base_url
         self.unix_socket = unix_socket
         self.timeout = timeout
         self.disable_buffering = False

     def connect(self):
         sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
@@ -43,21 +26,10 @@ class UnixHTTPConnection(urllib3.connection.HTTPConnection):
         sock.connect(self.unix_socket)
         self.sock = sock

-    def putheader(self, header, *values):
-        super(UnixHTTPConnection, self).putheader(header, *values)
-        if header == 'Connection' and 'Upgrade' in values:
-            self.disable_buffering = True
-
-    def response_class(self, sock, *args, **kwargs):
-        if self.disable_buffering:
-            kwargs['disable_buffering'] = True
-
-        return UnixHTTPResponse(sock, *args, **kwargs)
-

 class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
     def __init__(self, base_url, socket_path, timeout=60, maxsize=10):
-        super(UnixHTTPConnectionPool, self).__init__(
+        super().__init__(
             'localhost', timeout=timeout, maxsize=maxsize
         )
         self.base_url = base_url
@@ -82,14 +54,14 @@ class UnixHTTPAdapter(BaseHTTPAdapter):
                  max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
         socket_path = socket_url.replace('http+unix://', '')
         if not socket_path.startswith('/'):
-            socket_path = '/' + socket_path
+            socket_path = f"/{socket_path}"
         self.socket_path = socket_path
         self.timeout = timeout
         self.max_pool_size = max_pool_size
         self.pools = RecentlyUsedContainer(
             pool_connections, dispose_func=lambda p: p.close()
         )
-        super(UnixHTTPAdapter, self).__init__()
+        super().__init__()

     def get_connection(self, url, proxies=None):
         with self.pools.lock:

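The adapter normalizes `http+unix://` URLs to absolute socket paths; in practice, users reach it through `DockerClient`. A sketch with the conventional socket location:

```python
import docker

# UnixHTTPAdapter strips the scheme and guarantees a leading '/',
# so this resolves to the /var/run/docker.sock socket file.
client = docker.DockerClient(base_url="unix:///var/run/docker.sock")
print(client.ping())
```
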
@@ -1,14 +1,24 @@
 # flake8: noqa
-from .containers import (
-    ContainerConfig, HostConfig, LogConfig, Ulimit, DeviceRequest
-)
+from .containers import ContainerConfig, DeviceRequest, HostConfig, LogConfig, Ulimit
 from .daemon import CancellableStream
 from .healthcheck import Healthcheck
 from .networks import EndpointConfig, IPAMConfig, IPAMPool, NetworkingConfig
 from .services import (
-    ConfigReference, ContainerSpec, DNSConfig, DriverConfig, EndpointSpec,
-    Mount, Placement, PlacementPreference, Privileges, Resources,
-    RestartPolicy, RollbackConfig, SecretReference, ServiceMode, TaskTemplate,
-    UpdateConfig, NetworkAttachmentConfig
+    ConfigReference,
+    ContainerSpec,
+    DNSConfig,
+    DriverConfig,
+    EndpointSpec,
+    Mount,
+    NetworkAttachmentConfig,
+    Placement,
+    PlacementPreference,
+    Privileges,
+    Resources,
+    RestartPolicy,
+    RollbackConfig,
+    SecretReference,
+    ServiceMode,
+    TaskTemplate,
+    UpdateConfig,
 )
-from .swarm import SwarmSpec, SwarmExternalCA
+from .swarm import SwarmExternalCA, SwarmSpec

@@ -1,7 +1,4 @@
-import six
-
-
 class DictType(dict):
     def __init__(self, init):
-        for k, v in six.iteritems(init):
+        for k, v in init.items():
             self[k] = v

@ -1,16 +1,22 @@
|
|||
import six
|
||||
|
||||
from .. import errors
|
||||
from ..utils.utils import (
|
||||
convert_port_bindings, convert_tmpfs_mounts, convert_volume_binds,
|
||||
format_environment, format_extra_hosts, normalize_links, parse_bytes,
|
||||
parse_devices, split_command, version_gte, version_lt,
|
||||
convert_port_bindings,
|
||||
convert_tmpfs_mounts,
|
||||
convert_volume_binds,
|
||||
format_environment,
|
||||
format_extra_hosts,
|
||||
normalize_links,
|
||||
parse_bytes,
|
||||
parse_devices,
|
||||
split_command,
|
||||
version_gte,
|
||||
version_lt,
|
||||
)
|
||||
from .base import DictType
|
||||
from .healthcheck import Healthcheck
|
||||
|
||||
|
||||
class LogConfigTypesEnum(object):
|
||||
class LogConfigTypesEnum:
|
||||
_values = (
|
||||
'json-file',
|
||||
'syslog',
|
||||
|
@ -50,8 +56,11 @@ class LogConfig(DictType):
|
|||
>>> container = client.create_container('busybox', 'true',
|
||||
... host_config=hc)
|
||||
>>> client.inspect_container(container)['HostConfig']['LogConfig']
|
||||
{'Type': 'json-file', 'Config': {'labels': 'production_status,geo', 'max-size': '1g'}}
|
||||
""" # noqa: E501
|
||||
{
|
||||
'Type': 'json-file',
|
||||
'Config': {'labels': 'production_status,geo', 'max-size': '1g'}
|
||||
}
|
||||
"""
|
||||
types = LogConfigTypesEnum
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
|
@ -61,7 +70,7 @@ class LogConfig(DictType):
|
|||
if config and not isinstance(config, dict):
|
||||
raise ValueError("LogConfig.config must be a dictionary")
|
||||
|
||||
super(LogConfig, self).__init__({
|
||||
super().__init__({
|
||||
'Type': log_driver_type,
|
||||
'Config': config
|
||||
})
|
||||
|
@ -117,13 +126,13 @@ class Ulimit(DictType):
|
|||
name = kwargs.get('name', kwargs.get('Name'))
|
||||
soft = kwargs.get('soft', kwargs.get('Soft'))
|
||||
hard = kwargs.get('hard', kwargs.get('Hard'))
|
||||
if not isinstance(name, six.string_types):
|
||||
if not isinstance(name, str):
|
||||
raise ValueError("Ulimit.name must be a string")
|
||||
        if soft and not isinstance(soft, int):
            raise ValueError("Ulimit.soft must be an integer")
        if hard and not isinstance(hard, int):
            raise ValueError("Ulimit.hard must be an integer")
-        super(Ulimit, self).__init__({
+        super().__init__({
            'Name': name,
            'Soft': soft,
            'Hard': hard

@@ -184,7 +193,7 @@ class DeviceRequest(DictType):

         if driver is None:
             driver = ''
-        elif not isinstance(driver, six.string_types):
+        elif not isinstance(driver, str):
             raise ValueError('DeviceRequest.driver must be a string')
         if count is None:
             count = 0

@@ -203,7 +212,7 @@ class DeviceRequest(DictType):
         elif not isinstance(options, dict):
             raise ValueError('DeviceRequest.options must be a dict')

-        super(DeviceRequest, self).__init__({
+        super().__init__({
             'Driver': driver,
             'Count': count,
             'DeviceIDs': device_ids,

@@ -274,7 +283,8 @@ class HostConfig(dict):
                  volume_driver=None, cpu_count=None, cpu_percent=None,
                  nano_cpus=None, cpuset_mems=None, runtime=None, mounts=None,
                  cpu_rt_period=None, cpu_rt_runtime=None,
-                 device_cgroup_rules=None, device_requests=None):
+                 device_cgroup_rules=None, device_requests=None,
+                 cgroupns=None):

         if mem_limit is not None:
             self['Memory'] = parse_bytes(mem_limit)

@@ -297,7 +307,7 @@ class HostConfig(dict):
             self['MemorySwappiness'] = mem_swappiness

         if shm_size is not None:
-            if isinstance(shm_size, six.string_types):
+            if isinstance(shm_size, str):
                 shm_size = parse_bytes(shm_size)

             self['ShmSize'] = shm_size

@@ -358,7 +368,7 @@ class HostConfig(dict):
             self['Devices'] = parse_devices(devices)

         if group_add:
-            self['GroupAdd'] = [six.text_type(grp) for grp in group_add]
+            self['GroupAdd'] = [str(grp) for grp in group_add]

         if dns is not None:
             self['Dns'] = dns

@@ -378,11 +388,11 @@ class HostConfig(dict):
             if not isinstance(sysctls, dict):
                 raise host_config_type_error('sysctls', sysctls, 'dict')
             self['Sysctls'] = {}
-            for k, v in six.iteritems(sysctls):
-                self['Sysctls'][k] = six.text_type(v)
+            for k, v in sysctls.items():
+                self['Sysctls'][k] = str(v)

         if volumes_from is not None:
-            if isinstance(volumes_from, six.string_types):
+            if isinstance(volumes_from, str):
                 volumes_from = volumes_from.split(',')

             self['VolumesFrom'] = volumes_from

@@ -404,7 +414,7 @@ class HostConfig(dict):

         if isinstance(lxc_conf, dict):
             formatted = []
-            for k, v in six.iteritems(lxc_conf):
+            for k, v in lxc_conf.items():
                 formatted.append({'Key': k, 'Value': str(v)})
             lxc_conf = formatted

@@ -559,7 +569,7 @@ class HostConfig(dict):
         self["PidsLimit"] = pids_limit

         if isolation:
-            if not isinstance(isolation, six.string_types):
+            if not isinstance(isolation, str):
                 raise host_config_type_error('isolation', isolation, 'string')
             if version_lt(version, '1.24'):
                 raise host_config_version_error('isolation', '1.24')

@@ -609,7 +619,7 @@ class HostConfig(dict):
         self['CpuPercent'] = cpu_percent

         if nano_cpus:
-            if not isinstance(nano_cpus, six.integer_types):
+            if not isinstance(nano_cpus, int):
                 raise host_config_type_error('nano_cpus', nano_cpus, 'int')
             if version_lt(version, '1.25'):
                 raise host_config_version_error('nano_cpus', '1.25')

@@ -648,27 +658,30 @@ class HostConfig(dict):
                 req = DeviceRequest(**req)
             self['DeviceRequests'].append(req)

+        if cgroupns:
+            self['CgroupnsMode'] = cgroupns
+

 def host_config_type_error(param, param_value, expected):
-    error_msg = 'Invalid type for {0} param: expected {1} but found {2}'
-    return TypeError(error_msg.format(param, expected, type(param_value)))
+    return TypeError(
+        f'Invalid type for {param} param: expected {expected} '
+        f'but found {type(param_value)}'
+    )


 def host_config_version_error(param, version, less_than=True):
     operator = '<' if less_than else '>'
-    error_msg = '{0} param is not supported in API versions {1} {2}'
-    return errors.InvalidVersion(error_msg.format(param, operator, version))
+    return errors.InvalidVersion(
+        f'{param} param is not supported in API versions {operator} {version}',
+    )


 def host_config_value_error(param, param_value):
-    error_msg = 'Invalid value for {0} param: {1}'
-    return ValueError(error_msg.format(param, param_value))
+    return ValueError(f'Invalid value for {param} param: {param_value}')


 def host_config_incompatible_error(param, param_value, incompatible_param):
-    error_msg = '\"{1}\" {0} is incompatible with {2}'
     return errors.InvalidArgument(
-        error_msg.format(param, param_value, incompatible_param)
+        f'\"{param_value}\" {param} is incompatible with {incompatible_param}'
     )

@@ -699,17 +712,17 @@ class ContainerConfig(dict):
                 'version 1.29'
             )

-        if isinstance(command, six.string_types):
+        if isinstance(command, str):
             command = split_command(command)

-        if isinstance(entrypoint, six.string_types):
+        if isinstance(entrypoint, str):
             entrypoint = split_command(entrypoint)

         if isinstance(environment, dict):
             environment = format_environment(environment)

         if isinstance(labels, list):
-            labels = dict((lbl, six.text_type('')) for lbl in labels)
+            labels = {lbl: '' for lbl in labels}

         if isinstance(ports, list):
             exposed_ports = {}

@@ -720,10 +733,10 @@ class ContainerConfig(dict):
                 if len(port_definition) == 2:
                     proto = port_definition[1]
                     port = port_definition[0]
-                exposed_ports['{0}/{1}'.format(port, proto)] = {}
+                exposed_ports[f'{port}/{proto}'] = {}
             ports = exposed_ports

-        if isinstance(volumes, six.string_types):
+        if isinstance(volumes, str):
             volumes = [volumes, ]

         if isinstance(volumes, list):

@@ -752,7 +765,7 @@ class ContainerConfig(dict):
             'Hostname': hostname,
             'Domainname': domainname,
             'ExposedPorts': ports,
-            'User': six.text_type(user) if user is not None else None,
+            'User': str(user) if user is not None else None,
             'Tty': tty,
             'OpenStdin': stdin_open,
             'StdinOnce': stdin_once,
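For context, a minimal usage sketch of the new `cgroupns` option (illustrative only, not part of this changeset; it assumes the keyword is plumbed through `APIClient.create_host_config` into the `HostConfig` shown above):

```python
import docker

client = docker.APIClient()
# 'private' gives the container its own cgroup namespace; 'host' shares the host's
host_config = client.create_host_config(cgroupns='private')
container = client.create_container('alpine', 'sleep 60', host_config=host_config)
```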
@@ -1,14 +1,11 @@
 import socket

-try:
-    import requests.packages.urllib3 as urllib3
-except ImportError:
-    import urllib3
+import urllib3

 from ..errors import DockerException


-class CancellableStream(object):
+class CancellableStream:
     """
     Stream wrapper for real-time events, logs, etc. from the server.

@@ -31,9 +28,9 @@ class CancellableStream(object):
         try:
             return next(self._stream)
         except urllib3.exceptions.ProtocolError:
-            raise StopIteration
-        except socket.error:
-            raise StopIteration
+            raise StopIteration from None
+        except OSError:
+            raise StopIteration from None

     next = __next__
@@ -1,7 +1,5 @@
 from .base import DictType

-import six
-

 class Healthcheck(DictType):
     """

@@ -31,7 +29,7 @@ class Healthcheck(DictType):
     """
     def __init__(self, **kwargs):
         test = kwargs.get('test', kwargs.get('Test'))
-        if isinstance(test, six.string_types):
+        if isinstance(test, str):
             test = ["CMD-SHELL", test]

         interval = kwargs.get('interval', kwargs.get('Interval'))

@@ -39,7 +37,7 @@ class Healthcheck(DictType):
         retries = kwargs.get('retries', kwargs.get('Retries'))
         start_period = kwargs.get('start_period', kwargs.get('StartPeriod'))

-        super(Healthcheck, self).__init__({
+        super().__init__({
             'Test': test,
             'Interval': interval,
             'Timeout': timeout,

@@ -53,7 +51,7 @@ class Healthcheck(DictType):

     @test.setter
     def test(self, value):
-        if isinstance(value, six.string_types):
+        if isinstance(value, str):
             value = ["CMD-SHELL", value]
         self['Test'] = value
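For reference, a short sketch of how this type is used (illustrative values; durations are nanoseconds, and a plain string `test` is wrapped as `["CMD-SHELL", ...]` by the setter above):

```python
from docker.types import Healthcheck

healthcheck = Healthcheck(
    test="curl -f http://localhost/ || exit 1",
    interval=30 * 1_000_000_000,  # 30s in nanoseconds
    timeout=5 * 1_000_000_000,
    retries=3,
)
```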
@@ -4,7 +4,8 @@ from ..utils import normalize_links, version_lt

 class EndpointConfig(dict):
     def __init__(self, version, aliases=None, links=None, ipv4_address=None,
-                 ipv6_address=None, link_local_ips=None, driver_opt=None):
+                 ipv6_address=None, link_local_ips=None, driver_opt=None,
+                 mac_address=None):
         if version_lt(version, '1.22'):
             raise errors.InvalidVersion(
                 'Endpoint config is not supported for API version < 1.22'

@@ -23,6 +24,13 @@ class EndpointConfig(dict):
         if ipv6_address:
             ipam_config['IPv6Address'] = ipv6_address

+        if mac_address:
+            if version_lt(version, '1.25'):
+                raise errors.InvalidVersion(
+                    'mac_address is not supported for API version < 1.25'
+                )
+            self['MacAddress'] = mac_address
+
         if link_local_ips is not None:
             if version_lt(version, '1.24'):
                 raise errors.InvalidVersion(
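A minimal sketch of the new parameter (illustrative addresses; per the check above, `mac_address` raises `InvalidVersion` below API 1.25):

```python
from docker.types import EndpointConfig

endpoint = EndpointConfig(
    version='1.41',
    ipv4_address='10.0.0.5',
    mac_address='02:42:0a:00:00:05',
)
```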
@@ -1,10 +1,12 @@
-import six
-
 from .. import errors
 from ..constants import IS_WINDOWS_PLATFORM
 from ..utils import (
-    check_resource, format_environment, format_extra_hosts, parse_bytes,
-    split_command, convert_service_networks,
+    check_resource,
+    convert_service_networks,
+    format_environment,
+    format_extra_hosts,
+    parse_bytes,
+    split_command,
 )

@@ -31,6 +33,7 @@ class TaskTemplate(dict):
         force_update (int): A counter that triggers an update even if no
             relevant parameters have been changed.
     """
+
     def __init__(self, container_spec, resources=None, restart_policy=None,
                  placement=None, log_driver=None, networks=None,
                  force_update=None):

@@ -112,16 +115,24 @@ class ContainerSpec(dict):
             containers. Only used for Windows containers.
         init (boolean): Run an init inside the container that forwards signals
             and reaps processes.
+        cap_add (:py:class:`list`): A list of kernel capabilities to add to the
+            default set for the container.
+        cap_drop (:py:class:`list`): A list of kernel capabilities to drop from
+            the default set for the container.
+        sysctls (:py:class:`dict`): A dict of sysctl values to add to
+            the container
     """
+
     def __init__(self, image, command=None, args=None, hostname=None, env=None,
                  workdir=None, user=None, labels=None, mounts=None,
                  stop_grace_period=None, secrets=None, tty=None, groups=None,
                  open_stdin=None, read_only=None, stop_signal=None,
                  healthcheck=None, hosts=None, dns_config=None, configs=None,
-                 privileges=None, isolation=None, init=None):
+                 privileges=None, isolation=None, init=None, cap_add=None,
+                 cap_drop=None, sysctls=None):
         self['Image'] = image

-        if isinstance(command, six.string_types):
+        if isinstance(command, str):
             command = split_command(command)
         self['Command'] = command
         self['Args'] = args

@@ -151,7 +162,7 @@ class ContainerSpec(dict):
         if mounts is not None:
             parsed_mounts = []
             for mount in mounts:
-                if isinstance(mount, six.string_types):
+                if isinstance(mount, str):
                     parsed_mounts.append(Mount.parse_mount_string(mount))
                 else:
                     # If mount already parsed

@@ -188,6 +199,24 @@ class ContainerSpec(dict):
         if init is not None:
             self['Init'] = init

+        if cap_add is not None:
+            if not isinstance(cap_add, list):
+                raise TypeError('cap_add must be a list')
+
+            self['CapabilityAdd'] = cap_add
+
+        if cap_drop is not None:
+            if not isinstance(cap_drop, list):
+                raise TypeError('cap_drop must be a list')
+
+            self['CapabilityDrop'] = cap_drop
+
+        if sysctls is not None:
+            if not isinstance(sysctls, dict):
+                raise TypeError('sysctls must be a dict')
+
+            self['Sysctls'] = sysctls
+

 class Mount(dict):
     """
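A usage sketch of the new `ContainerSpec` parameters added in the hunk above (capability names and sysctl values are illustrative):

```python
from docker.types import ContainerSpec

spec = ContainerSpec(
    image='nginx:alpine',
    cap_add=['CAP_SYS_NICE'],                # must be a list
    cap_drop=['CAP_NET_RAW'],
    sysctls={'net.core.somaxconn': '1024'},  # must be a dict
)
assert spec['CapabilityAdd'] == ['CAP_SYS_NICE']
```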
@@ -213,18 +242,20 @@ class Mount(dict):
             for the ``volume`` type.
         driver_config (DriverConfig): Volume driver configuration. Only valid
             for the ``volume`` type.
+        subpath (str): Path inside a volume to mount instead of the volume
+            root.
         tmpfs_size (int or string): The size for the tmpfs mount in bytes.
         tmpfs_mode (int): The permission mode for the tmpfs mount.
     """
+
     def __init__(self, target, source, type='volume', read_only=False,
                  consistency=None, propagation=None, no_copy=False,
                  labels=None, driver_config=None, tmpfs_size=None,
-                 tmpfs_mode=None):
+                 tmpfs_mode=None, subpath=None):
         self['Target'] = target
         self['Source'] = source
         if type not in ('bind', 'volume', 'tmpfs', 'npipe'):
             raise errors.InvalidArgument(
-                'Unsupported mount type: "{}"'.format(type)
+                f'Unsupported mount type: "{type}"'
             )
         self['Type'] = type
         self['ReadOnly'] = read_only

@@ -237,7 +268,7 @@ class Mount(dict):
                 self['BindOptions'] = {
                     'Propagation': propagation
                 }
-            if any([labels, driver_config, no_copy, tmpfs_size, tmpfs_mode]):
+            if any([labels, driver_config, no_copy, tmpfs_size, tmpfs_mode, subpath]):
                 raise errors.InvalidArgument(
                     'Incompatible options have been provided for the bind '
                     'type mount.'

@@ -250,6 +281,8 @@ class Mount(dict):
                 volume_opts['Labels'] = labels
             if driver_config:
                 volume_opts['DriverConfig'] = driver_config
+            if subpath:
+                volume_opts['Subpath'] = subpath
             if volume_opts:
                 self['VolumeOptions'] = volume_opts
             if any([propagation, tmpfs_size, tmpfs_mode]):
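A sketch of the new `subpath` option (illustrative volume and path names; only meaningful for `volume`-type mounts, per the bind-mount guard above):

```python
from docker.types import Mount

# Mount only the 'static/' subdirectory of the volume at /srv
mount = Mount(target='/srv', source='site-data', type='volume', subpath='static')
assert mount['VolumeOptions'] == {'Subpath': 'static'}
```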
@@ -260,7 +293,7 @@ class Mount(dict):
         elif type == 'tmpfs':
             tmpfs_opts = {}
             if tmpfs_mode:
-                if not isinstance(tmpfs_mode, six.integer_types):
+                if not isinstance(tmpfs_mode, int):
                     raise errors.InvalidArgument(
                         'tmpfs_mode must be an integer'
                     )

@@ -280,7 +313,7 @@ class Mount(dict):
         parts = string.split(':')
         if len(parts) > 3:
             raise errors.InvalidArgument(
-                'Invalid mount format "{0}"'.format(string)
+                f'Invalid mount format "{string}"'
             )
         if len(parts) == 1:
             return cls(target=parts[0], source=None)

@@ -316,6 +349,7 @@ class Resources(dict):
             ``{ resource_name: resource_value }``. Alternatively, a list of
             of resource specifications as defined by the Engine API.
     """
+
     def __init__(self, cpu_limit=None, mem_limit=None, cpu_reservation=None,
                  mem_reservation=None, generic_resources=None):
         limits = {}

@@ -343,20 +377,20 @@ def _convert_generic_resources_dict(generic_resources):
         return generic_resources
     if not isinstance(generic_resources, dict):
         raise errors.InvalidArgument(
-            'generic_resources must be a dict or a list'
-            ' (found {})'.format(type(generic_resources))
+            'generic_resources must be a dict or a list '
+            f'(found {type(generic_resources)})'
         )
     resources = []
-    for kind, value in six.iteritems(generic_resources):
+    for kind, value in generic_resources.items():
         resource_type = None
         if isinstance(value, int):
             resource_type = 'DiscreteResourceSpec'
         elif isinstance(value, str):
             resource_type = 'NamedResourceSpec'
         else:
+            kv = {kind: value}
             raise errors.InvalidArgument(
-                'Unsupported generic resource reservation '
-                'type: {}'.format({kind: value})
+                f'Unsupported generic resource reservation type: {kv}'
             )
         resources.append({
             resource_type: {'Kind': kind, 'Value': value}
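As a sketch of the conversion above (resource names are illustrative): integer values become `DiscreteResourceSpec` entries, string values become `NamedResourceSpec` entries.

```python
from docker.types import Resources

# {'GPU': 2} -> [{'DiscreteResourceSpec': {'Kind': 'GPU', 'Value': 2}}]
resources = Resources(generic_resources={'GPU': 2})
```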
@@ -384,8 +418,9 @@ class UpdateConfig(dict):
             an update before the failure action is invoked, specified as a
             floating point number between 0 and 1. Default: 0
         order (string): Specifies the order of operations when rolling out an
-            updated task. Either ``start_first`` or ``stop_first`` are accepted.
+            updated task. Either ``start-first`` or ``stop-first`` are accepted.
     """
+
     def __init__(self, parallelism=0, delay=None, failure_action='continue',
                  monitor=None, max_failure_ratio=None, order=None):
         self['Parallelism'] = parallelism

@@ -421,7 +456,8 @@ class UpdateConfig(dict):

 class RollbackConfig(UpdateConfig):
     """
-    Used to specify the way containe rollbacks should be performed by a service
+    Used to specify the way container rollbacks should be performed by a
+    service

     Args:
         parallelism (int): Maximum number of tasks to be rolled back in one

@@ -437,13 +473,13 @@ class RollbackConfig(UpdateConfig):
             a rollback before the failure action is invoked, specified as a
             floating point number between 0 and 1. Default: 0
         order (string): Specifies the order of operations when rolling out a
-            rolled back task. Either ``start_first`` or ``stop_first`` are
+            rolled back task. Either ``start-first`` or ``stop-first`` are
             accepted.
     """
     pass


-class RestartConditionTypesEnum(object):
+class RestartConditionTypesEnum:
     _values = (
         'none',
         'on-failure',

@@ -474,7 +510,7 @@ class RestartPolicy(dict):
                  max_attempts=0, window=0):
         if condition not in self.condition_types._values:
             raise TypeError(
-                'Invalid RestartPolicy condition {0}'.format(condition)
+                f'Invalid RestartPolicy condition {condition}'
             )

         self['Condition'] = condition

@@ -496,6 +532,7 @@ class DriverConfig(dict):
         name (string): Name of the driver to use.
         options (dict): Driver-specific options. Default: ``None``.
     """
+
     def __init__(self, name, options=None):
         self['Name'] = name
         if options:

@@ -517,6 +554,7 @@ class EndpointSpec(dict):
             is ``(target_port [, protocol [, publish_mode]])``.
             Ports can only be provided if the ``vip`` resolution mode is used.
     """
+
     def __init__(self, mode=None, ports=None):
         if ports:
             self['Ports'] = convert_service_ports(ports)

@@ -533,7 +571,7 @@ def convert_service_ports(ports):
         )

     result = []
-    for k, v in six.iteritems(ports):
+    for k, v in ports.items():
         port_spec = {
             'Protocol': 'tcp',
             'PublishedPort': k
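For reference, a sketch of the ports mapping consumed by `convert_service_ports` (port numbers are illustrative): keys are published ports, values the target port, optionally with protocol and publish mode in a tuple.

```python
from docker.types import EndpointSpec

endpoint_spec = EndpointSpec(ports={8080: 80, 8443: (443, 'tcp')})
```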
@@ -559,37 +597,70 @@ def convert_service_ports(ports):

 class ServiceMode(dict):
     """
-    Indicate whether a service should be deployed as a replicated or global
-    service, and associated parameters
+    Indicate whether a service or a job should be deployed as a replicated
+    or global service, and associated parameters

     Args:
-        mode (string): Can be either ``replicated`` or ``global``
+        mode (string): Can be either ``replicated``, ``global``,
+            ``replicated-job`` or ``global-job``
         replicas (int): Number of replicas. For replicated services only.
+        concurrency (int): Number of concurrent jobs. For replicated job
+            services only.
     """
-    def __init__(self, mode, replicas=None):
-        if mode not in ('replicated', 'global'):
-            raise errors.InvalidArgument(
-                'mode must be either "replicated" or "global"'
-            )
-        if mode != 'replicated' and replicas is not None:
-            raise errors.InvalidArgument(
-                'replicas can only be used for replicated mode'
-            )
-        self[mode] = {}
-        if replicas is not None:
-            self[mode]['Replicas'] = replicas

-    @property
-    def mode(self):
-        if 'global' in self:
-            return 'global'
-        return 'replicated'
+    def __init__(self, mode, replicas=None, concurrency=None):
+        replicated_modes = ('replicated', 'replicated-job')
+        supported_modes = replicated_modes + ('global', 'global-job')
+
+        if mode not in supported_modes:
+            raise errors.InvalidArgument(
+                'mode must be either "replicated", "global", "replicated-job"'
+                ' or "global-job"'
+            )
+
+        if mode not in replicated_modes:
+            if replicas is not None:
+                raise errors.InvalidArgument(
+                    'replicas can only be used for "replicated" or'
+                    ' "replicated-job" mode'
+                )
+
+            if concurrency is not None:
+                raise errors.InvalidArgument(
+                    'concurrency can only be used for "replicated-job" mode'
+                )
+
+        service_mode = self._convert_mode(mode)
+        self.mode = service_mode
+        self[service_mode] = {}
+
+        if replicas is not None:
+            if mode == 'replicated':
+                self[service_mode]['Replicas'] = replicas
+
+            if mode == 'replicated-job':
+                self[service_mode]['MaxConcurrent'] = concurrency or 1
+                self[service_mode]['TotalCompletions'] = replicas
+
+    @staticmethod
+    def _convert_mode(original_mode):
+        if original_mode == 'global-job':
+            return 'GlobalJob'
+
+        if original_mode == 'replicated-job':
+            return 'ReplicatedJob'
+
+        return original_mode

     @property
     def replicas(self):
-        if self.mode != 'replicated':
-            return None
-        return self['replicated'].get('Replicas')
+        if 'replicated' in self:
+            return self['replicated'].get('Replicas')
+
+        if 'ReplicatedJob' in self:
+            return self['ReplicatedJob'].get('TotalCompletions')
+
+        return None
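A sketch of the new job modes in action (values are illustrative), following the logic added above:

```python
from docker.types import ServiceMode

# A replicated job with 10 completions, at most 2 running concurrently
mode = ServiceMode('replicated-job', replicas=10, concurrency=2)
assert mode == {'ReplicatedJob': {'MaxConcurrent': 2, 'TotalCompletions': 10}}
assert mode.replicas == 10
```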
@@ -659,10 +730,13 @@ class Placement(dict):
             are provided in order from highest to lowest precedence and
             are expressed as ``(strategy, descriptor)`` tuples. See
             :py:class:`PlacementPreference` for details.
+        maxreplicas (int): Maximum number of replicas per node
         platforms (:py:class:`list` of tuple): A list of platforms
             expressed as ``(arch, os)`` tuples
     """
-    def __init__(self, constraints=None, preferences=None, platforms=None):
+
+    def __init__(self, constraints=None, preferences=None, platforms=None,
+                 maxreplicas=None):
         if constraints is not None:
             self['Constraints'] = constraints
         if preferences is not None:

@@ -671,6 +745,8 @@ class Placement(dict):
             if isinstance(pref, tuple):
                 pref = PlacementPreference(*pref)
             self['Preferences'].append(pref)
+        if maxreplicas is not None:
+            self['MaxReplicas'] = maxreplicas
         if platforms:
             self['Platforms'] = []
             for plat in platforms:
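A usage sketch of the extended `Placement` (constraint and label names are illustrative):

```python
from docker.types import Placement

placement = Placement(
    constraints=['node.role == worker'],
    preferences=[('spread', 'node.labels.zone')],
    maxreplicas=2,  # at most two replicas per node
)
```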
@@ -691,11 +767,12 @@ class PlacementPreference(dict):
             the scheduler will try to spread tasks evenly over groups of
             nodes identified by this label.
     """
+
     def __init__(self, strategy, descriptor):
         if strategy != 'spread':
             raise errors.InvalidArgument(
-                'PlacementPreference strategy value is invalid ({}):'
-                ' must be "spread".'.format(strategy)
+                f'PlacementPreference strategy value is invalid ({strategy}): '
+                'must be "spread".'
             )
         self['Spread'] = {'SpreadDescriptor': descriptor}

@@ -712,6 +789,7 @@ class DNSConfig(dict):
         options (:py:class:`list`): A list of internal resolver variables
             to be modified (e.g., ``debug``, ``ndots:3``, etc.).
     """
+
     def __init__(self, nameservers=None, search=None, options=None):
         self['Nameservers'] = nameservers
         self['Search'] = search

@@ -742,6 +820,7 @@ class Privileges(dict):
         selinux_type (string): SELinux type label
         selinux_level (string): SELinux level label
     """
+
     def __init__(self, credentialspec_file=None, credentialspec_registry=None,
                  selinux_disable=None, selinux_user=None, selinux_role=None,
                  selinux_type=None, selinux_level=None):

@@ -784,6 +863,7 @@ class NetworkAttachmentConfig(dict):
         options (:py:class:`dict`): Driver attachment options for the
             network target.
     """
+
     def __init__(self, target, aliases=None, options=None):
         self['Target'] = target
         self['Aliases'] = aliases
@@ -1,13 +1,28 @@
 # flake8: noqa
-from .build import create_archive, exclude_paths, mkbuildcontext, tar
+from .build import create_archive, exclude_paths, match_tag, mkbuildcontext, tar
 from .decorators import check_resource, minimum_version, update_headers
 from .utils import (
-    compare_version, convert_port_bindings, convert_volume_binds,
-    parse_repository_tag, parse_host,
-    kwargs_from_env, convert_filters, datetime_to_timestamp,
-    create_host_config, parse_bytes, parse_env_file, version_lt,
-    version_gte, decode_json_header, split_command, create_ipam_config,
-    create_ipam_pool, parse_devices, normalize_links, convert_service_networks,
-    format_environment, format_extra_hosts
+    compare_version,
+    convert_filters,
+    convert_port_bindings,
+    convert_service_networks,
+    convert_volume_binds,
+    create_host_config,
+    create_ipam_config,
+    create_ipam_pool,
+    datetime_to_timestamp,
+    decode_json_header,
+    format_environment,
+    format_extra_hosts,
+    kwargs_from_env,
+    normalize_links,
+    parse_bytes,
+    parse_devices,
+    parse_env_file,
+    parse_host,
+    parse_repository_tag,
+    split_command,
+    version_gte,
+    version_lt,
 )
@@ -4,13 +4,19 @@ import re
 import tarfile
 import tempfile

-import six
-
-from .fnmatch import fnmatch
 from ..constants import IS_WINDOWS_PLATFORM

+from .fnmatch import fnmatch
+
 _SEP = re.compile('/|\\\\') if IS_WINDOWS_PLATFORM else re.compile('/')
+_TAG = re.compile(
+    r"^[a-z0-9]+((\.|_|__|-+)[a-z0-9]+)*"
+    r"(?::[0-9]+)?(/[a-z0-9]+((\.|_|__|-+)[a-z0-9]+)*)*"
+    r"(:[a-zA-Z0-9_][a-zA-Z0-9._-]{0,127})?$"
+)
+
+
+def match_tag(tag: str) -> bool:
+    return bool(_TAG.match(tag))


 def tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False):
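A quick sketch of the new tag validator (example tags are illustrative; registry ports are accepted, uppercase repository components are not):

```python
from docker.utils.build import match_tag

assert match_tag('registry.example.com:5000/app/web:1.0.3')
assert not match_tag('Registry/App:latest')
```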
@@ -44,7 +50,7 @@ def exclude_paths(root, patterns, dockerfile=None):
     if dockerfile is None:
         dockerfile = 'Dockerfile'

-    patterns.append('!' + dockerfile)
+    patterns.append(f"!{dockerfile}")
     pm = PatternMatcher(patterns)
     return set(pm.walk(root))

@@ -69,7 +75,7 @@ def create_archive(root, files=None, fileobj=None, gzip=False,
     t = tarfile.open(mode='w:gz' if gzip else 'w', fileobj=fileobj)
     if files is None:
         files = build_file_list(root)
-    extra_names = set(e[0] for e in extra_files)
+    extra_names = {e[0] for e in extra_files}
     for path in files:
         if path in extra_names:
             # Extra files override context files with the same name

@@ -95,10 +101,10 @@ def create_archive(root, files=None, fileobj=None, gzip=False,
             try:
                 with open(full_path, 'rb') as f:
                     t.addfile(i, f)
-            except IOError:
-                raise IOError(
-                    'Can not read file in context: {}'.format(full_path)
-                )
+            except OSError as oe:
+                raise OSError(
+                    f'Can not read file in context: {full_path}'
+                ) from oe
         else:
             # Directories, FIFOs, symlinks... don't need to be read.
             t.addfile(i, None)

@@ -119,12 +125,8 @@ def mkbuildcontext(dockerfile):
     t = tarfile.open(mode='w', fileobj=f)
     if isinstance(dockerfile, io.StringIO):
         dfinfo = tarfile.TarInfo('Dockerfile')
-        if six.PY3:
-            raise TypeError('Please use io.BytesIO to create in-memory '
-                            'Dockerfiles with Python 3')
-        else:
-            dfinfo.size = len(dockerfile.getvalue())
-            dockerfile.seek(0)
+        raise TypeError('Please use io.BytesIO to create in-memory '
+                        'Dockerfiles with Python 3')
     elif isinstance(dockerfile, io.BytesIO):
         dfinfo = tarfile.TarInfo('Dockerfile')
         dfinfo.size = len(dockerfile.getvalue())

@@ -154,7 +156,7 @@ def walk(root, patterns, default=True):

 # Heavily based on
 # https://github.com/moby/moby/blob/master/pkg/fileutils/fileutils.go
-class PatternMatcher(object):
+class PatternMatcher:
     def __init__(self, patterns):
         self.patterns = list(filter(
             lambda p: p.dirs, [Pattern(p) for p in patterns]

@@ -186,7 +188,7 @@ class PatternMatcher(object):
                 fpath = os.path.join(
                     os.path.relpath(current_dir, root), f
                 )
-                if fpath.startswith('.' + os.path.sep):
+                if fpath.startswith(f".{os.path.sep}"):
                     fpath = fpath[2:]
                 match = self.matches(fpath)
                 if not match:

@@ -212,13 +214,12 @@ class PatternMatcher(object):
                         break
                 if skip:
                     continue
-                for sub in rec_walk(cur):
-                    yield sub
+                yield from rec_walk(cur)

         return rec_walk(root)


-class Pattern(object):
+class Pattern:
     def __init__(self, pattern_str):
         self.exclusion = False
         if pattern_str.startswith('!'):

@@ -231,6 +232,9 @@ class Pattern(object):
     @classmethod
     def normalize(cls, p):

+        # Remove trailing spaces
+        p = p.strip()
+
         # Leading and trailing slashes are not relevant. Yes,
         # "foo.py/" must exclude the "foo.py" regular file. "."
         # components are not relevant either, even if the whole
@@ -18,11 +18,11 @@ def find_config_file(config_path=None):
         os.path.join(home_dir(), LEGACY_DOCKER_CONFIG_FILENAME),  # 4
     ]))

-    log.debug("Trying paths: {0}".format(repr(paths)))
+    log.debug(f"Trying paths: {repr(paths)}")

     for path in paths:
         if os.path.exists(path):
-            log.debug("Found file at path: {0}".format(path))
+            log.debug(f"Found file at path: {path}")
             return path

     log.debug("No config file found")

@@ -57,7 +57,7 @@ def load_general_config(config_path=None):
     try:
         with open(config_file) as f:
             return json.load(f)
-    except (IOError, ValueError) as e:
+    except (OSError, ValueError) as e:
         # In the case of a legacy `.dockercfg` file, we won't
         # be able to load any JSON data.
         log.debug(e)
@@ -27,9 +27,7 @@ def minimum_version(version):
         def wrapper(self, *args, **kwargs):
             if utils.version_lt(self._version, version):
                 raise errors.InvalidVersion(
-                    '{0} is not available for version < {1}'.format(
-                        f.__name__, version
-                    )
+                    f'{f.__name__} is not available for version < {version}',
                 )
             return f(self, *args, **kwargs)
         return wrapper
@@ -79,18 +79,18 @@ def translate(pat):
                 i = i + 1
                 if i >= n:
                     # is "**EOF" - to align with .gitignore just accept all
-                    res = res + '.*'
+                    res = f"{res}.*"
                 else:
                     # is "**"
                     # Note that this allows for any # of /'s (even 0) because
                     # the .* will eat everything, even /'s
-                    res = res + '(.*/)?'
+                    res = f"{res}(.*/)?"
             else:
                 # is "*" so map it to anything but "/"
-                res = res + '[^/]*'
+                res = f"{res}[^/]*"
         elif c == '?':
             # "?" is any char except "/"
-            res = res + '[^/]'
+            res = f"{res}[^/]"
         elif c == '[':
             j = i
             if j < n and pat[j] == '!':

@@ -100,16 +100,16 @@ def translate(pat):
             while j < n and pat[j] != ']':
                 j = j + 1
             if j >= n:
-                res = res + '\\['
+                res = f"{res}\\["
             else:
                 stuff = pat[i:j].replace('\\', '\\\\')
                 i = j + 1
                 if stuff[0] == '!':
-                    stuff = '^' + stuff[1:]
+                    stuff = f"^{stuff[1:]}"
                 elif stuff[0] == '^':
-                    stuff = '\\' + stuff
-                res = '%s[%s]' % (res, stuff)
+                    stuff = f"\\{stuff}"
+                res = f'{res}[{stuff}]'
         else:
             res = res + re.escape(c)

-    return res + '$'
+    return f"{res}$"
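For context, a sketch of the dockerignore-style semantics implemented by `translate` (paths are illustrative): `**` may span any number of path segments, while a single `*` never crosses `/`.

```python
from docker.utils.fnmatch import fnmatch

assert fnmatch('docs/build/html/index.html', '**/*.html')
assert not fnmatch('src/main.py', '*.py')  # '*' does not match across '/'
```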
@@ -1,14 +1,8 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
 import json
 import json.decoder

-import six
-
 from ..errors import StreamParseError

 json_decoder = json.JSONDecoder()

@@ -20,7 +14,7 @@ def stream_as_text(stream):
     instead of byte streams.
     """
     for data in stream:
-        if not isinstance(data, six.text_type):
+        if not isinstance(data, str):
             data = data.decode('utf-8', 'replace')
         yield data

@@ -46,8 +40,8 @@ def json_stream(stream):
     return split_buffer(stream, json_splitter, json_decoder.decode)


-def line_splitter(buffer, separator=u'\n'):
-    index = buffer.find(six.text_type(separator))
+def line_splitter(buffer, separator='\n'):
+    index = buffer.find(str(separator))
     if index == -1:
         return None
     return buffer[:index + 1], buffer[index + 1:]

@@ -61,7 +55,7 @@ def split_buffer(stream, splitter=None, decoder=lambda a: a):
     of the input.
     """
     splitter = splitter or line_splitter
-    buffered = six.text_type('')
+    buffered = ''

     for data in stream_as_text(stream):
         buffered += data

@@ -77,4 +71,4 @@ def split_buffer(stream, splitter=None, decoder=lambda a: a):
         try:
             yield decoder(buffered)
         except Exception as e:
-            raise StreamParseError(e)
+            raise StreamParseError(e) from e
@@ -3,7 +3,7 @@ import re
 PORT_SPEC = re.compile(
     "^"  # Match full string
     "("  # External part
-    r"((?P<host>[a-fA-F\d.:]+):)?"  # Address
+    r"(\[?(?P<host>[a-fA-F\d.:]+)\]?:)?"  # Address
     r"(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:"  # External range
     ")?"
     r"(?P<int>[\d]+)(-(?P<int_end>[\d]+))?"  # Internal range

@@ -49,7 +49,7 @@ def port_range(start, end, proto, randomly_available_port=False):
     if not end:
         return [start + proto]
     if randomly_available_port:
-        return ['{}-{}'.format(start, end) + proto]
+        return [f"{start}-{end}{proto}"]
     return [str(port) + proto for port in range(int(start), int(end) + 1)]
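A sketch of what the relaxed `PORT_SPEC` enables (the address is illustrative, and the exact return shape of `split_port` is assumed from the rest of this module): bracketed IPv6 addresses now parse instead of raising.

```python
from docker.utils.ports import split_port

internal, external = split_port('[2001:db8::1]:8080:80/tcp')
```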
@@ -69,5 +69,9 @@ class ProxyConfig(dict):
         return proxy_env + environment

     def __str__(self):
-        return 'ProxyConfig(http={}, https={}, ftp={}, no_proxy={})'.format(
-            self.http, self.https, self.ftp, self.no_proxy)
+        return (
+            'ProxyConfig('
+            f'http={self.http}, https={self.https}, '
+            f'ftp={self.ftp}, no_proxy={self.no_proxy}'
+            ')'
+        )
@@ -4,8 +4,6 @@ import select
 import socket as pysocket
 import struct

-import six
-
 try:
     from ..transport import NpipeSocket
 except ImportError:

@@ -20,6 +18,11 @@ class SocketError(Exception):
     pass


+# NpipeSockets have their own error types
+# pywintypes.error: (109, 'ReadFile', 'The pipe has been ended.')
+NPIPE_ENDED = 109
+
+
 def read(socket, n=4096):
     """
     Reads at most n bytes from socket

@@ -27,18 +30,33 @@ def read(socket, n=4096):

     recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)

-    if six.PY3 and not isinstance(socket, NpipeSocket):
-        select.select([socket], [], [])
+    if not isinstance(socket, NpipeSocket):
+        if not hasattr(select, "poll"):
+            # Limited to 1024
+            select.select([socket], [], [])
+        else:
+            poll = select.poll()
+            poll.register(socket, select.POLLIN | select.POLLPRI)
+            poll.poll()

     try:
         if hasattr(socket, 'recv'):
             return socket.recv(n)
-        if six.PY3 and isinstance(socket, getattr(pysocket, 'SocketIO')):
+        if isinstance(socket, pysocket.SocketIO):
             return socket.read(n)
         return os.read(socket.fileno(), n)
-    except EnvironmentError as e:
+    except OSError as e:
         if e.errno not in recoverable_errors:
             raise
+    except Exception as e:
+        is_pipe_ended = (isinstance(socket, NpipeSocket) and
+                         len(e.args) > 0 and
+                         e.args[0] == NPIPE_ENDED)
+        if is_pipe_ended:
+            # npipes don't support duplex sockets, so we interpret
+            # a PIPE_ENDED error as a close operation (0-length read).
+            return ''
+        raise


 def read_exactly(socket, n):

@@ -46,7 +64,7 @@ def read_exactly(socket, n):
     Reads exactly n bytes from socket
     Raises SocketError if there isn't enough data
     """
-    data = six.binary_type()
+    data = b""
     while len(data) < n:
         next_data = read(socket, n - len(data))
         if not next_data:

@@ -134,7 +152,7 @@ def consume_socket_output(frames, demux=False):
     if demux is False:
         # If the streams are multiplexed, the generator returns strings, that
         # we just need to concatenate.
-        return six.binary_type().join(frames)
+        return b"".join(frames)

     # If the streams are demultiplexed, the generator yields tuples
     # (stdout, stderr)

@@ -166,4 +184,4 @@ def demux_adaptor(stream_id, data):
     elif stream_id == STDERR:
         return (None, data)
     else:
-        raise ValueError('{0} is not a valid stream'.format(stream_id))
+        raise ValueError(f'{stream_id} is not a valid stream')
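For reference, a small sketch of the demultiplexing convention used above (stream id 1 is stdout, 2 is stderr):

```python
from docker.utils.socket import demux_adaptor

assert demux_adaptor(1, b'out') == (b'out', None)
assert demux_adaptor(2, b'err') == (None, b'err')
```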
@@ -1,26 +1,28 @@
 import base64
+import collections
 import json
 import os
 import os.path
 import shlex
 import string
-from datetime import datetime
-from distutils.version import StrictVersion
-
-import six
+from datetime import datetime, timezone
+from functools import lru_cache
+from itertools import zip_longest
+from urllib.parse import urlparse, urlunparse

 from .. import errors
-from .. import tls
-from ..constants import DEFAULT_HTTP_HOST
-from ..constants import DEFAULT_UNIX_SOCKET
-from ..constants import DEFAULT_NPIPE
-from ..constants import BYTE_UNITS
+from ..constants import (
+    BYTE_UNITS,
+    DEFAULT_HTTP_HOST,
+    DEFAULT_NPIPE,
+    DEFAULT_UNIX_SOCKET,
+)
+from ..tls import TLSConfig

-if six.PY2:
-    from urllib import splitnport
-    from urlparse import urlparse
-else:
-    from urllib.parse import splitnport, urlparse
+URLComponents = collections.namedtuple(
+    'URLComponents',
+    'scheme netloc url params query fragment',
+)


 def create_ipam_pool(*args, **kwargs):

@@ -39,11 +41,11 @@ def create_ipam_config(*args, **kwargs):

 def decode_json_header(header):
     data = base64.b64decode(header)
-    if six.PY3:
-        data = data.decode('utf-8')
+    data = data.decode('utf-8')
     return json.loads(data)


+@lru_cache(maxsize=None)
 def compare_version(v1, v2):
     """Compare docker versions

@@ -56,14 +58,20 @@ def compare_version(v1, v2):
     >>> compare_version(v2, v2)
     0
     """
-    s1 = StrictVersion(v1)
-    s2 = StrictVersion(v2)
-    if s1 == s2:
+    if v1 == v2:
         return 0
-    elif s1 > s2:
-        return -1
-    else:
-        return 1
+    # Split into `sys.version_info` like tuples.
+    s1 = tuple(int(p) for p in v1.split('.'))
+    s2 = tuple(int(p) for p in v2.split('.'))
+    # Compare each component, padding with 0 if necessary.
+    for c1, c2 in zip_longest(s1, s2, fillvalue=0):
+        if c1 == c2:
+            continue
+        elif c1 > c2:
+            return -1
+        else:
+            return 1
+    return 0


 def version_lt(v1, v2):
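Worth noting: the replacement keeps `compare_version`'s inverted sign convention (-1 means v1 is the *newer* version), as a quick sketch shows:

```python
from docker.utils.utils import compare_version, version_lt

assert compare_version('1.25', '1.24') == -1
assert compare_version('1.24', '1.25') == 1
assert version_lt('1.24', '1.25')
```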
@@ -80,7 +88,7 @@ def _convert_port_binding(binding):
         if len(binding) == 2:
             result['HostPort'] = binding[1]
             result['HostIp'] = binding[0]
-        elif isinstance(binding[0], six.string_types):
+        elif isinstance(binding[0], str):
             result['HostIp'] = binding[0]
         else:
             result['HostPort'] = binding[0]

@@ -104,7 +112,7 @@ def _convert_port_binding(binding):

 def convert_port_bindings(port_bindings):
     result = {}
-    for k, v in six.iteritems(port_bindings):
+    for k, v in iter(port_bindings.items()):
         key = str(k)
         if '/' not in key:
             key += '/tcp'

@@ -121,18 +129,17 @@ def convert_volume_binds(binds):

     result = []
     for k, v in binds.items():
-        if isinstance(k, six.binary_type):
+        if isinstance(k, bytes):
             k = k.decode('utf-8')

         if isinstance(v, dict):
             if 'ro' in v and 'mode' in v:
                 raise ValueError(
-                    'Binding cannot contain both "ro" and "mode": {}'
-                    .format(repr(v))
+                    f'Binding cannot contain both "ro" and "mode": {v!r}'
                 )

             bind = v['bind']
-            if isinstance(bind, six.binary_type):
+            if isinstance(bind, bytes):
                 bind = bind.decode('utf-8')

             if 'ro' in v:

@@ -142,14 +149,30 @@ def convert_volume_binds(binds):
             else:
                 mode = 'rw'

+            # NOTE: this is only relevant for Linux hosts
+            # (doesn't apply in Docker Desktop)
+            propagation_modes = [
+                'rshared',
+                'shared',
+                'rslave',
+                'slave',
+                'rprivate',
+                'private',
+            ]
+            if 'propagation' in v and v['propagation'] in propagation_modes:
+                if mode:
+                    mode = f"{mode},{v['propagation']}"
+                else:
+                    mode = v['propagation']
+
             result.append(
-                six.text_type('{0}:{1}:{2}').format(k, bind, mode)
+                f'{k}:{bind}:{mode}'
             )
         else:
-            if isinstance(v, six.binary_type):
+            if isinstance(v, bytes):
                 v = v.decode('utf-8')
             result.append(
-                six.text_type('{0}:{1}:rw').format(k, v)
+                f'{k}:{v}:rw'
             )
     return result
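A sketch of the new propagation support (paths and volume names are illustrative):

```python
from docker.utils import convert_volume_binds

binds = {'/host/data': {'bind': '/data', 'mode': 'ro', 'propagation': 'rshared'}}
assert convert_volume_binds(binds) == ['/host/data:/data:ro,rshared']
```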
@@ -160,13 +183,13 @@ def convert_tmpfs_mounts(tmpfs):

     if not isinstance(tmpfs, list):
         raise ValueError(
-            'Expected tmpfs value to be either a list or a dict, found: {}'
-            .format(type(tmpfs).__name__)
+            'Expected tmpfs value to be either a list or a dict, '
+            f'found: {type(tmpfs).__name__}'
         )

     result = {}
     for mount in tmpfs:
-        if isinstance(mount, six.string_types):
+        if isinstance(mount, str):
             if ":" in mount:
                 name, options = mount.split(":", 1)
             else:

@@ -175,8 +198,8 @@ def convert_tmpfs_mounts(tmpfs):

         else:
             raise ValueError(
-                "Expected item in tmpfs list to be a string, found: {}"
-                .format(type(mount).__name__)
+                "Expected item in tmpfs list to be a string, "
+                f"found: {type(mount).__name__}"
             )

         result[name] = options

@@ -191,7 +214,7 @@ def convert_service_networks(networks):

     result = []
     for n in networks:
-        if isinstance(n, six.string_types):
+        if isinstance(n, str):
             n = {'Target': n}
         result.append(n)
     return result

@@ -208,10 +231,6 @@ def parse_repository_tag(repo_name):


 def parse_host(addr, is_win32=False, tls=False):
-    path = ''
-    port = None
-    host = None
-
     # Sensible defaults
     if not addr and is_win32:
         return DEFAULT_NPIPE

@@ -222,9 +241,9 @@ def parse_host(addr, is_win32=False, tls=False):

     parsed_url = urlparse(addr)
     proto = parsed_url.scheme
-    if not proto or any([x not in string.ascii_letters + '+' for x in proto]):
+    if not proto or any(x not in f"{string.ascii_letters}+" for x in proto):
         # https://bugs.python.org/issue754016
-        parsed_url = urlparse('//' + addr, 'tcp')
+        parsed_url = urlparse(f"//{addr}", 'tcp')
         proto = 'tcp'

     if proto == 'fd':

@@ -240,14 +259,14 @@ def parse_host(addr, is_win32=False, tls=False):

     if proto not in ('tcp', 'unix', 'npipe', 'ssh'):
         raise errors.DockerException(
-            "Invalid bind address protocol: {}".format(addr)
+            f"Invalid bind address protocol: {addr}"
         )

     if proto == 'tcp' and not parsed_url.netloc:
         # "tcp://" is exceptionally disallowed by convention;
         # omitting a hostname for other protocols is fine
         raise errors.DockerException(
-            'Invalid bind address format: {}'.format(addr)
+            f'Invalid bind address format: {addr}'
         )

     if any([

@@ -255,45 +274,51 @@ def parse_host(addr, is_win32=False, tls=False):
         parsed_url.password
     ]):
         raise errors.DockerException(
-            'Invalid bind address format: {}'.format(addr)
+            f'Invalid bind address format: {addr}'
         )

     if parsed_url.path and proto == 'ssh':
         raise errors.DockerException(
-            'Invalid bind address format: no path allowed for this protocol:'
-            ' {}'.format(addr)
+            f'Invalid bind address format: no path allowed for this protocol: {addr}'
         )
     else:
         path = parsed_url.path
         if proto == 'unix' and parsed_url.hostname is not None:
             # For legacy reasons, we consider unix://path
             # to be valid and equivalent to unix:///path
-            path = '/'.join((parsed_url.hostname, path))
+            path = f"{parsed_url.hostname}/{path}"

+    netloc = parsed_url.netloc
     if proto in ('tcp', 'ssh'):
-        # parsed_url.hostname strips brackets from IPv6 addresses,
-        # which can be problematic hence our use of splitnport() instead.
-        host, port = splitnport(parsed_url.netloc)
-        if port is None or port < 0:
+        port = parsed_url.port or 0
+        if port <= 0:
             if proto != 'ssh':
                 raise errors.DockerException(
-                    'Invalid bind address format: port is required:'
-                    ' {}'.format(addr)
+                    f'Invalid bind address format: port is required: {addr}'
                 )
             port = 22
+            netloc = f'{parsed_url.netloc}:{port}'

-    if not host:
-        host = DEFAULT_HTTP_HOST
+        if not parsed_url.hostname:
+            netloc = f'{DEFAULT_HTTP_HOST}:{port}'

     # Rewrite schemes to fit library internals (requests adapters)
     if proto == 'tcp':
-        proto = 'http{}'.format('s' if tls else '')
+        proto = f"http{'s' if tls else ''}"
     elif proto == 'unix':
         proto = 'http+unix'

     if proto in ('http+unix', 'npipe'):
-        return "{}://{}".format(proto, path).rstrip('/')
-    return '{0}://{1}:{2}{3}'.format(proto, host, port, path).rstrip('/')
+        return f"{proto}://{path}".rstrip('/')
+
+    return urlunparse(URLComponents(
+        scheme=proto,
+        netloc=netloc,
+        url=path,
+        params='',
+        query='',
+        fragment='',
+    )).rstrip('/')


 def parse_devices(devices):
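A sketch of the rewritten `parse_host` behaviour (addresses are illustrative; note that bracketed IPv6 hosts now survive intact, which is what the switch from `splitnport` to `urlunparse` buys):

```python
from docker.utils import parse_host

assert parse_host('tcp://127.0.0.1:2375') == 'http://127.0.0.1:2375'
assert parse_host('tcp://[::1]:2375') == 'http://[::1]:2375'
assert parse_host('ssh://example.com') == 'ssh://example.com:22'  # default SSH port
```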
@@ -302,9 +327,9 @@ def parse_devices(devices):
         if isinstance(device, dict):
             device_list.append(device)
             continue
-        if not isinstance(device, six.string_types):
+        if not isinstance(device, str):
             raise errors.DockerException(
-                'Invalid device type {0}'.format(type(device))
+                f'Invalid device type {type(device)}'
             )
         device_mapping = device.split(':')
         if device_mapping:

@@ -325,7 +350,7 @@ def parse_devices(devices):
     return device_list


-def kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None):
+def kwargs_from_env(environment=None):
     if not environment:
         environment = os.environ
     host = environment.get('DOCKER_HOST')

@@ -353,18 +378,11 @@ def kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None):
     if not cert_path:
         cert_path = os.path.join(os.path.expanduser('~'), '.docker')

-    if not tls_verify and assert_hostname is None:
-        # assert_hostname is a subset of TLS verification,
-        # so if it's not set already then set it to false.
-        assert_hostname = False
-
-    params['tls'] = tls.TLSConfig(
+    params['tls'] = TLSConfig(
         client_cert=(os.path.join(cert_path, 'cert.pem'),
                      os.path.join(cert_path, 'key.pem')),
         ca_cert=os.path.join(cert_path, 'ca.pem'),
         verify=tls_verify,
-        ssl_version=ssl_version,
-        assert_hostname=assert_hostname,
     )

     return params

@@ -372,26 +390,26 @@ def kwargs_from_env(environment=None):

 def convert_filters(filters):
     result = {}
-    for k, v in six.iteritems(filters):
+    for k, v in iter(filters.items()):
         if isinstance(v, bool):
             v = 'true' if v else 'false'
         if not isinstance(v, list):
             v = [v, ]
         result[k] = [
-            str(item) if not isinstance(item, six.string_types) else item
+            str(item) if not isinstance(item, str) else item
             for item in v
         ]
     return json.dumps(result)


 def datetime_to_timestamp(dt):
-    """Convert a UTC datetime to a Unix timestamp"""
-    delta = dt - datetime.utcfromtimestamp(0)
+    """Convert a datetime to a Unix timestamp"""
+    delta = dt.astimezone(timezone.utc) - datetime(1970, 1, 1, tzinfo=timezone.utc)
     return delta.seconds + delta.days * 24 * 3600


 def parse_bytes(s):
-    if isinstance(s, six.integer_types + (float,)):
+    if isinstance(s, (int, float,)):
         return s
     if len(s) == 0:
         return 0
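A quick check of the timezone-aware conversion introduced above (the date is illustrative; aware datetimes are normalized to UTC before the epoch delta is taken):

```python
from datetime import datetime, timezone
from docker.utils.utils import datetime_to_timestamp

dt = datetime(2024, 1, 1, tzinfo=timezone.utc)
assert datetime_to_timestamp(dt) == 1704067200
```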
@@ -413,19 +431,18 @@ def parse_bytes(s):
     if suffix in units.keys() or suffix.isdigit():
         try:
             digits = float(digits_part)
-        except ValueError:
+        except ValueError as ve:
             raise errors.DockerException(
-                'Failed converting the string value for memory ({0}) to'
-                ' an integer.'.format(digits_part)
-            )
+                'Failed converting the string value for memory '
+                f'({digits_part}) to an integer.'
+            ) from ve

         # Reconvert to long for the final result
         s = int(digits * units[suffix])
     else:
         raise errors.DockerException(
-            'The specified value for memory ({0}) should specify the'
-            ' units. The postfix should be one of the `b` `k` `m` `g`'
-            ' characters'.format(s)
+            f'The specified value for memory ({s}) should specify the units. '
+            'The postfix should be one of the `b` `k` `m` `g` characters'
         )

     return s

@@ -433,9 +450,9 @@ def parse_bytes(s):

 def normalize_links(links):
     if isinstance(links, dict):
-        links = six.iteritems(links)
+        links = iter(links.items())

-    return ['{0}:{1}'.format(k, v) if v else k for k, v in sorted(links)]
+    return [f'{k}:{v}' if v else k for k, v in sorted(links)]


 def parse_env_file(env_file):

@@ -445,7 +462,7 @@ def parse_env_file(env_file):
     """
     environment = {}

-    with open(env_file, 'r') as f:
+    with open(env_file) as f:
         for line in f:

             if line[0] == '#':

@@ -461,15 +478,12 @@ def parse_env_file(env_file):
                 environment[k] = v
             else:
                 raise errors.DockerException(
-                    'Invalid line in environment file {0}:\n{1}'.format(
-                        env_file, line))
+                    f'Invalid line in environment file {env_file}:\n{line}')

     return environment


 def split_command(command):
-    if six.PY2 and not isinstance(command, six.binary_type):
-        command = command.encode('utf-8')
     return shlex.split(command)

@@ -477,22 +491,22 @@ def format_environment(environment):
     def format_env(key, value):
         if value is None:
             return key
-        if isinstance(value, six.binary_type):
+        if isinstance(value, bytes):
             value = value.decode('utf-8')

-        return u'{key}={value}'.format(key=key, value=value)
-    return [format_env(*var) for var in six.iteritems(environment)]
+        return f'{key}={value}'
+    return [format_env(*var) for var in iter(environment.items())]


 def format_extra_hosts(extra_hosts, task=False):
     # Use format dictated by Swarm API if container is part of a task
     if task:
         return [
-            '{} {}'.format(v, k) for k, v in sorted(six.iteritems(extra_hosts))
+            f'{v} {k}' for k, v in sorted(iter(extra_hosts.items()))
         ]

     return [
-        '{}:{}'.format(k, v) for k, v in sorted(six.iteritems(extra_hosts))
+        f'{k}:{v}' for k, v in sorted(iter(extra_hosts.items()))
     ]
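For reference, a sketch of the helpers touched above (values are illustrative; `b`/`k`/`m`/`g` suffixes are powers of 1024, and a `None` value formats as a bare key):

```python
from docker.utils import format_environment, parse_bytes

assert parse_bytes('512m') == 512 * 1024 ** 2
assert format_environment({'DEBUG': '1', 'TOKEN': None}) == ['DEBUG=1', 'TOKEN']
```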
@@ -1,2 +1,8 @@
-version = "4.4.0-dev"
-version_info = tuple([int(d) for d in version.split("-")[0].split(".")])
+try:
+    from ._version import __version__
+except ImportError:
+    from importlib.metadata import PackageNotFoundError, version
+    try:
+        __version__ = version('docker')
+    except PackageNotFoundError:
+        __version__ = '0.0.0'

@@ -1,2 +0,0 @@
-recommonmark==0.4.0
-Sphinx==1.4.6
@@ -1,3 +1,12 @@
 dl.hide-signature > dt {
     display: none;
 }

+dl.field-list > dt {
+    /* prevent code blocks from forcing wrapping on the "Parameters" header */
+    word-break: initial;
+}
+
+code.literal {
+    hyphens: none;
+}
@ -1,6 +1,254 @@
|
|||
Change log
|
||||
Changelog
|
||||
==========
|
||||
|
||||
7.1.0
|
||||
-----
|
||||
### Upgrade Notes
|
||||
- Bumped minimum engine API version to 1.24
|
||||
- Bumped default engine API version to 1.44 (Moby 25.0)
|
||||
|
||||
### Bugfixes
|
||||
- Fixed issue with tag parsing when the registry address includes ports that resulted in `invalid tag format` errors
|
||||
- Fixed issue preventing creating new configs (`ConfigCollection`), which failed with a `KeyError` due to the `name` field
|
||||
- Fixed an issue due to an update in the [requests](https://github.com/psf/requests) package breaking `docker-py` by applying the [suggested fix](https://github.com/psf/requests/pull/6710)
|
||||
|
||||
### Miscellaneous
|
||||
- Documentation improvements
|
||||
- Updated Ruff (linter) and fixed minor linting issues
|
||||
- Packaging/CI updates
|
||||
- Started using hatch for packaging (https://github.com/pypa/hatch)
|
||||
- Updated `setup-python` github action
|
||||
- Updated tests
|
||||
- Stopped checking for deprecated container and image related fields (`Container` and `ContainerConfig`)
|
||||
- Updated tests that check `NetworkSettings.Networks.<network>.Aliases` due to engine changes
|
||||
|
||||
7.0.0
|
||||
-----
|
||||
### Upgrade Notes
|
||||
- Removed SSL version (`ssl_version`) and explicit hostname check (`assert_hostname`) options
|
||||
- `assert_hostname` has not been used since Python 3.6 and was removed in 3.12
|
||||
- Python 3.7+ supports TLSv1.3 by default
|
||||
- Websocket support is no longer included by default
|
||||
- Use `pip install docker[websockets]` to include `websocket-client` dependency
|
||||
- By default, `docker-py` hijacks the TCP connection and does not use Websockets
|
||||
- Websocket client is only required to use `attach_socket(container, ws=True)`
|
||||
- Python 3.7 no longer officially supported (reached end-of-life June 2023)
|
||||
|
||||
### Features
|
||||
- Python 3.12 support
|
||||
- Full `networking_config` support for `containers.create()`
|
||||
- Replaces `network_driver_opt` (added in 6.1.0)
|
||||
- Add `health()` property to container that returns status (e.g. `unhealthy`)
|
||||
- Add `pause` option to `container.commit()`
|
||||
- Add support for bind mount propagation (e.g. `rshared`, `private`)
|
||||
- Add `filters`, `keep_storage`, and `all` parameters to `prune_builds()` (requires API v1.39+)
|
||||
|
||||
### Bugfixes
|
||||
- Consistently return `docker.errors.NotFound` on 404 responses
|
||||
- Validate tag format before image push
|
||||
|
||||
### Miscellaneous
|
||||
- Upgraded urllib3 version in `requirements.txt` (used for development/tests)
|
||||
- Documentation typo fixes & formatting improvements
|
||||
- Fixed integration test compatibility for newer Moby engine versions
|
||||
- Switch to [ruff](https://github.com/astral-sh/ruff) for linting
|
||||
|
||||
6.1.3
|
||||
-----
|
||||
#### Bugfixes
|
||||
- Fix compatibility with [`eventlet/eventlet`](https://github.com/eventlet/eventlet)
|
||||
|
||||
6.1.2
|
||||
-----
|
||||
|
||||
#### Bugfixes
|
||||
- Fix for socket timeouts on long `docker exec` calls
|
||||
|
||||
6.1.1
|
||||
-----
|
||||
|
||||
#### Bugfixes
|
||||
- Fix `containers.stats()` hanging with `stream=True`
|
||||
- Correct return type in docs for `containers.diff()` method
|
||||
|
||||
|
||||
6.1.0
|
||||
-----
|
||||
|
||||
### Upgrade Notes
|
||||
- Errors are no longer returned during client initialization if the credential helper cannot be found. A warning will be emitted instead, and an error is returned if the credential helper is used.
|
||||
|
||||
### Features
|
||||
- Python 3.11 support
|
||||
- Use `poll()` instead of `select()` on non-Windows platforms
|
||||
- New API fields
|
||||
- `network_driver_opt` on container run / create
|
||||
- `one-shot` on container stats
|
||||
- `status` on services list
|
||||
|
||||
### Bugfixes
|
||||
- Support for requests 2.29.0+ and urllib3 2.x
|
||||
- Do not strip characters from volume names
|
||||
- Fix connection leak on container.exec_* operations
|
||||
- Fix errors closing named pipes on Windows
|
||||
|
||||
6.0.1
|
||||
-----
|
||||
|
||||
### Bugfixes
|
||||
- Fix for `The pipe has been ended errors` on Windows
|
||||
- Support floats for container log filtering by timestamp (`since` / `until`)
|
||||
|
||||
6.0.0
|
||||
-----
|
||||
|
||||
### Upgrade Notes
|
||||
- Minimum supported Python version is 3.7+
|
||||
- When installing with pip, the `docker[tls]` extra is deprecated and a no-op,
|
||||
use `docker` for same functionality (TLS support is always available now)
|
||||
- Native Python SSH client (used by default / `use_ssh_client=False`) will now
|
||||
reject unknown host keys with `paramiko.ssh_exception.SSHException`
|
||||
- Short IDs are now 12 characters instead of 10 characters (same as Docker CLI)
|
||||
|
||||
### Features
|
||||
- Python 3.10 support
|
||||
- Automatically negotiate most secure TLS version
|
||||
- Add `platform` (e.g. `linux/amd64`, `darwin/arm64`) to container create & run
|
||||
- Add support for `GlobalJob` and `ReplicatedJobs` for Swarm
|
||||
- Add `remove()` method on `Image`
|
||||
- Add `force` param to `disable()` on `Plugin`
|
||||
|
||||
### Bugfixes

- Fix install issues on Windows related to `pywin32`
- Do not accept unknown SSH host keys in native Python SSH mode
- Use 12 character short IDs for consistency with Docker CLI
- Ignore trailing whitespace in `.dockerignore` files
- Fix IPv6 host parsing when explicit port specified
- Fix `ProxyCommand` option for SSH connections
- Do not spawn extra subshell when launching external SSH client
- Improve exception semantics to preserve context
- Documentation improvements (formatting, examples, typos, missing params)

### Miscellaneous

- Upgrade dependencies in `requirements.txt` to latest versions
- Remove extraneous transitive dependencies
- Eliminate usages of deprecated functions/methods
- Test suite reliability improvements
- GitHub Actions workflows for linting, unit tests, integration tests, and publishing releases

5.0.3
-----

[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/76?closed=1)

### Features

- Add `cap_add` and `cap_drop` parameters to service create and `ContainerSpec` (a sketch follows)
- Add `templating` parameter to config create

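A minimal sketch of the capability parameters on a Swarm service, assuming the daemon is a swarm manager; the service name is hypothetical:

```python
import docker
from docker.types import ContainerSpec, TaskTemplate

client = docker.from_env()  # assumes swarm mode is active

# Grant and drop Linux capabilities on the service's containers.
spec = ContainerSpec(
    image="nginx:alpine",
    cap_add=["CAP_NET_ADMIN"],
    cap_drop=["CAP_CHOWN"],
)
result = client.api.create_service(
    TaskTemplate(container_spec=spec),
    name="capped-nginx",  # hypothetical service name
)
```
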
### Bugfixes

- Fix getting a read timeout for logs/attach with a tty and slow output

### Miscellaneous

- Fix documentation examples

5.0.2
-----

[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/75?closed=1)

### Bugfixes

- Fix `disable_buffering` regression

5.0.1
-----

[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/74?closed=1)

### Bugfixes

- Bring back support for SSH identity files
- Clean up remaining Python 2 dependencies
- Fix image save example in docs

### Miscellaneous

- Bump urllib3 to 1.26.5
- Bump requests to 2.26.0

5.0.0
-----

[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/70?closed=1)

### Breaking changes

- Remove support for Python 2.7
- Make Python 3.6 the minimum supported version

### Features

- Add `limit` parameter to image search endpoint

### Bugfixes

- Fix `KeyError` exception on secret create
- Verify TLS keys loaded from docker contexts
- Update PORT_SPEC regex to allow square brackets for IPv6 addresses
- Fix containers and images documentation examples

4.4.4
-----

[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/73?closed=1)

### Bugfixes

- Remove `LD_LIBRARY_PATH` and `SSL_CERT_FILE` environment variables when shelling out to the ssh client

4.4.3
-----

[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/72?closed=1)

### Features

- Add support for `docker.types.Placement.MaxReplicas` (a sketch follows)

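A minimal sketch of the new placement limit, assuming the daemon is a swarm manager; the service name is hypothetical:

```python
import docker
from docker.types import ContainerSpec, Placement, TaskTemplate

client = docker.from_env()  # assumes swarm mode is active

# Limit scheduling to at most one replica of this service per node.
template = TaskTemplate(
    container_spec=ContainerSpec(image="nginx:alpine"),
    placement=Placement(maxreplicas=1),
)
result = client.api.create_service(template, name="spread-nginx")  # hypothetical name
```
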
### Bugfixes

- Fix SSH port parsing when shelling out to the ssh client

4.4.2
-----

[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/71?closed=1)

### Bugfixes

- Fix SSH connection bug where the hostname was incorrectly trimmed and the error was hidden
- Fix docs example

### Miscellaneous

- Add Python 3.8 and 3.9 to the setup.py classifier list

4.4.1
-----

[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/69?closed=1)

### Bugfixes

- Avoid setting an unsupported parameter for `subprocess.Popen` on Windows
- Replace use of the deprecated `filter` argument on `docker/api/image`

4.4.0
-----

[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/67?closed=1)

### Features

- Add an alternative SSH connection to the paramiko one, based on shelling out to the SSH client, similar to the behaviour of the Docker CLI (a sketch follows)
- Default image tag to `latest` on `pull`

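A minimal sketch of the shell-out transport; the host below is hypothetical:

```python
import docker

# Shell out to the local `ssh` binary (as the Docker CLI does) instead
# of the default paramiko-based transport.
client = docker.DockerClient(
    base_url="ssh://user@build-host",  # hypothetical host
    use_ssh_client=True,
)
print(client.version()["Version"])
```
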
### Bugfixes

- Fix plugin model upgrade
- Fix examples URL in ulimits

### Miscellaneous

- Improve exception messages for server and client errors
- Bump cryptography from 2.3 to 3.2

4.3.1
-----

@@ -25,7 +273,6 @@ Change log
 - Update default API version to v1.39
 - Update test engine version to 19.03.12

4.2.2
-----

@@ -81,7 +328,6 @@ Change log
 - Adjust `--platform` tests for changes in docker engine
 - Update credentials-helpers to v0.6.3

4.0.2
-----

@@ -95,7 +341,6 @@ Change log
 - Bumped version of websocket-client

4.0.1
-----

docs/conf.py (44 changed lines)

@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 #
 # docker-sdk-python documentation build configuration file, created by
 # sphinx-quickstart on Wed Sep 14 15:48:58 2016.
@@ -19,6 +18,8 @@
 import datetime
 import os
 import sys
+from importlib.metadata import version

 sys.path.insert(0, os.path.abspath('..'))
@@ -34,24 +35,19 @@ sys.path.insert(0, os.path.abspath('..'))
 extensions = [
     'sphinx.ext.autodoc',
     'sphinx.ext.napoleon',
+    'myst_parser'
 ]

 # Add any paths that contain templates here, relative to this directory.
 templates_path = ['_templates']

-from recommonmark.parser import CommonMarkParser
-
-source_parsers = {
-    '.md': CommonMarkParser,
+source_suffix = {
+    '.rst': 'restructuredtext',
+    '.txt': 'markdown',
+    '.md': 'markdown',
 }

 # The suffix(es) of source filenames.
 # You can specify multiple suffix as a list of string:
 #
-source_suffix = ['.rst', '.md']
+# source_suffix = '.md'

 # The encoding of source files.
 #
 # source_encoding = 'utf-8-sig'
@@ -60,28 +56,26 @@ source_suffix = ['.rst', '.md']
 master_doc = 'index'

 # General information about the project.
-project = u'Docker SDK for Python'
+project = 'Docker SDK for Python'
 year = datetime.datetime.now().year
-copyright = u'%d Docker Inc' % year
-author = u'Docker Inc'
+copyright = f'{year} Docker Inc'
+author = 'Docker Inc'

 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
 # built documents.
 #
-with open('../docker/version.py', 'r') as vfile:
-    exec(vfile.read())
-# The full version, including alpha/beta/rc tags.
-release = version
-# The short X.Y version.
-version = '{}.{}'.format(version_info[0], version_info[1])
+# see https://github.com/pypa/setuptools_scm#usage-from-sphinx
+release = version('docker')
+# for example take major/minor
+version = '.'.join(release.split('.')[:2])

 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
 #
 # This is also used if you do content translation via gettext catalogs.
 # Usually you set "language" from the command line for these cases.
-language = None
+language = 'en'

 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:
@@ -283,8 +277,8 @@ latex_elements = {
 # (source start file, target name, title,
 # author, documentclass [howto, manual, or own class]).
 latex_documents = [
-    (master_doc, 'docker-sdk-python.tex', u'docker-sdk-python Documentation',
-     u'Docker Inc.', 'manual'),
+    (master_doc, 'docker-sdk-python.tex', 'docker-sdk-python Documentation',
+     'Docker Inc.', 'manual'),
 ]

 # The name of an image file (relative to this directory) to place at the top of
@@ -325,7 +319,7 @@ latex_documents = [
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
 man_pages = [
-    (master_doc, 'docker-sdk-python', u'docker-sdk-python Documentation',
+    (master_doc, 'docker-sdk-python', 'docker-sdk-python Documentation',
      [author], 1)
 ]
@@ -340,7 +334,7 @@ man_pages = [
 # (source start file, target name, title, author,
 # dir menu entry, description, category)
 texinfo_documents = [
-    (master_doc, 'docker-sdk-python', u'docker-sdk-python Documentation',
+    (master_doc, 'docker-sdk-python', 'docker-sdk-python Documentation',
      author, 'docker-sdk-python', 'One line description of project.',
      'Miscellaneous'),
 ]

@@ -58,7 +58,7 @@ You can stream logs:

 .. code-block:: python

     >>> for line in container.logs(stream=True):
-    ...   print line.strip()
+    ...   print(line.strip())
     Reticulating spline 2...
     Reticulating spline 3...
     ...

@@ -15,7 +15,7 @@ For example, to check the server against a specific CA certificate:

 .. code-block:: python

-    tls_config = docker.tls.TLSConfig(ca_cert='/path/to/ca.pem')
+    tls_config = docker.tls.TLSConfig(ca_cert='/path/to/ca.pem', verify=True)
     client = docker.DockerClient(base_url='<https_url>', tls=tls_config)

 This is the equivalent of ``docker --tlsverify --tlscacert /path/to/ca.pem ...``.

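For mutual TLS, a client certificate and key can be supplied alongside the CA certificate; a sketch with illustrative paths:

.. code-block:: python

    import docker
    from docker.tls import TLSConfig

    # Roughly equivalent to `docker --tlsverify --tlscacert --tlscert --tlskey`.
    tls_config = TLSConfig(
        client_cert=('/path/to/cert.pem', '/path/to/key.pem'),
        ca_cert='/path/to/ca.pem',
        verify=True,
    )
    client = docker.DockerClient(base_url='<https_url>', tls=tls_config)
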
@@ -16,10 +16,13 @@ Prepare the command we are going to use. It prints "hello stdout"
 in `stdout`, followed by "hello stderr" in `stderr`:

 >>> cmd = '/bin/sh -c "echo hello stdout ; echo hello stderr >&2"'

 We'll run this command with all four combinations of ``stream``
 and ``demux``.

 With ``stream=False`` and ``demux=False``, the output is a string
 that contains both the `stdout` and the `stderr` output:

 >>> res = container.exec_run(cmd, stream=False, demux=False)
 >>> res.output
 b'hello stderr\nhello stdout\n'

@@ -52,15 +55,8 @@ Traceback (most recent call last):
   File "<stdin>", line 1, in <module>
 StopIteration

-Finally, with ``stream=False`` and ``demux=True``, the whole output
-is returned, but the streams are still separated:
-
->>> res = container.exec_run(cmd, stream=True, demux=True)
->>> next(res.output)
-(b'hello stdout\n', None)
->>> next(res.output)
-(None, b'hello stderr\n')
->>> next(res.output)
-Traceback (most recent call last):
-  File "<stdin>", line 1, in <module>
-StopIteration
+Finally, with ``stream=False`` and ``demux=True``, the output is a tuple ``(stdout, stderr)``:

 >>> res = container.exec_run(cmd, stream=False, demux=True)
 >>> res.output
 (b'hello stdout\n', b'hello stderr\n')

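The combinations above compose into a small helper; a sketch, assuming a ``container`` obtained from ``client.containers.run(..., detach=True)``:

.. code-block:: python

    def exec_text(container, cmd):
        # demux=True separates the streams; either element of the tuple
        # may be None when that stream produced no output.
        result = container.exec_run(cmd, stream=False, demux=True)
        stdout, stderr = result.output
        return (
            stdout.decode() if stdout else '',
            stderr.decode() if stderr else '',
        )

    out, err = exec_text(container, '/bin/sh -c "echo hi ; echo oops >&2"')
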
pyproject.toml (new file)

@@ -0,0 +1,102 @@
[build-system]
requires = ["hatchling", "hatch-vcs"]
build-backend = "hatchling.build"

[project]
name = "docker"
dynamic = ["version"]
description = "A Python library for the Docker Engine API."
readme = "README.md"
license = "Apache-2.0"
requires-python = ">=3.8"
maintainers = [
    { name = "Docker Inc.", email = "no-reply@docker.com" },
]
classifiers = [
    "Development Status :: 5 - Production/Stable",
    "Environment :: Other Environment",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: Apache Software License",
    "Operating System :: OS Independent",
    "Programming Language :: Python",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.8",
    "Programming Language :: Python :: 3.9",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
    "Topic :: Software Development",
    "Topic :: Utilities",
]

dependencies = [
    "requests >= 2.26.0",
    "urllib3 >= 1.26.0",
    "pywin32>=304; sys_platform == \"win32\"",
]

[project.optional-dependencies]
# ssh feature allows DOCKER_HOST=ssh://... style connections
ssh = [
    "paramiko>=2.4.3",
]
# tls is always supported, the feature is a no-op for backwards compatibility
tls = []
# websockets can be used as an alternate container attach mechanism but
# by default docker-py hijacks the TCP connection and does not use Websockets
# unless attach_socket(container, ws=True) is called
websockets = [
    "websocket-client >= 1.3.0",
]
# docs are dependencies required to build the ReadTheDocs site
# this is only needed for CI / working on the docs!
docs = [
    "myst-parser==0.18.0",
    "Sphinx==5.1.1",
]
# dev are dependencies required to test & lint this project
# this is only needed if you are making code changes to docker-py!
dev = [
    "coverage==7.2.7",
    "pytest==7.4.2",
    "pytest-cov==4.1.0",
    "pytest-timeout==2.1.0",
    "ruff==0.1.8",
]

[project.urls]
Changelog = "https://docker-py.readthedocs.io/en/stable/change-log.html"
Documentation = "https://docker-py.readthedocs.io"
Homepage = "https://github.com/docker/docker-py"
Source = "https://github.com/docker/docker-py"
Tracker = "https://github.com/docker/docker-py/issues"

[tool.hatch.version]
source = "vcs"

[tool.hatch.build.hooks.vcs]
version-file = "docker/_version.py"

[tool.hatch.build.targets.sdist]
include = [
    "/docker",
]

[tool.ruff]
target-version = "py38"
extend-select = [
    "B",
    "C",
    "F",
    "I",
    "UP",
    "W",
]
ignore = [
    "UP012",  # unnecessary `UTF-8` argument (we want to be explicit)
    "C901",  # too complex (there's a whole bunch of these)
]

[tool.ruff.per-file-ignores]
"**/__init__.py" = ["F401"]

requirements.txt (deleted)

@@ -1,18 +0,0 @@
appdirs==1.4.3
asn1crypto==0.22.0
backports.ssl-match-hostname==3.5.0.1
cffi==1.10.0
cryptography==3.2
enum34==1.1.6
idna==2.5
ipaddress==1.0.18
packaging==16.8
paramiko==2.4.2
pycparser==2.17
pyOpenSSL==18.0.0
pyparsing==2.2.0
pywin32==227; sys_platform == 'win32'
requests==2.20.0
six==1.10.0
urllib3==1.24.3
websocket-client==0.56.0

@@ -52,8 +52,8 @@ class Version(namedtuple('_Version', 'major minor patch stage edition')):
         return (int(self.major), int(self.minor), int(self.patch)) + stage

     def __str__(self):
-        stage = '-{}'.format(self.stage) if self.stage else ''
-        edition = '-{}'.format(self.edition) if self.edition else ''
+        stage = f'-{self.stage}' if self.stage else ''
+        edition = f'-{self.edition}' if self.edition else ''
         return '.'.join(map(str, self[:3])) + edition + stage

setup.cfg (deleted)

@@ -1,6 +0,0 @@
[bdist_wheel]
universal = 1

[metadata]
description_file = README.rst
license = Apache License 2.0

setup.py (deleted, 93 lines)

@@ -1,93 +0,0 @@
#!/usr/bin/env python
from __future__ import print_function

import codecs
import os

from setuptools import find_packages
from setuptools import setup

ROOT_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.join(ROOT_DIR)

requirements = [
    'six >= 1.4.0',
    'websocket-client >= 0.32.0',
    'requests >= 2.14.2, != 2.18.0',
]

extras_require = {
    ':python_version < "3.5"': 'backports.ssl_match_hostname >= 3.5',
    # While not imported explicitly, the ipaddress module is required for
    # ssl_match_hostname to verify hosts match with certificates via
    # ServerAltname: https://pypi.python.org/pypi/backports.ssl_match_hostname
    ':python_version < "3.3"': 'ipaddress >= 1.0.16',

    # win32 APIs if on Windows (required for npipe support)
    ':sys_platform == "win32"': 'pywin32==227',

    # If using docker-py over TLS, highly recommend this option is
    # pip-installed or pinned.

    # TODO: if pip installing both "requests" and "requests[security]", the
    # extra package from the "security" option are not installed (see
    # https://github.com/pypa/pip/issues/4391). Once that's fixed, instead of
    # installing the extra dependencies, install the following instead:
    #     'requests[security] >= 2.5.2, != 2.11.0, != 2.12.2'
    'tls': ['pyOpenSSL>=17.5.0', 'cryptography>=1.3.4', 'idna>=2.0.0'],

    # Only required when connecting using the ssh:// protocol
    'ssh': ['paramiko>=2.4.2'],
}

version = None
exec(open('docker/version.py').read())

with open('./test-requirements.txt') as test_reqs_txt:
    test_requirements = [line for line in test_reqs_txt]

long_description = ''
with codecs.open('./README.md', encoding='utf-8') as readme_md:
    long_description = readme_md.read()

setup(
    name="docker",
    version=version,
    description="A Python library for the Docker Engine API.",
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/docker/docker-py',
    project_urls={
        'Documentation': 'https://docker-py.readthedocs.io',
        'Changelog': 'https://docker-py.readthedocs.io/en/stable/change-log.html',  # noqa: E501
        'Source': 'https://github.com/docker/docker-py',
        'Tracker': 'https://github.com/docker/docker-py/issues',
    },
    packages=find_packages(exclude=["tests.*", "tests"]),
    install_requires=requirements,
    tests_require=test_requirements,
    extras_require=extras_require,
    python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
    zip_safe=False,
    test_suite='tests',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Other Environment',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Software Development',
        'Topic :: Utilities',
        'License :: OSI Approved :: Apache Software License',
    ],
    maintainer='Joffrey F',
    maintainer_email='joffrey@docker.com',
)

test-requirements.txt (deleted)

@@ -1,7 +0,0 @@
setuptools==44.0.0 # last version with python 2.7 support
coverage==4.5.2
flake8==3.6.0
mock==1.0.1
pytest==4.3.1
pytest-cov==2.6.1
pytest-timeout==1.3.3

@@ -1,17 +1,16 @@
-ARG PYTHON_VERSION=3.7
+# syntax=docker/dockerfile:1
+
+ARG PYTHON_VERSION=3.12
 FROM python:${PYTHON_VERSION}

-ARG APT_MIRROR
-RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \
-    && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list
-
 RUN apt-get update && apt-get -y install --no-install-recommends \
     gnupg2 \
     pass

 # Add SSH keys and set permissions
-COPY tests/ssh-keys /root/.ssh
+COPY tests/ssh/config/client /root/.ssh
+COPY tests/ssh/config/server/known_ed25519.pub /root/.ssh/known_hosts
+RUN sed -i '1s;^;dpy-dind-ssh ;' /root/.ssh/known_hosts
 RUN chmod -R 600 /root/.ssh

 COPY ./tests/gpg-keys /gpg-keys
@@ -27,11 +26,10 @@ RUN curl -sSL -o /opt/docker-credential-pass.tar.gz \
     chmod +x /usr/local/bin/docker-credential-pass

 WORKDIR /src
-COPY requirements.txt /src/requirements.txt
-RUN pip install -r requirements.txt
-
-COPY test-requirements.txt /src/test-requirements.txt
-RUN pip install -r test-requirements.txt
-
-COPY . /src
-RUN pip install .
+COPY . .
+
+ARG VERSION=0.0.0.dev0
+RUN --mount=type=cache,target=/cache/pip \
+    PIP_CACHE_DIR=/cache/pip \
+    SETUPTOOLS_SCM_PRETEND_VERSION=${VERSION} \
+    pip install .[dev,ssh,websockets]

@@ -1,4 +1,6 @@
-ARG PYTHON_VERSION=2.7
+# syntax=docker/dockerfile:1
+
+ARG PYTHON_VERSION=3.12

 FROM python:${PYTHON_VERSION}
 RUN mkdir /tmp/certs

@@ -1,23 +1,20 @@
-ARG API_VERSION=1.39
-ARG ENGINE_VERSION=19.03.12
+# syntax=docker/dockerfile:1
+
+ARG API_VERSION=1.45
+ARG ENGINE_VERSION=26.1

 FROM docker:${ENGINE_VERSION}-dind

-RUN apk add --no-cache \
+RUN apk add --no-cache --upgrade \
     openssh

 # Add the keys and set permissions
 RUN ssh-keygen -A

 # copy the test SSH config
-RUN echo "IgnoreUserKnownHosts yes" > /etc/ssh/sshd_config && \
-    echo "PubkeyAuthentication yes" >> /etc/ssh/sshd_config && \
-    echo "PermitRootLogin yes" >> /etc/ssh/sshd_config
+COPY tests/ssh/config/server /etc/ssh/

 # set authorized keys for client paswordless connection
-COPY tests/ssh-keys/authorized_keys /root/.ssh/authorized_keys
-RUN chmod 600 /root/.ssh/authorized_keys
+COPY tests/ssh/config/client/id_rsa.pub /root/.ssh/authorized_keys

-RUN echo "root:root" | chpasswd
-RUN ln -s /usr/local/bin/docker /usr/bin/docker
+# RUN echo "root:root" | chpasswd
+RUN chmod -R 600 /etc/ssh \
+    && chmod -R 600 /root/.ssh \
+    && ln -s /usr/local/bin/docker /usr/bin/docker
 EXPOSE 22

@@ -8,10 +8,10 @@ import tarfile
 import tempfile
 import time

-import docker
 import paramiko
 import pytest
-import six
+
+import docker


 def make_tree(dirs, files):
@@ -47,6 +47,19 @@ def untar_file(tardata, filename):
     return result


+def skip_if_desktop():
+    def fn(f):
+        @functools.wraps(f)
+        def wrapped(self, *args, **kwargs):
+            info = self.client.info()
+            if info['Name'] == 'docker-desktop':
+                pytest.skip('Test does not support Docker Desktop')
+            return f(self, *args, **kwargs)
+
+        return wrapped
+
+    return fn
+
+
 def requires_api_version(version):
     test_version = os.environ.get(
         'DOCKER_TEST_API_VERSION', docker.constants.DEFAULT_DOCKER_API_VERSION
@@ -54,7 +67,7 @@ def requires_api_version(version):

     return pytest.mark.skipif(
         docker.utils.version_lt(test_version, version),
-        reason="API version is too low (< {0})".format(version)
+        reason=f"API version is too low (< {version})"
     )


@@ -81,12 +94,12 @@ def wait_on_condition(condition, delay=0.1, timeout=40):
     start_time = time.time()
     while not condition():
         if time.time() - start_time > timeout:
-            raise AssertionError("Timeout: %s" % condition)
+            raise AssertionError(f"Timeout: {condition}")
         time.sleep(delay)


 def random_name():
-    return u'dockerpytest_{0:x}'.format(random.getrandbits(64))
+    return f'dockerpytest_{random.getrandbits(64):x}'


 def force_leave_swarm(client):
@@ -105,11 +118,11 @@ def force_leave_swarm(client):


 def swarm_listen_addr():
-    return '0.0.0.0:{0}'.format(random.randrange(10000, 25000))
+    return f'0.0.0.0:{random.randrange(10000, 25000)}'


 def assert_cat_socket_detached_with_keys(sock, inputs):
-    if six.PY3 and hasattr(sock, '_sock'):
+    if hasattr(sock, '_sock'):
         sock = sock._sock

     for i in inputs:
@@ -128,7 +141,7 @@ def assert_cat_socket_detached_with_keys(sock, inputs):
     # of the daemon no longer cause this to raise an error.
     try:
         sock.sendall(b'make sure the socket is closed\n')
-    except socket.error:
+    except OSError:
         return

     sock.sendall(b"make sure the socket is closed\n")
@@ -144,4 +157,4 @@ def ctrl_with(char):
     if re.match('[a-z]', char):
         return chr(ord(char) - ord('a') + 1).encode('ascii')
     else:
-        raise(Exception('char must be [a-z]'))
+        raise Exception('char must be [a-z]')

@@ -3,14 +3,13 @@ import os
 import shutil
 import tempfile

+import pytest
+
 from docker import errors
 from docker.utils.proxy import ProxyConfig

-import pytest
-import six
-
-from .base import BaseAPIIntegrationTest, TEST_IMG
 from ..helpers import random_name, requires_api_version, requires_experimental
+from .base import TEST_IMG, BaseAPIIntegrationTest


 class BuildTest(BaseAPIIntegrationTest):
@@ -71,9 +70,8 @@ class BuildTest(BaseAPIIntegrationTest):
         assert len(logs) > 0

     def test_build_from_stringio(self):
-        if six.PY3:
-            return
-        script = io.StringIO(six.text_type('\n').join([
+        return
+        script = io.StringIO('\n'.join([
             'FROM busybox',
             'RUN mkdir -p /tmp/test',
             'EXPOSE 8080',
@@ -83,8 +81,7 @@ class BuildTest(BaseAPIIntegrationTest):
         stream = self.client.build(fileobj=script)
         logs = ''
         for chunk in stream:
-            if six.PY3:
-                chunk = chunk.decode('utf-8')
+            chunk = chunk.decode('utf-8')
             logs += chunk
         assert logs != ''

@@ -103,7 +100,9 @@ class BuildTest(BaseAPIIntegrationTest):
             'ignored',
             'Dockerfile',
             '.dockerignore',
+            ' ignored-with-spaces ',  # check that spaces are trimmed
             '!ignored/subdir/excepted-file',
+            '! ignored/subdir/excepted-with-spaces   '
             '',  # empty line,
             '#*',  # comment line
         ]))
@@ -114,6 +113,9 @@ class BuildTest(BaseAPIIntegrationTest):
         with open(os.path.join(base_dir, '#file.txt'), 'w') as f:
             f.write('this file should not be ignored')

+        with open(os.path.join(base_dir, 'ignored-with-spaces'), 'w') as f:
+            f.write("this file should be ignored")
+
         subdir = os.path.join(base_dir, 'ignored', 'subdir')
         os.makedirs(subdir)
         with open(os.path.join(subdir, 'file'), 'w') as f:
@@ -122,12 +124,15 @@ class BuildTest(BaseAPIIntegrationTest):
         with open(os.path.join(subdir, 'excepted-file'), 'w') as f:
             f.write("this file should not be ignored")

+        with open(os.path.join(subdir, 'excepted-with-spaces'), 'w') as f:
+            f.write("this file should not be ignored")
+
         tag = 'docker-py-test-build-with-dockerignore'
         stream = self.client.build(
             path=base_dir,
             tag=tag,
         )
-        for chunk in stream:
+        for _chunk in stream:
             pass

         c = self.client.create_container(tag, ['find', '/test', '-type', 'f'])
@@ -135,11 +140,11 @@ class BuildTest(BaseAPIIntegrationTest):
         self.client.wait(c)
         logs = self.client.logs(c)

-        if six.PY3:
-            logs = logs.decode('utf-8')
+        logs = logs.decode('utf-8')

-        assert sorted(list(filter(None, logs.split('\n')))) == sorted([
+        assert sorted(filter(None, logs.split('\n'))) == sorted([
             '/test/#file.txt',
+            '/test/ignored/subdir/excepted-with-spaces',
             '/test/ignored/subdir/excepted-file',
             '/test/not-ignored'
         ])
@@ -155,7 +160,7 @@ class BuildTest(BaseAPIIntegrationTest):
             fileobj=script, tag='buildargs', buildargs={'test': 'OK'}
         )
         self.tmp_imgs.append('buildargs')
-        for chunk in stream:
+        for _chunk in stream:
             pass

         info = self.client.inspect_image('buildargs')
@@ -175,7 +180,7 @@ class BuildTest(BaseAPIIntegrationTest):
             fileobj=script, tag=tag, shmsize=shmsize
         )
         self.tmp_imgs.append(tag)
-        for chunk in stream:
+        for _chunk in stream:
             pass

         # There is currently no way to get the shmsize
@@ -193,7 +198,7 @@ class BuildTest(BaseAPIIntegrationTest):
             isolation='default'
         )

-        for chunk in stream:
+        for _chunk in stream:
             pass

     @requires_api_version('1.23')
@@ -208,7 +213,7 @@ class BuildTest(BaseAPIIntegrationTest):
             fileobj=script, tag='labels', labels=labels
         )
         self.tmp_imgs.append('labels')
-        for chunk in stream:
+        for _chunk in stream:
             pass

         info = self.client.inspect_image('labels')
@@ -225,7 +230,7 @@ class BuildTest(BaseAPIIntegrationTest):

         stream = self.client.build(fileobj=script, tag='build1')
         self.tmp_imgs.append('build1')
-        for chunk in stream:
+        for _chunk in stream:
             pass

         stream = self.client.build(
@@ -266,11 +271,11 @@ class BuildTest(BaseAPIIntegrationTest):
             fileobj=script, target='first', tag='build1'
         )
         self.tmp_imgs.append('build1')
-        for chunk in stream:
+        for _chunk in stream:
             pass

         info = self.client.inspect_image('build1')
-        assert not info['Config']['OnBuild']
+        assert 'OnBuild' not in info['Config'] or not info['Config']['OnBuild']

     @requires_api_version('1.25')
     def test_build_with_network_mode(self):
@@ -295,7 +300,7 @@ class BuildTest(BaseAPIIntegrationTest):
         )

         self.tmp_imgs.append('dockerpytest_customnetbuild')
-        for chunk in stream:
+        for _chunk in stream:
             pass

         assert self.client.inspect_image('dockerpytest_customnetbuild')
@@ -307,7 +312,7 @@ class BuildTest(BaseAPIIntegrationTest):
         )

         self.tmp_imgs.append('dockerpytest_nonebuild')
-        logs = [chunk for chunk in stream]
+        logs = list(stream)
         assert 'errorDetail' in logs[-1]
         assert logs[-1]['errorDetail']['code'] == 1

@@ -340,8 +345,7 @@ class BuildTest(BaseAPIIntegrationTest):
         assert self.client.inspect_image(img_name)
         ctnr = self.run_container(img_name, 'cat /hosts-file')
         logs = self.client.logs(ctnr)
-        if six.PY3:
-            logs = logs.decode('utf-8')
+        logs = logs.decode('utf-8')
         assert '127.0.0.1\textrahost.local.test' in logs
         assert '127.0.0.1\thello.world.test' in logs

@@ -361,7 +365,7 @@ class BuildTest(BaseAPIIntegrationTest):
             fileobj=script, tag=tag, squash=squash
         )
         self.tmp_imgs.append(tag)
-        for chunk in stream:
+        for _chunk in stream:
             pass

         return self.client.inspect_image(tag)
@@ -376,7 +380,7 @@ class BuildTest(BaseAPIIntegrationTest):
         snippet = 'Ancient Temple (Mystic Oriental Dream ~ Ancient Temple)'
         script = io.BytesIO(b'\n'.join([
             b'FROM busybox',
-            'RUN sh -c ">&2 echo \'{0}\'"'.format(snippet).encode('utf-8')
+            f'RUN sh -c ">&2 echo \'{snippet}\'"'.encode('utf-8')
         ]))

         stream = self.client.build(
@@ -385,10 +389,8 @@ class BuildTest(BaseAPIIntegrationTest):
         lines = []
         for chunk in stream:
             lines.append(chunk.get('stream'))
-        expected = '{0}{2}\n{1}'.format(
-            control_chars[0], control_chars[1], snippet
-        )
-        assert any([line == expected for line in lines])
+        expected = f'{control_chars[0]}{snippet}\n{control_chars[1]}'
+        assert any(line == expected for line in lines)

     def test_build_gzip_encoding(self):
         base_dir = tempfile.mkdtemp()
@@ -440,7 +442,7 @@ class BuildTest(BaseAPIIntegrationTest):
     @requires_api_version('1.32')
     @requires_experimental(until=None)
     def test_build_invalid_platform(self):
-        script = io.BytesIO('FROM busybox\n'.encode('ascii'))
+        script = io.BytesIO(b'FROM busybox\n')

         with pytest.raises(errors.APIError) as excinfo:
             stream = self.client.build(fileobj=script, platform='foobar')

@@ -47,7 +47,7 @@ class ConnectionTimeoutTest(unittest.TestCase):
         # This call isn't supposed to complete, and it should fail fast.
         try:
             res = self.client.inspect_container('id')
-        except:  # noqa: E722
+        except Exception:
             pass
         end = time.time()
         assert res is None

@@ -72,6 +72,4 @@ class UnixconnTest(unittest.TestCase):
         client.close()
         del client

-        assert len(w) == 0, "No warnings produced: {0}".format(
-            w[0].message
-        )
+        assert len(w) == 0, f"No warnings produced: {w[0].message}"