Compare commits

..

8 Commits
main ... v4.8.1

Author SHA1 Message Date
Urvashi Mohnani e524db436a
Merge pull request #357 from umohnani8/4.8.1
Bump version to podman 4.8.1
2023-12-21 07:35:41 -05:00
Urvashi Mohnani 29603f2cd1 Bump version to podman 4.8.1
Signed-off-by: Urvashi Mohnani <umohnani@redhat.com>
2023-12-20 11:30:08 -05:00
openshift-merge-bot[bot] ae0f1ee8ca
Merge pull request #359 from openshift-cherrypick-robot/cherry-pick-358-to-release-4.8
[release-4.8] Fix lint issues
2023-12-20 14:07:15 +00:00
Urvashi Mohnani e767cd8f7d Fix lint issues
Signed-off-by: Urvashi Mohnani <umohnani@redhat.com>
2023-12-20 13:26:15 +00:00
openshift-merge-bot[bot] 40d7bb319f
Merge pull request #354 from openshift-cherrypick-robot/cherry-pick-353-to-release-4.8
[release-4.8] Add rich dep to setup.cfg
2023-12-20 08:34:33 +00:00
Urvashi Mohnani 3a63473dcc Add rich dep to setup.cfg
Signed-off-by: Urvashi Mohnani <umohnani@redhat.com>
2023-12-11 12:55:21 +00:00
openshift-merge-bot[bot] 803ea74b3b
Merge pull request #347 from umohnani8/4.8.0-1
Bump version to 4.8.0.post1
2023-11-29 13:05:43 +00:00
Urvashi Mohnani 1555080e7f Bump version to 4.8.0.post1
Had an issue with the 4.8.0 release on pypi and we can't
repush changes with the same version again so need to bump
to a minor version to be able to publish on pypi again.

Signed-off-by: Urvashi Mohnani <umohnani@redhat.com>
2023-11-29 07:41:41 -05:00
103 changed files with 4104 additions and 3874 deletions

126
.cirrus.yml Normal file
View File

@ -0,0 +1,126 @@
---
env:
DEST_BRANCH: "main"
GOPATH: "/var/tmp/go"
GOBIN: "${GOPATH}/bin"
GOCACHE: "${GOPATH}/cache"
GOSRC: "${GOPATH}/src/github.com/containers/podman"
CIRRUS_WORKING_DIR: "${GOPATH}/src/github.com/containers/podman-py"
SCRIPT_BASE: "./contrib/cirrus"
CIRRUS_SHELL: "/bin/bash"
HOME: "/root" # not set by default
####
#### Cache-image names to test with (double-quotes around names are critical)
####
FEDORA_NAME: "fedora-38"
# Google-cloud VM Images
IMAGE_SUFFIX: "c20231116t174419z-f39f38d13"
FEDORA_CACHE_IMAGE_NAME: "fedora-podman-py-${IMAGE_SUFFIX}"
gcp_credentials: ENCRYPTED[0c639039cdd3a9a93fac7746ea1bf366d432e5ff3303bf293e64a7ff38dee85fd445f71625fa5626dc438be2b8efe939]
# Default VM to use unless set or modified by task
gce_instance:
image_project: "libpod-218412"
zone: "us-central1-c" # Required by Cirrus for the time being
cpu: 2
memory: "4Gb"
disk: 200 # Required for performance reasons
image_name: "${FEDORA_CACHE_IMAGE_NAME}"
gating_task:
name: "Gating test"
alias: gating
# Only run this on PRs, never during post-merge testing. This is also required
# for proper setting of EPOCH_TEST_COMMIT value, required by validation tools.
only_if: $CIRRUS_PR != ""
timeout_in: 20m
env:
PATH: ${PATH}:${GOPATH}/bin
script:
- make
- make lint
test_task:
name: "Test on $FEDORA_NAME"
alias: test
depends_on:
- gating
script:
- ${SCRIPT_BASE}/enable_ssh.sh
- ${SCRIPT_BASE}/build_podman.sh
- ${SCRIPT_BASE}/enable_podman.sh
- ${SCRIPT_BASE}/test.sh
latest_task:
name: "Test Podman main on $FEDORA_NAME"
alias: latest
allow_failures: true
depends_on:
- gating
env:
PATH: ${PATH}:${GOPATH}/bin
script:
- ${SCRIPT_BASE}/enable_ssh.sh
- ${SCRIPT_BASE}/build_podman.sh
- ${SCRIPT_BASE}/enable_podman.sh
- ${SCRIPT_BASE}/test.sh
# This task is critical. It updates the "last-used by" timestamp stored
# in metadata for all VM images. This mechanism functions in tandem with
# an out-of-band pruning operation to remove disused VM images.
meta_task:
alias: meta
name: "VM img. keepalive"
container: &smallcontainer
image: "quay.io/libpod/imgts:latest"
cpu: 1
memory: 1
env:
IMGNAMES: ${FEDORA_CACHE_IMAGE_NAME}
BUILDID: "${CIRRUS_BUILD_ID}"
REPOREF: "${CIRRUS_REPO_NAME}"
GCPJSON: ENCRYPTED[e8a53772eff6e86bf6b99107b6e6ee3216e2ca00c36252ae3bd8cb29d9b903ffb2e1a1322ea810ca251b04f833b8f8d9]
GCPNAME: ENCRYPTED[fb878daf188d35c2ed356dc777267d99b59863ff3abf0c41199d562fca50ba0668fdb0d87e109c9eaa2a635d2825feed]
GCPPROJECT: "libpod-218412"
clone_script: &noop mkdir -p $CIRRUS_WORKING_DIR
script: /usr/local/bin/entrypoint.sh
# Status aggregator for all tests. This task simply ensures a defined
# set of tasks all passed, and allows confirming that based on the status
# of this task.
success_task:
name: "Total Success"
alias: success
# N/B: ALL tasks must be listed here, minus their '_task' suffix.
depends_on:
- meta
- gating
- test
- latest
container:
image: quay.io/libpod/alpine:latest
cpu: 1
memory: 1
env:
CIRRUS_SHELL: "/bin/sh"
clone_script: *noop
script: *noop

View File

@ -1 +0,0 @@
1

View File

@ -51,5 +51,5 @@
*************************************************/
// Don't leave dep. update. PRs "hanging", assign them to people.
"assignees": ["inknos"],
"assignees": ["umohnani8", "cevich"],
}

View File

@ -4,13 +4,13 @@ on:
jobs:
commit:
runs-on: ubuntu-24.04
runs-on: ubuntu-22.04
# Only check commits on pull requests.
if: github.event_name == 'pull_request'
steps:
- name: get pr commits
id: 'get-pr-commits'
uses: tim-actions/get-pr-commits@v1.3.1
uses: tim-actions/get-pr-commits@v1.3.0
with:
token: ${{ secrets.GITHUB_TOKEN }}

View File

@ -1,18 +0,0 @@
name: pre-commit
on:
pull_request:
push:
branches: [main]
jobs:
pre-commit:
runs-on: ubuntu-latest
env:
SKIP: no-commit-to-branch
steps:
- uses: actions/checkout@v5
- uses: actions/setup-python@v6
with:
python-version: |
3.9
3.x
- uses: pre-commit/action@v3.0.1

View File

@ -1,126 +0,0 @@
name: Publish Python 🐍 distribution 📦 to PyPI and TestPyPI
on: push
jobs:
build:
name: Build distribution 📦
# ensure the workflow is never executed on forked branches
# it would fail anyway, so we just avoid to see an error
if: ${{ github.repository == 'containers/podman-py' }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- name: Set up Python
uses: actions/setup-python@v6
with:
python-version: "3.x"
- name: Install pypa/build
run: >-
python3 -m
pip install
build
--user
- name: Build a binary wheel and a source tarball
run: python3 -m build
- name: Store the distribution packages
uses: actions/upload-artifact@v4
with:
name: python-package-distributions
path: dist/
publish-to-pypi:
name: >-
Publish Python 🐍 distribution 📦 to PyPI
if: startsWith(github.ref, 'refs/tags/') && github.repository == 'containers/podman-py'
needs:
- build
runs-on: ubuntu-latest
environment:
name: pypi
url: https://pypi.org/p/podman
permissions:
id-token: write # IMPORTANT: mandatory for trusted publishing
steps:
- name: Download all the dists
uses: actions/download-artifact@v5
with:
name: python-package-distributions
path: dist/
- name: Publish distribution 📦 to PyPI
uses: pypa/gh-action-pypi-publish@release/v1
github-release:
name: >-
Sign the Python 🐍 distribution 📦 with Sigstore
and upload them to GitHub Release
if: github.repository == 'containers/podman-py'
needs:
- publish-to-pypi
runs-on: ubuntu-latest
permissions:
contents: write # IMPORTANT: mandatory for making GitHub Releases
id-token: write # IMPORTANT: mandatory for sigstore
steps:
- name: Download all the dists
uses: actions/download-artifact@v5
with:
name: python-package-distributions
path: dist/
- name: Sign the dists with Sigstore
uses: sigstore/gh-action-sigstore-python@v3.0.1
with:
inputs: >-
./dist/*.tar.gz
./dist/*.whl
- name: Create GitHub Release
env:
GITHUB_TOKEN: ${{ github.token }}
run: >-
gh release create
'${{ github.ref_name }}'
--repo '${{ github.repository }}'
--generate-notes
- name: Upload artifact signatures to GitHub Release
env:
GITHUB_TOKEN: ${{ github.token }}
# Upload to GitHub Release using the `gh` CLI.
# `dist/` contains the built packages, and the
# sigstore-produced signatures and certificates.
run: >-
gh release upload
'${{ github.ref_name }}' dist/**
--repo '${{ github.repository }}'
publish-to-testpypi:
name: Publish Python 🐍 distribution 📦 to TestPyPI
if: github.repository == 'containers/podman-py'
needs:
- build
runs-on: ubuntu-latest
environment:
name: testpypi
url: https://test.pypi.org/p/podman
permissions:
id-token: write # IMPORTANT: mandatory for trusted publishing
steps:
- name: Download all the dists
uses: actions/download-artifact@v5
with:
name: python-package-distributions
path: dist/
- name: Publish distribution 📦 to TestPyPI
uses: pypa/gh-action-pypi-publish@release/v1
with:
repository-url: https://test.pypi.org/legacy/
skip_existing: true
verbose: true

View File

@ -2,172 +2,40 @@
# See the documentation for more information:
# https://packit.dev/docs/configuration/
downstream_package_name: python-podman
specfile_path: rpm/python-podman.spec
upstream_tag_template: v{version}
files_to_sync:
- src: rpm/gating.yml
dest: gating.yml
delete: true
- src: pyproject.toml
dest: pyproject.toml
delete: true
- src: plans/
dest: plans/
delete: true
mkpath: true
- src: tests/
dest: tests/
delete: true
mkpath: true
- src: .fmf/
dest: .fmf/
delete: true
mkpath: true
packages:
python-podman-fedora:
pkg_tool: fedpkg
downstream_package_name: python-podman
specfile_path: rpm/python-podman.spec
python-podman-centos:
pkg_tool: centpkg
downstream_package_name: python-podman
specfile_path: rpm/python-podman.spec
python-podman-rhel:
specfile_path: rpm/python-podman.spec
srpm_build_deps:
- make
jobs:
# Copr builds for Fedora
- job: copr_build
trigger: pull_request
identifier: pr-fedora
packages: [python-podman-fedora]
targets:
- fedora-all
# Copr builds for CentOS Stream
- job: copr_build
trigger: pull_request
identifier: pr-centos
packages: [python-podman-centos]
targets:
- centos-stream-10
- centos-stream-8
- centos-stream-9
# Copr builds for RHEL
- job: copr_build
trigger: pull_request
identifier: pr-rhel
packages: [python-podman-rhel]
targets:
- epel-9
# Run on commit to main branch
- job: copr_build
trigger: commit
identifier: commit-fedora
packages: [python-podman-fedora]
branch: main
owner: rhcontainerbot
project: podman-next
# Downstream sync for Fedora
- job: propose_downstream
trigger: release
packages: [python-podman-fedora]
update_release: false
dist_git_branches:
- fedora-all
# Downstream sync for CentOS Stream
# TODO: c9s enablement being tracked in https://issues.redhat.com/browse/RUN-2123
- job: propose_downstream
trigger: release
packages: [python-podman-centos]
dist_git_branches:
- c10s
- c9s
- job: koji_build
trigger: commit
packages: [python-podman-fedora]
dist_git_branches:
- fedora-all
- job: bodhi_update
trigger: commit
packages: [python-podman-fedora]
dist_git_branches:
- fedora-branched # rawhide updates are created automatically
# Test linting on the codebase
# This test might break based on the OS and lint used, so we follow fedora-latest as a reference
- job: tests
trigger: pull_request
identifier: distro-sanity
tmt_plan: /distro/sanity
packages: [python-podman-fedora]
targets:
- fedora-latest-stable
skip_build: true
# test unit test coverage
- job: tests
trigger: pull_request
identifier: unittest-coverage
tmt_plan: /distro/unittest_coverage
packages: [python-podman-fedora]
targets:
- fedora-latest-stable
skip_build: true
# TODO: test integration test coverage
# run all tests for all python versions on all fedoras
- job: tests
trigger: pull_request
identifier: distro-fedora-all
tmt_plan: /distro/all_python
packages: [python-podman-fedora]
targets:
- fedora-all
# run tests for the rawhide python version using podman-next packages
- job: tests
trigger: pull_request
identifier: podman-next-fedora-base
tmt_plan: /pnext/base_python
packages: [python-podman-fedora]
targets:
- fedora-rawhide
tf_extra_params:
environments:
- artifacts:
- type: repository-file
id: https://copr.fedorainfracloud.org/coprs/rhcontainerbot/podman-next/repo/fedora-$releasever/rhcontainerbot-podman-next-fedora-$releasever.repo
manual_trigger: true
labels:
- pnext
- podman-next
- job: tests
trigger: pull_request
identifier: distro-centos-base
tmt_plan: /distro/base_python
packages: [python-podman-centos]
targets:
- centos-stream-9
- centos-stream-10
- job: tests
trigger: pull_request
identifier: distro-rhel-base
tmt_plan: /distro/base_python
packages: [python-podman-rhel]
targets:
- epel-9

View File

@ -1,27 +0,0 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
hooks:
- id: check-yaml
exclude: "gating.yml"
- id: end-of-file-fixer
- id: trailing-whitespace
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.12.8
hooks:
# Run the linter.
- id: ruff
args: [ --fix ]
# Run the formatter.
- id: ruff-format
- repo: https://github.com/teemtee/tmt.git
rev: 1.39.0
hooks:
- id: tmt-lint
- repo: https://github.com/pre-commit/mirrors-mypy
rev: v1.15.0
hooks:
- id: mypy
pass_filenames: false
args: ["--package", "podman"]

View File

@ -21,10 +21,7 @@ build:
# https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
python:
install:
- method: pip
path: .
extra_requirements:
- docs
- requirements: requirements.txt
# Build documentation in the docs/ directory with Sphinx
sphinx:

View File

@ -25,9 +25,9 @@ Please don't include any private/sensitive information in your issue!
## Tools we use
- Python >= 3.9
- [pre-commit](https://pre-commit.com/)
- [ruff](https://docs.astral.sh/ruff/)
- Python 3.6
- [pylint](https://www.pylint.org/)
- [black](https://github.com/psf/black)
- [tox](https://tox.readthedocs.io/en/latest/)
- You may need to use [virtualenv](https://virtualenv.pypa.io/en/latest/) to
support Python 3.6
@ -45,45 +45,6 @@ pip install tox
tox -e coverage
```
#### Advanced testing
Always prefer to run `tox` directly, even when you want to run a specific test or scenario.
Instead of running `pytest` directly, you should run:
```
tox -e py -- podman/tests/integration/test_container_create.py -k test_container_directory_volume_mount
```
If you'd like to test against a specific `tox` environment you can do:
```
tox -e py12 -- podman/tests/integration/test_container_create.py -k test_container_directory_volume_mount
```
Pass pytest options after `--`.
#### Testing future features
Since `podman-py` follows stable releases of `podman`, tests are intended to be run against
libpod versions that are commonly installed in the distributions. Tests can be versioned,
but preferably they should not. Occasionally, upstream can diverge and have features that
are not included in a specific version of libpod, or that will be included eventually.
To run a test against such changes, you need to have
[podman-next](https://copr.fedorainfracloud.org/coprs/rhcontainerbot/podman-next) installed.
Then, you need to mark the test as `@pytest.mark.pnext`. Marked tests will be excluded from the
runs, unless you pass `--pnext` as a cli option.
Preferably, this should be a rare case and it's better to use this marker as a temporary solution,
with the goal of removing the marker within a few PRs.
To run these tests use:
```
tox -e py -- --pnext -m pnext podman/tests/integration/test_container_create.py -k test_container_mounts_without_rw_as_default
```
The option `--pnext` **enables** the tests with the `pnext` pytest marker, and `-m pnext` will run
the marked tests **only**.
## Submitting changes
- Create a github pull request (PR)
@ -104,12 +65,10 @@ the marked tests **only**.
## Coding conventions
- Formatting and linting are incorporated using [ruff](https://docs.astral.sh/ruff/).
- If you use [pre-commit](https://pre-commit.com/) the checks will run automatically when you commit some changes
- If you prefer to run the checks with pre-commit, use `pre-commit run -a` to run the pre-commit checks for you.
- If you'd like to see what's happening with the checks you can run the [linter](https://docs.astral.sh/ruff/linter/)
and [formatter](https://docs.astral.sh/ruff/formatter/) separately with `ruff check --diff` and `ruff format --diff`
- Checks need to pass pylint
- Use [black](https://github.com/psf/black) code formatter. If you have tox
installed, run `tox -e black` to see what changes will be made. You can use
`tox -e black-format` to update the code formatting prior to committing.
- Pass pylint
- exceptions are possible, but you will need to make a good argument
- Use spaces not tabs for indentation
- This is open source software. Consider the people who will read your code,

View File

@ -8,37 +8,23 @@ DESTDIR ?=
EPOCH_TEST_COMMIT ?= $(shell git merge-base $${DEST_BRANCH:-main} HEAD)
HEAD ?= HEAD
export PODMAN_VERSION ?= "5.6.0"
export PODMAN_VERSION ?= "4.8.1"
.PHONY: podman
podman:
rm dist/* || :
$(PYTHON) -m pip install -q build
$(PYTHON) -m pip install --user -r requirements.txt
PODMAN_VERSION=$(PODMAN_VERSION) \
$(PYTHON) -m build
$(PYTHON) setup.py sdist bdist bdist_wheel
.PHONY: lint
lint: tox
$(PYTHON) -m tox -e format,lint,mypy
$(PYTHON) -m tox -e black,pylint
.PHONY: tests
tests: tox
# see tox.ini for environment variable settings
$(PYTHON) -m tox -e coverage,py39,py310,py311,py312,py313
.PHONY: tests-ci-base-python-podman-next
tests-ci-base-python-podman-next:
$(PYTHON) -m tox -e py -- --pnext -m pnext
.PHONY: tests-ci-base-python
tests-ci-base-python:
$(PYTHON) -m tox -e coverage,py
# TODO: coverage is probably not necessary here and in tests-ci-base-python
# but for now it's ok to leave it here so it's run
.PHONY: tests-ci-all-python
tests-ci-all-python:
$(PYTHON) -m tox -e coverage,py39,py310,py311,py312,py313
$(PYTHON) -m tox -e pylint,coverage,py36,py38,py39,py310,py311
.PHONY: unittest
unittest:
@ -53,9 +39,9 @@ integration:
.PHONY: tox
tox:
ifeq (, $(shell which dnf))
brew install python@3.9 python@3.10 python@3.11 python@3.12 python@3.13
brew install python@3.8 python@3.9 python@3.10 python@3.11
else
-dnf install -y python3 python3.9 python3.10 python3.11 python3.12 python3.13
-dnf install -y python3 python3.6 python3.8 python3.9
endif
# ensure tox is available. It will take care of other testing requirements
$(PYTHON) -m pip install --user tox

17
OWNERS
View File

@ -1,4 +1,6 @@
approvers:
- baude
- cdoern
- edsantiago
- giuseppe
- jwhonce
@ -6,13 +8,22 @@ approvers:
- Luap99
- mheon
- mwhahaha
- rhatdan
- TomSweeneyRedHat
- umohnani8
- vrothberg
- inknos
reviewers:
- ashley-cui
- baude
- Honny1
- cdoern
- edsantiago
- giuseppe
- jwhonce
- lsm5
- Luap99
- mheon
- mwhahaha
- rhatdan
- TomSweeneyRedHat
- Edward5hen
- umohnani8
- vrothberg

View File

@ -1,32 +1,14 @@
# podman-py
[![PyPI Latest Version](https://img.shields.io/pypi/v/podman)](https://pypi.org/project/podman/)
[![Build Status](https://api.cirrus-ci.com/github/containers/podman-py.svg)](https://cirrus-ci.com/github/containers/podman-py/main)
This python package is a library of bindings to use the RESTful API of [Podman](https://github.com/containers/podman).
It is currently under development and contributors are welcome!
## Installation
<div class="termy">
```console
pip install podman
```
</div>
---
**Documentation**: <a href="https://podman-py.readthedocs.io/en/latest/" target="_blank">https://podman-py.readthedocs.io/en/latest/</a>
**Source Code**: <a href="https://github.com/containers/podman-py" target="_blank">https://github.com/containers/podman-py</a>
---
## Dependencies
* For runtime dependencies, see \[dependencies\] in [pyproject.toml](https://github.com/containers/podman-py/blob/main/pyproject.toml)
* For testing and development dependencies, see \[project.optional.dependencies\] in [pyproject.toml](https://github.com/containers/podman-py/blob/main/pyproject.toml)
* The package is split in \[progress\_bar\], \[docs\], and \[test\]
* For runtime dependencies, see [requirements.txt](https://github.com/containers/podman-py/blob/main/requirements.txt).
* For testing and development dependencies, see [test-requirements.txt](https://github.com/containers/podman-py/blob/main/test-requirements.txt).
## Example usage
@ -53,12 +35,9 @@ with PodmanClient(base_url=uri) as client:
# find all containers
for container in client.containers.list():
# After a list call you would probably want to reload the container
# to get the information about the variables such as status.
# Note that list() ignores the sparse option and assumes True by default.
container.reload()
first_name = container['Names'][0]
container = client.containers.get(first_name)
print(container, container.id, "\n")
print(container, container.status, "\n")
# available fields
print(sorted(container.attrs.keys()))

10
contrib/cirrus/build_podman.sh Executable file
View File

@ -0,0 +1,10 @@
#!/bin/bash
set -xeo pipefail
systemctl stop podman.socket || :
dnf erase podman -y
dnf copr enable rhcontainerbot/podman-next -y
dnf install podman -y

11
contrib/cirrus/enable_podman.sh Executable file
View File

@ -0,0 +1,11 @@
#!/bin/bash
set -eo pipefail
systemctl enable podman.socket podman.service
systemctl start podman.socket
systemctl status podman.socket ||:
# log which version of podman we just enabled
echo "Locate podman: $(type -P podman)"
podman --version

11
contrib/cirrus/enable_ssh.sh Executable file
View File

@ -0,0 +1,11 @@
#!/bin/bash
set -eo pipefail
systemctl enable sshd
systemctl start sshd
systemctl status sshd ||:
ssh-keygen -t ecdsa -b 521 -f /root/.ssh/id_ecdsa -P ""
cp /root/.ssh/authorized_keys /root/.ssh/authorized_keys%
cat /root/.ssh/id_ecdsa.pub >>/root/.ssh/authorized_keys

5
contrib/cirrus/test.sh Executable file
View File

@ -0,0 +1,5 @@
#!/bin/bash
set -eo pipefail
make tests

View File

@ -5,3 +5,4 @@
{% for docname in docnames %}
{{ docname }}
{%- endfor %}

View File

@ -20,9 +20,9 @@ sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'Podman Python SDK'
copyright = '2021, Red Hat Inc'
author = 'Red Hat Inc'
project = u'Podman Python SDK'
copyright = u'2021, Red Hat Inc'
author = u'Red Hat Inc'
# The full version, including alpha/beta/rc tags
version = '3.2.1.0'
@ -125,7 +125,9 @@ class PatchedPythonDomain(PythonDomain):
def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
if 'refspecific' in node:
del node['refspecific']
return super().resolve_xref(env, fromdocname, builder, typ, target, node, contnode)
return super(PatchedPythonDomain, self).resolve_xref(
env, fromdocname, builder, typ, target, node, contnode
)
def skip(app, what, name, obj, would_skip, options):

View File

@ -34,14 +34,13 @@ Example
.. code-block:: python
:linenos:
import podman
with podman.PodmanClient() as client:
if client.ping():
images = client.images.list()
for image in images:
print(image.id)
import podman
with podman.Client() as client:
if client.ping():
images = client.images.list()
for image in images:
print(image.id)
.. toctree::
:caption: Podman Client

View File

@ -1,10 +0,0 @@
---
!Policy
product_versions:
- fedora-*
decision_contexts:
- bodhi_update_push_stable
- bodhi_update_push_testing
subject_type: koji_build
rules:
- !PassingTestCaseRule {test_case_name: fedora-ci.koji-build./plans/downstream/all.functional}

61
hack/get_ci_vm.sh Executable file
View File

@ -0,0 +1,61 @@
#!/usr/bin/env bash
#
# For help and usage information, simply execute the script w/o any arguments.
#
# This script is intended to be run by Red Hat podman-py developers who need
# to debug problems specifically related to Cirrus-CI automated testing.
# It requires that you have been granted prior access to create VMs in
# google-cloud. For non-Red Hat contributors, VMs are available as-needed,
# with supervision upon request.
set -e
SCRIPT_FILEPATH=$(realpath "${BASH_SOURCE[0]}")
SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH")
REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../")
# Help detect if we were called by get_ci_vm container
GET_CI_VM="${GET_CI_VM:-0}"
in_get_ci_vm() {
if ((GET_CI_VM==0)); then
echo "Error: $1 is not intended for use in this context"
exit 2
fi
}
# get_ci_vm APIv1 container entrypoint calls into this script
# to obtain required repo. specific configuration options.
if [[ "$1" == "--config" ]]; then
in_get_ci_vm "$1"
cat <<EOF
DESTDIR="/var/tmp/go/src/github.com/containers/podman-py"
UPSTREAM_REPO="https://github.com/containers/podman-py.git"
CI_ENVFILE="/etc/ci_environment"
GCLOUD_PROJECT="podman-py"
GCLOUD_IMGPROJECT="libpod-218412"
GCLOUD_CFG="podman-py"
GCLOUD_ZONE="${GCLOUD_ZONE:-us-central1-c}"
GCLOUD_CPUS="2"
GCLOUD_MEMORY="4Gb"
GCLOUD_DISK="200"
EOF
elif [[ "$1" == "--setup" ]]; then
in_get_ci_vm "$1"
echo "+ Setting up and Running make" > /dev/stderr
echo 'PATH=$PATH:$GOPATH/bin' > /etc/ci_environment
make
else
# Create and access VM for specified Cirrus-CI task
mkdir -p $HOME/.config/gcloud/ssh
podman run -it --rm \
--tz=local \
-e NAME="$USER" \
-e SRCDIR=/src \
-e GCLOUD_ZONE="$GCLOUD_ZONE" \
-e DEBUG="${DEBUG:-0}" \
-v $REPO_DIRPATH:/src:O \
-v $HOME/.config/gcloud:/root/.config/gcloud:z \
-v $HOME/.config/gcloud/ssh:/root/.ssh:z \
quay.io/libpod/get_ci_vm:latest "$@"
fi

View File

@ -1,116 +0,0 @@
summary: Run Python Podman Tests
discover:
how: fmf
execute:
how: tmt
prepare:
- name: pkg dependencies
how: install
package:
- make
- python3-pip
- podman
- name: pip dependencies
how: shell
script:
- pip3 install .[test]
- name: ssh configuration
how: shell
script:
- ssh-keygen -t ecdsa -b 521 -f /root/.ssh/id_ecdsa -P ""
- cp /root/.ssh/authorized_keys /root/.ssh/authorized_keys%
- cat /root/.ssh/id_ecdsa.pub >>/root/.ssh/authorized_keys
# Run tests against Podman Next builds.
# These tests should NOT overlap with the ones who run in the distro plan and should only include
# tests against upcoming features or upstream tests that we need to run for reasons.
/pnext:
prepare+:
- name: enable rhcontainerbot/podman-next update podman
when: initiator == packit
how: shell
script: |
COPR_REPO_FILE="/etc/yum.repos.d/*podman-next*.repo"
if compgen -G $COPR_REPO_FILE > /dev/null; then
sed -i -n '/^priority=/!p;$apriority=1' $COPR_REPO_FILE
fi
dnf -y upgrade --allowerasing
/base_python:
summary: Run Tests Upstream PRs for base Python
discover+:
filter: tag:pnext
adjust+:
enabled: false
when: initiator is not defined or initiator != packit
# Run tests against Podman builds installed from the distribution.
/distro:
prepare+:
- name: Enable testing repositories
when: initiator == packit && distro == fedora
how: shell
script: |
dnf config-manager setopt updates-testing.enabled=true
dnf -y upgrade --allowerasing --setopt=allow_vendor_change=true
/sanity:
summary: Run Sanity and Coverage checks on Python Podman
discover+:
# we want to change this to tag:stable once all the coverage tests are fixed
filter: tag:lint
/base_python:
summary: Run Tests Upstream for base Python
discover+:
filter: tag:base
/all_python:
summary: Run Tests Upstream PRs for all Python versions
prepare+:
- name: install all python versions
how: install
package:
- python3.9
- python3.10
- python3.11
- python3.12
- python3.13
discover+:
filter: tag:matrix
# TODO: replace with /coverage and include integration tests coverage
/unittest_coverage:
summary: Run Unit test coverage
discover+:
filter: tag:coverage & tag:unittest
adjust+:
enabled: false
when: initiator is not defined or initiator != packit
# Run tests against downstream Podman. These tests should be the all_python only since the sanity
# of code is tested in the distro environment
/downstream:
/all:
summary: Run Tests on bodhi / errata and dist-git PRs
prepare+:
- name: install all python versions
how: install
package:
- python3.9
- python3.10
- python3.11
- python3.12
- python3.13
discover+:
filter: tag:matrix
adjust+:
enabled: false
when: initiator == packit

View File

@ -1,5 +1,9 @@
"""Podman client module."""
import sys
assert sys.version_info >= (3, 6), "Python 3.6 or greater is required."
from podman.client import PodmanClient, from_env
from podman.version import __version__

View File

@ -1,9 +1,10 @@
"""Tools for connecting to a Podman service."""
import re
from podman.api.cached_property import cached_property
from podman.api.client import APIClient
from podman.api.api_versions import VERSION, COMPATIBLE_VERSION
from podman.api.http_utils import encode_auth_header, prepare_body, prepare_filters
from podman.api.http_utils import prepare_body, prepare_filters
from podman.api.parse_utils import (
decode_header,
frames,
@ -14,20 +15,42 @@ from podman.api.parse_utils import (
stream_helper,
)
from podman.api.tar_utils import create_tar, prepare_containerfile, prepare_containerignore
from .. import version
DEFAULT_CHUNK_SIZE = 2 * 1024 * 1024
def _api_version(release: str, significant: int = 3) -> str:
"""Return API version removing any additional identifiers from the release version.
This is a simple lexicographical parsing, no semantics are applied, e.g. semver checking.
"""
items = re.split(r"\.|-|\+", release)
parts = items[0:significant]
return ".".join(parts)
VERSION: str = _api_version(version.__version__)
COMPATIBLE_VERSION: str = _api_version(version.__compatible_version__, 2)
try:
from typing import Literal
except (ImportError, ModuleNotFoundError):
try:
from typing_extensions import Literal
except (ImportError, ModuleNotFoundError):
from podman.api.typing_extensions import Literal # pylint: disable=ungrouped-imports
# isort: unique-list
__all__ = [
'APIClient',
'COMPATIBLE_VERSION',
'DEFAULT_CHUNK_SIZE',
'Literal',
'VERSION',
'cached_property',
'create_tar',
'decode_header',
'encode_auth_header',
'frames',
'parse_repository',
'prepare_body',

View File

@ -1,7 +1,6 @@
"""Utility functions for working with Adapters."""
from typing import NamedTuple
from collections.abc import Mapping
from typing import NamedTuple, Mapping
def _key_normalizer(key_class: NamedTuple, request_context: Mapping) -> Mapping:

View File

@ -1,18 +0,0 @@
"""Constants API versions"""
import re
from .. import version
def _api_version(release: str, significant: int = 3) -> str:
"""Return API version removing any additional identifiers from the release version.
This is a simple lexicographical parsing, no semantics are applied, e.g. semver checking.
"""
items = re.split(r"\.|-|\+", release)
parts = items[0:significant]
return ".".join(parts)
VERSION: str = _api_version(version.__version__)
COMPATIBLE_VERSION: str = _api_version(version.__compatible_version__, 2)

View File

@ -6,5 +6,5 @@ try:
from functools import cached_property # pylint: disable=unused-import
except ImportError:
def cached_property(fn): # type: ignore[no-redef]
def cached_property(fn):
return property(functools.lru_cache()(fn))

View File

@ -1,24 +1,16 @@
"""APIClient for connecting to Podman service."""
import json
import warnings
import urllib.parse
from typing import (
Any,
ClassVar,
IO,
Optional,
Union,
)
from collections.abc import Iterable, Mapping
from typing import Any, ClassVar, IO, Iterable, List, Mapping, Optional, Tuple, Type, Union
import requests
from requests.adapters import HTTPAdapter
from podman.api.api_versions import VERSION, COMPATIBLE_VERSION
from podman import api # pylint: disable=cyclic-import
from podman.api.ssh import SSHAdapter
from podman.api.uds import UDSAdapter
from podman.errors import APIError, NotFound, PodmanError
from podman.errors import APIError, NotFound
from podman.tlsconfig import TLSConfig
from podman.version import __version__
@ -27,25 +19,15 @@ _Data = Union[
str,
bytes,
Mapping[str, Any],
Iterable[tuple[str, Optional[str]]],
Iterable[Tuple[str, Optional[str]]],
IO,
]
"""Type alias for request data parameter."""
_Timeout = Union[None, float, tuple[float, float], tuple[float, None]]
_Timeout = Union[None, float, Tuple[float, float], Tuple[float, None]]
"""Type alias for request timeout parameter."""
class ParameterDeprecationWarning(DeprecationWarning):
"""
Custom DeprecationWarning for deprecated parameters.
"""
# Make the ParameterDeprecationWarning visible for user.
warnings.simplefilter('always', ParameterDeprecationWarning)
class APIResponse:
"""APIResponse proxy requests.Response objects.
@ -65,7 +47,7 @@ class APIResponse:
"""Forward any query for an attribute not defined in this proxy class to wrapped class."""
return getattr(self._response, item)
def raise_for_status(self, not_found: type[APIError] = NotFound) -> None:
def raise_for_status(self, not_found: Type[APIError] = NotFound) -> None:
"""Raises exception when Podman service reports one."""
if self.status_code < 400:
return
@ -88,7 +70,7 @@ class APIClient(requests.Session):
# Abstract methods (delete,get,head,post) are specialized and pylint cannot walk hierarchy.
# pylint: disable=too-many-instance-attributes,arguments-differ,arguments-renamed
supported_schemes: ClassVar[list[str]] = (
supported_schemes: ClassVar[List[str]] = (
"unix",
"http+unix",
"ssh",
@ -107,9 +89,9 @@ class APIClient(requests.Session):
num_pools: Optional[int] = None,
credstore_env: Optional[Mapping[str, str]] = None,
use_ssh_client=True,
max_pool_size=None,
max_pools_size=None,
**kwargs,
): # pylint: disable=unused-argument,too-many-positional-arguments
): # pylint: disable=unused-argument
"""Instantiate APIClient object.
Args:
@ -135,39 +117,30 @@ class APIClient(requests.Session):
self.base_url = self._normalize_url(base_url)
adapter_kwargs = kwargs.copy()
# The HTTPAdapter doesn't handle the "**kwargs", so it needs special structure
# where the parameters are set specifically.
http_adapter_kwargs = {}
if num_pools is not None:
adapter_kwargs["pool_connections"] = num_pools
http_adapter_kwargs["pool_connections"] = num_pools
if max_pool_size is not None:
adapter_kwargs["pool_maxsize"] = max_pool_size
http_adapter_kwargs["pool_maxsize"] = max_pool_size
if max_pools_size is not None:
adapter_kwargs["pool_maxsize"] = max_pools_size
if timeout is not None:
adapter_kwargs["timeout"] = timeout
if self.base_url.scheme == "http+unix":
self.mount("http://", UDSAdapter(self.base_url.geturl(), **adapter_kwargs))
self.mount("https://", UDSAdapter(self.base_url.geturl(), **adapter_kwargs))
# ignore proxies from the env vars
self.trust_env = False
elif self.base_url.scheme == "http+ssh":
self.mount("http://", SSHAdapter(self.base_url.geturl(), **adapter_kwargs))
self.mount("https://", SSHAdapter(self.base_url.geturl(), **adapter_kwargs))
elif self.base_url.scheme == "http":
self.mount("http://", HTTPAdapter(**http_adapter_kwargs))
self.mount("https://", HTTPAdapter(**http_adapter_kwargs))
self.mount("http://", HTTPAdapter(**adapter_kwargs))
self.mount("https://", HTTPAdapter(**adapter_kwargs))
else:
raise PodmanError("APIClient.supported_schemes changed without adding a branch here.")
assert False, "APIClient.supported_schemes changed without adding a branch here."
self.version = version or VERSION
self.version = version or api.VERSION
self.path_prefix = f"/v{self.version}/libpod/"
self.compatible_version = kwargs.get("compatible_version", COMPATIBLE_VERSION)
self.compatible_version = kwargs.get("compatible_version", api.COMPATIBLE_VERSION)
self.compatible_prefix = f"/v{self.compatible_version}/"
self.timeout = timeout
@ -206,7 +179,6 @@ class APIClient(requests.Session):
def delete(
self,
path: Union[str, bytes],
*,
params: Union[None, bytes, Mapping[str, str]] = None,
headers: Optional[Mapping[str, str]] = None,
timeout: _Timeout = None,
@ -241,8 +213,7 @@ class APIClient(requests.Session):
def get(
self,
path: Union[str, bytes],
*,
params: Union[None, bytes, Mapping[str, list[str]]] = None,
params: Union[None, bytes, Mapping[str, List[str]]] = None,
headers: Optional[Mapping[str, str]] = None,
timeout: _Timeout = None,
stream: Optional[bool] = False,
@ -276,7 +247,6 @@ class APIClient(requests.Session):
def head(
self,
path: Union[str, bytes],
*,
params: Union[None, bytes, Mapping[str, str]] = None,
headers: Optional[Mapping[str, str]] = None,
timeout: _Timeout = None,
@ -311,7 +281,6 @@ class APIClient(requests.Session):
def post(
self,
path: Union[str, bytes],
*,
params: Union[None, bytes, Mapping[str, str]] = None,
data: _Data = None,
headers: Optional[Mapping[str, str]] = None,
@ -331,7 +300,6 @@ class APIClient(requests.Session):
Keyword Args:
compatible: Will override the default path prefix with compatible prefix
verify: Whether to verify TLS certificates.
Raises:
APIError: when service returns an error
@ -350,7 +318,6 @@ class APIClient(requests.Session):
def put(
self,
path: Union[str, bytes],
*,
params: Union[None, bytes, Mapping[str, str]] = None,
data: _Data = None,
headers: Optional[Mapping[str, str]] = None,
@ -389,7 +356,6 @@ class APIClient(requests.Session):
self,
method: str,
path: Union[str, bytes],
*,
data: _Data = None,
params: Union[None, bytes, Mapping[str, str]] = None,
headers: Optional[Mapping[str, str]] = None,
@ -408,7 +374,6 @@ class APIClient(requests.Session):
Keyword Args:
compatible: Will override the default path prefix with compatible prefix
verify: Whether to verify TLS certificates.
Raises:
APIError: when service returns an error
@ -424,10 +389,10 @@ class APIClient(requests.Session):
path = path.lstrip("/") # leading / makes urljoin crazy...
scheme = "https" if kwargs.get("verify", None) else "http"
# TODO should we have an option for HTTPS support?
# Build URL for operation from base_url
uri = urllib.parse.ParseResult(
scheme,
"http",
self.base_url.netloc,
urllib.parse.urljoin(path_prefix, path),
self.base_url.params,
@ -444,7 +409,6 @@ class APIClient(requests.Session):
data=data,
headers=(headers or {}),
stream=stream,
verify=kwargs.get("verify", None),
**timeout_kw,
)
)

View File

@ -3,17 +3,16 @@
import base64
import collections.abc
import json
from typing import Optional, Union, Any
from collections.abc import Mapping
from typing import Dict, List, Mapping, Optional, Union, Any
def prepare_filters(filters: Union[str, list[str], Mapping[str, str]]) -> Optional[str]:
"""Return filters as an URL quoted JSON dict[str, list[Any]]."""
def prepare_filters(filters: Union[str, List[str], Mapping[str, str]]) -> Optional[str]:
"""Return filters as an URL quoted JSON Dict[str, List[Any]]."""
if filters is None or len(filters) == 0:
return None
criteria: dict[str, list[str]] = {}
criteria: Dict[str, List[str]] = {}
if isinstance(filters, str):
_format_string(filters, criteria)
elif isinstance(filters, collections.abc.Mapping):
@ -43,12 +42,12 @@ def _format_dict(filters, criteria):
for key, value in filters.items():
if value is None:
continue
str_value = str(value)
value = str(value)
if key in criteria:
criteria[key].append(str_value)
criteria[key].append(value)
else:
criteria[key] = [str_value]
criteria[key] = [value]
def _format_string(filters, criteria):
@ -68,7 +67,7 @@ def prepare_body(body: Mapping[str, Any]) -> str:
return json.dumps(body, sort_keys=True)
def _filter_values(mapping: Mapping[str, Any], recursion=False) -> dict[str, Any]:
def _filter_values(mapping: Mapping[str, Any], recursion=False) -> Dict[str, Any]:
"""Returns a canonical dictionary with values == None or empty Iterables removed.
Dictionary is walked using recursion.
@ -85,7 +84,6 @@ def _filter_values(mapping: Mapping[str, Any], recursion=False) -> dict[str, Any
continue
# depending on type we need details...
proposal: Any
if isinstance(value, collections.abc.Mapping):
proposal = _filter_values(value, recursion=True)
elif isinstance(value, collections.abc.Iterable) and not isinstance(value, str):
@ -93,7 +91,7 @@ def _filter_values(mapping: Mapping[str, Any], recursion=False) -> dict[str, Any
else:
proposal = value
if not recursion and proposal not in (None, "", [], {}):
if not recursion and proposal not in (None, str(), [], {}):
canonical[key] = proposal
elif recursion and proposal not in (None, [], {}):
canonical[key] = proposal
@ -101,5 +99,5 @@ def _filter_values(mapping: Mapping[str, Any], recursion=False) -> dict[str, Any
return canonical
def encode_auth_header(auth_config: dict[str, str]) -> bytes:
return base64.urlsafe_b64encode(json.dumps(auth_config).encode('utf-8'))
def encode_auth_header(auth_config: Dict[str, str]) -> str:
return base64.b64encode(json.dumps(auth_config).encode('utf-8'))

View File

@ -1,49 +0,0 @@
"""Utility functions for dealing with stdout and stderr."""
HEADER_SIZE = 8
STDOUT = 1
STDERR = 2
# pylint: disable=line-too-long
def demux_output(data_bytes):
"""Demuxes the output of a container stream into stdout and stderr streams.
Stream data is expected to be in the following format:
- 1 byte: stream type (1=stdout, 2=stderr)
- 3 bytes: padding
- 4 bytes: payload size (big-endian)
- N bytes: payload data
ref: https://docs.podman.io/en/latest/_static/api.html?version=v5.0#tag/containers/operation/ContainerAttachLibpod
Args:
data_bytes: Bytes object containing the combined stream data.
Returns:
A tuple containing two bytes objects: (stdout, stderr).
"""
stdout = b""
stderr = b""
while len(data_bytes) >= HEADER_SIZE:
# Extract header information
header, data_bytes = data_bytes[:HEADER_SIZE], data_bytes[HEADER_SIZE:]
stream_type = header[0]
payload_size = int.from_bytes(header[4:HEADER_SIZE], "big")
# Check if data is sufficient for payload
if len(data_bytes) < payload_size:
break # Incomplete frame, wait for more data
# Extract and process payload
payload = data_bytes[:payload_size]
if stream_type == STDOUT:
stdout += payload
elif stream_type == STDERR:
stderr += payload
else:
# todo: Handle unexpected stream types
pass
# Update data for next frame
data_bytes = data_bytes[payload_size:]
return stdout or None, stderr or None

View File

@ -4,32 +4,33 @@ import base64
import ipaddress
import json
import struct
from datetime import datetime, timezone
from typing import Any, Optional, Union
from collections.abc import Iterator
from datetime import datetime
from typing import Any, Dict, Iterator, Optional, Tuple, Union
from podman.api.client import APIResponse
from .output_utils import demux_output
from requests import Response
def parse_repository(name: str) -> tuple[str, Optional[str]]:
"""Parse repository image name from tag.
def parse_repository(name: str) -> Tuple[str, Optional[str]]:
"""Parse repository image name from tag or digest
Returns:
item 1: repository name
item 2: Either tag or None
item 2: Either digest and tag, tag, or None
"""
# split image name and digest
elements = name.split("@", 1)
if len(elements) == 2:
return elements[0], elements[1]
# split repository and image name from tag
# tags need to be split from the right since
# a port number might increase the split list len by 1
elements = name.rsplit(":", 1)
elements = name.split(":", 1)
if len(elements) == 2 and "/" not in elements[1]:
return elements[0], elements[1]
return name, None
def decode_header(value: Optional[str]) -> dict[str, Any]:
def decode_header(value: Optional[str]) -> Dict[str, Any]:
"""Decode a base64 JSON header value."""
if value is None:
return {}
@ -48,15 +49,13 @@ def prepare_timestamp(value: Union[datetime, int, None]) -> Optional[int]:
return value
if isinstance(value, datetime):
if value.tzinfo is None:
value = value.replace(tzinfo=timezone.utc)
delta = value - datetime.fromtimestamp(0, timezone.utc)
delta = value - datetime.utcfromtimestamp(0)
return delta.seconds + delta.days * 24 * 3600
raise ValueError(f"Type '{type(value)}' is not supported by prepare_timestamp()")
def prepare_cidr(value: Union[ipaddress.IPv4Network, ipaddress.IPv6Network]) -> tuple[str, str]:
def prepare_cidr(value: Union[ipaddress.IPv4Network, ipaddress.IPv6Network]) -> (str, str):
"""Returns network address and Base64 encoded netmask from CIDR.
The return values are dictated by the Go JSON decoder.
@ -64,7 +63,7 @@ def prepare_cidr(value: Union[ipaddress.IPv4Network, ipaddress.IPv6Network]) ->
return str(value.network_address), base64.b64encode(value.netmask.packed).decode("utf-8")
def frames(response: APIResponse) -> Iterator[bytes]:
def frames(response: Response) -> Iterator[bytes]:
"""Returns each frame from multiplexed payload, all results are expected in the payload.
The stdout and stderr frames are undifferentiated as they are returned.
@ -80,13 +79,11 @@ def frames(response: APIResponse) -> Iterator[bytes]:
yield response.content[frame_begin:frame_end]
def stream_frames(
response: APIResponse, demux: bool = False
) -> Iterator[Union[bytes, tuple[bytes, bytes]]]:
def stream_frames(response: Response) -> Iterator[bytes]:
"""Returns each frame from multiplexed streamed payload.
If ``demux`` then output will be tuples where the first position is ``STDOUT`` and the second
is ``STDERR``.
Notes:
The stdout and stderr frames are undifferentiated as they are returned.
"""
while True:
header = response.raw.read(8)
@ -98,18 +95,14 @@ def stream_frames(
continue
data = response.raw.read(frame_length)
if demux:
data = demux_output(header + data)
if not data:
return
yield data
def stream_helper(
response: APIResponse, decode_to_json: bool = False
) -> Union[Iterator[bytes], Iterator[dict[str, Any]]]:
response: Response, decode_to_json: bool = False
) -> Union[Iterator[bytes], Iterator[Dict[str, Any]]]:
"""Helper to stream results and optionally decode to json"""
for value in response.iter_lines():
if decode_to_json:

View File

@ -1,54 +0,0 @@
"""Helper functions for managing paths"""
import errno
import getpass
import os
import stat
def get_runtime_dir() -> str:
"""Returns the runtime directory for the current user
The value in XDG_RUNTIME_DIR is preferred, but that is not always set, for
example, on headless servers. /run/user/$UID is defined in the XDG documentation.
"""
try:
return os.environ['XDG_RUNTIME_DIR']
except KeyError:
user = getpass.getuser()
run_user = f'/run/user/{os.getuid()}'
if os.path.isdir(run_user):
return run_user
fallback = f'/tmp/podmanpy-runtime-dir-fallback-{user}'
try:
# This must be a real directory, not a symlink, so attackers can't
# point it elsewhere. So we use lstat to check it.
fallback_st = os.lstat(fallback)
except OSError as e:
if e.errno == errno.ENOENT:
os.mkdir(fallback, 0o700)
else:
raise
else:
# The fallback must be a directory
if not stat.S_ISDIR(fallback_st.st_mode):
os.unlink(fallback)
os.mkdir(fallback, 0o700)
# Must be owned by the user and not accessible by anyone else
elif (fallback_st.st_uid != os.getuid()) or (
fallback_st.st_mode & (stat.S_IRWXG | stat.S_IRWXO)
):
os.rmdir(fallback)
os.mkdir(fallback, 0o700)
return fallback
def get_xdg_config_home() -> str:
"""Returns the XDG_CONFIG_HOME directory for the current user"""
try:
return os.environ["XDG_CONFIG_HOME"]
except KeyError:
return os.path.join(os.path.expanduser("~"), ".config")

View File

@ -15,12 +15,12 @@ from contextlib import suppress
from typing import Optional, Union
import time
import xdg.BaseDirectory
import urllib3
import urllib3.connection
from requests.adapters import DEFAULT_POOLBLOCK, DEFAULT_RETRIES, HTTPAdapter
from podman.api.path_utils import get_runtime_dir
from .adapter_utils import _key_normalizer
@ -46,7 +46,7 @@ class SSHSocket(socket.socket):
self.identity = identity
self._proc: Optional[subprocess.Popen] = None
runtime_dir = pathlib.Path(get_runtime_dir()) / "podman"
runtime_dir = pathlib.Path(xdg.BaseDirectory.get_runtime_dir(strict=False)) / "podman"
runtime_dir.mkdir(mode=0o700, parents=True, exist_ok=True)
self.local_sock = runtime_dir / f"podman-forward-{random.getrandbits(80):x}.sock"
@ -250,7 +250,7 @@ class SSHAdapter(HTTPAdapter):
max_retries: int = DEFAULT_RETRIES,
pool_block: int = DEFAULT_POOLBLOCK,
**kwargs,
): # pylint: disable=too-many-positional-arguments
):
"""Initialize SSHAdapter.
Args:

View File

@ -6,12 +6,12 @@ import shutil
import tarfile
import tempfile
from fnmatch import fnmatch
from typing import BinaryIO, Optional
from typing import BinaryIO, List, Optional
import sys
def prepare_containerignore(anchor: str) -> list[str]:
def prepare_containerignore(anchor: str) -> List[str]:
"""Return the list of patterns for filenames to exclude.
.containerignore takes precedence over .dockerignore.
@ -24,7 +24,7 @@ def prepare_containerignore(anchor: str) -> list[str]:
with ignore.open(encoding='utf-8') as file:
return list(
filter(
lambda i: i and not i.startswith("#"),
lambda l: l and not l.startswith("#"),
(line.strip() for line in file.readlines()),
)
)
@ -53,7 +53,7 @@ def prepare_containerfile(anchor: str, dockerfile: str) -> str:
def create_tar(
anchor: str, name: str = None, exclude: list[str] = None, gzip: bool = False
anchor: str, name: str = None, exclude: List[str] = None, gzip: bool = False
) -> BinaryIO:
"""Create a tarfile from context_dir to send to Podman service.
@ -119,7 +119,7 @@ def create_tar(
return open(name.name, "rb") # pylint: disable=consider-using-with
def _exclude_matcher(path: str, exclude: list[str]) -> bool:
def _exclude_matcher(path: str, exclude: List[str]) -> bool:
"""Returns True if path matches an entry in exclude.
Note:

File diff suppressed because it is too large Load Diff

View File

@ -137,7 +137,7 @@ class UDSAdapter(HTTPAdapter):
max_retries=DEFAULT_RETRIES,
pool_block=DEFAULT_POOLBLOCK,
**kwargs,
): # pylint: disable=too-many-positional-arguments
):
"""Initialize UDSAdapter.
Args:
@ -153,7 +153,7 @@ class UDSAdapter(HTTPAdapter):
Examples:
requests.Session.mount(
"http://", UDSAdapter("http+unix:///run/user/1000/podman/podman.sock"))
"http://", UDSAdapater("http+unix:///run/user/1000/podman/podman.sock"))
"""
self.poolmanager: Optional[UDSPoolManager] = None

View File

@ -4,11 +4,12 @@ import logging
import os
from contextlib import AbstractContextManager
from pathlib import Path
from typing import Any, Optional
from typing import Any, Dict, Optional
import xdg.BaseDirectory
from podman.api import cached_property
from podman.api.client import APIClient
from podman.api.path_utils import get_runtime_dir
from podman.domain.config import PodmanConfig
from podman.domain.containers_manager import ContainersManager
from podman.domain.events import EventsManager
@ -69,7 +70,9 @@ class PodmanClient(AbstractContextManager):
# Override configured identity, if provided in arguments
api_kwargs["identity"] = kwargs.get("identity", str(connection.identity))
elif "base_url" not in api_kwargs:
path = str(Path(get_runtime_dir()) / "podman" / "podman.sock")
path = str(
Path(xdg.BaseDirectory.get_runtime_dir(strict=False)) / "podman" / "podman.sock"
)
api_kwargs["base_url"] = "http+unix://" + path
self.api = APIClient(**api_kwargs)
@ -82,14 +85,13 @@ class PodmanClient(AbstractContextManager):
@classmethod
def from_env(
cls,
*,
version: str = "auto",
timeout: Optional[int] = None,
max_pool_size: Optional[int] = None,
ssl_version: Optional[int] = None, # pylint: disable=unused-argument
assert_hostname: bool = False, # pylint: disable=unused-argument
environment: Optional[dict[str, str]] = None,
credstore_env: Optional[dict[str, str]] = None,
environment: Optional[Dict[str, str]] = None,
credstore_env: Optional[Dict[str, str]] = None,
use_ssh_client: bool = True, # pylint: disable=unused-argument
) -> "PodmanClient":
"""Returns connection to service using environment variables and parameters.
@ -122,24 +124,23 @@ class PodmanClient(AbstractContextManager):
if version == "auto":
version = None
kwargs = {
'version': version,
'timeout': timeout,
'tls': False,
'credstore_env': credstore_env,
'max_pool_size': max_pool_size,
}
host = environment.get("CONTAINER_HOST") or environment.get("DOCKER_HOST") or None
if host is not None:
kwargs['base_url'] = host
if host is None:
raise ValueError("CONTAINER_HOST or DOCKER_HOST must be set to URL of podman service.")
return PodmanClient(**kwargs)
return PodmanClient(
base_url=host,
version=version,
timeout=timeout,
tls=False,
credstore_env=credstore_env,
max_pool_size=max_pool_size,
)
@cached_property
def containers(self) -> ContainersManager:
"""Returns Manager for operations on containers stored by a Podman service."""
return ContainersManager(client=self.api, podman_client=self)
return ContainersManager(client=self.api)
@cached_property
def images(self) -> ImagesManager:
@ -175,7 +176,7 @@ class PodmanClient(AbstractContextManager):
def system(self):
return SystemManager(client=self.api)
def df(self) -> dict[str, Any]: # pylint: disable=missing-function-docstring,invalid-name
def df(self) -> Dict[str, Any]: # pylint: disable=missing-function-docstring,invalid-name
return self.system.df()
df.__doc__ = SystemManager.df.__doc__

View File

@ -3,11 +3,11 @@
import sys
import urllib
from pathlib import Path
from typing import Optional
import json
from typing import Dict, Optional
import xdg.BaseDirectory
from podman.api import cached_property
from podman.api.path_utils import get_xdg_config_home
if sys.version_info >= (3, 11):
from tomllib import loads as toml_loads
@ -24,7 +24,7 @@ else:
class ServiceConnection:
"""ServiceConnection defines a connection to the Podman service."""
def __init__(self, name: str, attrs: dict[str, str]):
def __init__(self, name: str, attrs: Dict[str, str]):
"""Create a Podman ServiceConnection."""
self.name = name
self.attrs = attrs
@ -48,16 +48,12 @@ class ServiceConnection:
@cached_property
def url(self):
"""urllib.parse.ParseResult: Returns URL for service connection."""
if self.attrs.get("uri"):
return urllib.parse.urlparse(self.attrs.get("uri"))
return urllib.parse.urlparse(self.attrs.get("URI"))
return urllib.parse.urlparse(self.attrs.get("uri"))
@cached_property
def identity(self):
"""Path: Returns Path to identity file for service connection."""
if self.attrs.get("identity"):
return Path(self.attrs.get("identity"))
return Path(self.attrs.get("Identity"))
return Path(self.attrs.get("identity"))
class PodmanConfig:
@ -66,46 +62,17 @@ class PodmanConfig:
def __init__(self, path: Optional[str] = None):
"""Read Podman configuration from users XDG_CONFIG_HOME."""
self.is_default = False
if path is None:
home = Path(get_xdg_config_home())
self.path = home / "containers" / "podman-connections.json"
old_toml_file = home / "containers" / "containers.conf"
self.is_default = True
# this elif is only for testing purposes
elif "@@is_test@@" in path:
test_path = path.replace("@@is_test@@", '')
self.path = Path(test_path) / "podman-connections.json"
old_toml_file = Path(test_path) / "containers.conf"
self.is_default = True
home = Path(xdg.BaseDirectory.xdg_config_home)
self.path = home / "containers" / "containers.conf"
else:
self.path = Path(path)
old_toml_file = None
self.attrs = {}
if self.path.exists():
try:
with open(self.path, encoding='utf-8') as file:
self.attrs = json.load(file)
except Exception:
# if the user specifies a path, it can either be a JSON file
# or a TOML file - so try TOML next
try:
with self.path.open(encoding='utf-8') as file:
buffer = file.read()
loaded_toml = toml_loads(buffer)
self.attrs.update(loaded_toml)
except Exception as e:
raise AttributeError(
"The path given is neither a JSON nor a TOML connections file"
) from e
# Read the old toml file configuration
if self.is_default and old_toml_file.exists():
with old_toml_file.open(encoding='utf-8') as file:
with self.path.open(encoding='utf-8') as file:
buffer = file.read()
loaded_toml = toml_loads(buffer)
self.attrs.update(loaded_toml)
self.attrs = toml_loads(buffer)
def __hash__(self) -> int:
return hash(tuple(self.path.name))
@ -122,16 +89,15 @@ class PodmanConfig:
@cached_property
def services(self):
"""dict[str, ServiceConnection]: Returns list of service connections.
"""Dict[str, ServiceConnection]: Returns list of service connections.
Examples:
podman_config = PodmanConfig()
address = podman_config.services["testing"]
print(f"Testing service address {address}")
"""
services: dict[str, ServiceConnection] = {}
services: Dict[str, ServiceConnection] = {}
# read the keys of the toml file first
engine = self.attrs.get("engine")
if engine:
destinations = engine.get("service_destinations")
@ -139,35 +105,17 @@ class PodmanConfig:
connection = ServiceConnection(key, attrs=destinations[key])
services[key] = connection
# read the keys of the json file next
# this will ensure that if the new json file and the old toml file
# has a connection with the same name defined, we always pick the
# json one
connection = self.attrs.get("Connection")
if connection:
destinations = connection.get("Connections")
for key in destinations:
connection = ServiceConnection(key, attrs=destinations[key])
services[key] = connection
return services
@cached_property
def active_service(self):
"""Optional[ServiceConnection]: Returns active connection."""
# read the new json file format
connection = self.attrs.get("Connection")
if connection:
active = connection.get("Default")
destinations = connection.get("Connections")
return ServiceConnection(active, attrs=destinations[active])
# if we are here, that means there was no default in the new json file
engine = self.attrs.get("engine")
if engine:
active = engine.get("active_service")
destinations = engine.get("service_destinations")
return ServiceConnection(active, attrs=destinations[active])
for key in destinations:
if key == active:
return ServiceConnection(key, attrs=destinations[key])
return None

View File

@ -3,14 +3,12 @@
import json
import logging
import shlex
from collections.abc import Iterable, Iterator, Mapping
from contextlib import suppress
from typing import Any, Optional, Union
from typing import Any, Dict, Iterable, Iterator, List, Mapping, Optional, Tuple, Union
import requests
from podman import api
from podman.api.output_utils import demux_output
from podman.domain.images import Image
from podman.domain.images_manager import ImagesManager
from podman.domain.manager import PodmanResource
@ -43,20 +41,15 @@ class Container(PodmanResource):
@property
def labels(self):
"""dict[str, str]: Returns labels associated with container."""
labels = None
with suppress(KeyError):
# Container created from ``list()`` operation
if "Labels" in self.attrs:
labels = self.attrs["Labels"]
# Container created from ``get()`` operation
else:
labels = self.attrs["Config"].get("Labels", {})
return labels or {}
return self.attrs["Labels"]
return self.attrs["Config"]["Labels"]
return {}
@property
def status(self):
"""Literal["created", "initialized", "running", "stopped", "exited", "unknown"]:
Returns status of container."""
"""Literal["running", "stopped", "exited", "unknown"]: Returns status of container."""
with suppress(KeyError):
return self.attrs["State"]["Status"]
return "unknown"
@ -99,7 +92,7 @@ class Container(PodmanResource):
Keyword Args:
author (str): Name of commit author
changes (list[str]): Instructions to apply during commit
changes (List[str]): Instructions to apply during commit
comment (str): Commit message to include with Image, overrides keyword message
conf (dict[str, Any]): Ignored.
format (str): Format of the image manifest and metadata
@ -122,7 +115,7 @@ class Container(PodmanResource):
body = response.json()
return ImagesManager(client=self.client).get(body["Id"])
def diff(self) -> list[dict[str, int]]:
def diff(self) -> List[Dict[str, int]]:
"""Report changes of a container's filesystem.
Raises:
@ -132,11 +125,10 @@ class Container(PodmanResource):
response.raise_for_status()
return response.json()
# pylint: disable=too-many-arguments
# pylint: disable=too-many-arguments,unused-argument
def exec_run(
self,
cmd: Union[str, list[str]],
*,
cmd: Union[str, List[str]],
stdout: bool = True,
stderr: bool = True,
stdin: bool = False,
@ -145,14 +137,11 @@ class Container(PodmanResource):
user=None,
detach: bool = False,
stream: bool = False,
socket: bool = False, # pylint: disable=unused-argument
environment: Union[Mapping[str, str], list[str]] = None,
socket: bool = False,
environment: Union[Mapping[str, str], List[str]] = None,
workdir: str = None,
demux: bool = False,
) -> tuple[
Optional[int],
Union[Iterator[Union[bytes, tuple[bytes, bytes]]], Any, tuple[bytes, bytes]],
]:
) -> Tuple[Optional[int], Union[Iterator[bytes], Any, Tuple[bytes, bytes]]]:
"""Run given command inside container and return results.
Args:
@ -162,32 +151,28 @@ class Container(PodmanResource):
stdin: Attach to stdin. Default: False
tty: Allocate a pseudo-TTY. Default: False
privileged: Run as privileged.
user: User to execute command as.
user: User to execute command as. Default: root
detach: If true, detach from the exec command.
Default: False
stream: Stream response data. Ignored if ``detach`` is ``True``. Default: False
stream: Stream response data. Default: False
socket: Return the connection socket to allow custom
read/write operations. Default: False
environment: A dictionary or a list[str] in
environment: A dictionary or a List[str] in
the following format ["PASSWORD=xxx"] or
{"PASSWORD": "xxx"}.
workdir: Path to working directory for this exec session
demux: Return stdout and stderr separately
Returns:
A tuple of (``response_code``, ``output``).
``response_code``:
The exit code of the provided command. ``None`` if ``stream``.
``output``:
If ``stream``, then a generator yielding response chunks.
If ``demux``, then a tuple of (``stdout``, ``stderr``).
Else the response content.
First item is the command response code
Second item is the requests response content
Raises:
NotImplementedError: method not implemented.
APIError: when service reports error
"""
# pylint: disable-msg=too-many-locals
user = user or "root"
if isinstance(environment, dict):
environment = [f"{k}={v}" for k, v in environment.items()]
data = {
@ -199,32 +184,21 @@ class Container(PodmanResource):
"Env": environment,
"Privileged": privileged,
"Tty": tty,
"User": user,
"WorkingDir": workdir,
}
if user:
data["User"] = user
stream = stream and not detach
# create the exec instance
response = self.client.post(f"/containers/{self.name}/exec", data=json.dumps(data))
response.raise_for_status()
exec_id = response.json()['Id']
# start the exec instance, this will store command output
start_resp = self.client.post(
f"/exec/{exec_id}/start", data=json.dumps({"Detach": detach, "Tty": tty}), stream=stream
f"/exec/{exec_id}/start", data=json.dumps({"Detach": detach, "Tty": tty})
)
start_resp.raise_for_status()
if stream:
return None, api.stream_frames(start_resp, demux=demux)
# get and return exec information
response = self.client.get(f"/exec/{exec_id}/json")
response.raise_for_status()
if demux:
stdout_data, stderr_data = demux_output(start_resp.content)
return response.json().get('ExitCode'), (stdout_data, stderr_data)
return response.json().get('ExitCode'), start_resp.content
def export(self, chunk_size: int = api.DEFAULT_CHUNK_SIZE) -> Iterator[bytes]:
@ -243,11 +217,12 @@ class Container(PodmanResource):
response = self.client.get(f"/containers/{self.id}/export", stream=True)
response.raise_for_status()
yield from response.iter_content(chunk_size=chunk_size)
for out in response.iter_content(chunk_size=chunk_size):
yield out
def get_archive(
self, path: str, chunk_size: int = api.DEFAULT_CHUNK_SIZE
) -> tuple[Iterable, dict[str, Any]]:
) -> Tuple[Iterable, Dict[str, Any]]:
"""Download a file or folder from the container's filesystem.
Args:
@ -265,12 +240,7 @@ class Container(PodmanResource):
stat = api.decode_header(stat)
return response.iter_content(chunk_size=chunk_size), stat
def init(self) -> None:
"""Initialize the container."""
response = self.client.post(f"/containers/{self.id}/init")
response.raise_for_status()
def inspect(self) -> dict:
def inspect(self) -> Dict:
"""Inspect a container.
Raises:
@ -310,7 +280,7 @@ class Container(PodmanResource):
params = {
"follow": kwargs.get("follow", kwargs.get("stream", None)),
"since": api.prepare_timestamp(kwargs.get("since")),
"stderr": kwargs.get("stderr", True),
"stderr": kwargs.get("stderr", None),
"stdout": kwargs.get("stdout", True),
"tail": kwargs.get("tail"),
"timestamps": kwargs.get("timestamps"),
@ -421,7 +391,7 @@ class Container(PodmanResource):
def stats(
self, **kwargs
) -> Union[bytes, dict[str, Any], Iterator[bytes], Iterator[dict[str, Any]]]:
) -> Union[bytes, Dict[str, Any], Iterator[bytes], Iterator[Dict[str, Any]]]:
"""Return statistics for container.
Keyword Args:
@ -476,7 +446,7 @@ class Container(PodmanResource):
body = response.json()
raise APIError(body["cause"], response=response, explanation=body["message"])
def top(self, **kwargs) -> Union[Iterator[dict[str, Any]], dict[str, Any]]:
def top(self, **kwargs) -> Union[Iterator[Dict[str, Any]], Dict[str, Any]]:
"""Report on running processes in the container.
Keyword Args:
@ -506,234 +476,19 @@ class Container(PodmanResource):
response = self.client.post(f"/containers/{self.id}/unpause")
response.raise_for_status()
def update(self, **kwargs) -> None:
def update(self, **kwargs):
"""Update resource configuration of the containers.
Keyword Args:
Please refer to Podman API documentation for details:
https://docs.podman.io/en/latest/_static/api.html#tag/containers/operation/ContainerUpdateLibpod
restart_policy (str): New restart policy for the container.
restart_retries (int): New amount of retries for the container's restart policy.
Only allowed if restartPolicy is set to on-failure
blkio_weight_device tuple(str, int):Block IO weight (relative device weight)
in the form: (device_path, weight)
blockio (dict): LinuxBlockIO for Linux cgroup 'blkio' resource management
Example:
blockio = {
"leafWeight": 0
"throttleReadBpsDevice": [{
"major": 0,
"minor": 0,
"rate": 0
}],
"throttleReadIopsDevice": [{
"major": 0,
"minor": 0,
"rate": 0
}],
"throttleWriteBpsDevice": [{
"major": 0,
"minor": 0,
"rate": 0
}],
"throttleWriteIopsDevice": [{
"major": 0,
"minor": 0,
"rate": 0
}],
"weight": 0,
"weightDevice": [{
"leafWeight": 0,
"major": 0,
"minor": 0,
"weight": 0
}],
}
cpu (dict): LinuxCPU for Linux cgroup 'cpu' resource management
Example:
cpu = {
"burst": 0,
"cpus": "string",
"idle": 0,
"mems": "string",
"period": 0
"quota": 0,
"realtimePeriod": 0,
"realtimeRuntime": 0,
"shares": 0
}
device_read_bps (list(dict)): Limit read rate (bytes per second) from a device,
in the form: [{"Path": "string", "Rate": 0}]
device_read_iops (list(dict)): Limit read rate (IO operations per second) from a device,
in the form: [{"Path": "string", "Rate": 0}]
device_write_bps (list(dict)): Limit write rate (bytes per second) to a device,
in the form: [{"Path": "string", "Rate": 0}]
device_write_iops (list(dict)): Limit write rate (IO operations per second) to a device,
in the form: [{"Path": "string", "Rate": 0}]
devices (list(dict)): Devices configures the device allowlist.
Example:
devices = [{
access: "string"
allow: 0,
major: 0,
minor: 0,
type: "string"
}]
health_cmd (str): set a healthcheck command for the container ('None' disables the
existing healthcheck)
health_interval (str): set an interval for the healthcheck (a value of disable results
in no automatic timer setup)(Changing this setting resets timer.) (default "30s")
health_log_destination (str): set the destination of the HealthCheck log. Directory
path, local or events_logger (local use container state file)(Warning: Changing
this setting may cause the loss of previous logs.) (default "local")
health_max_log_count (int): set maximum number of attempts in the HealthCheck log file.
('0' value means an infinite number of attempts in the log file) (default 5)
health_max_logs_size (int): set maximum length in characters of stored HealthCheck log.
('0' value means an infinite log length) (default 500)
health_on_failure (str): action to take once the container turns unhealthy
(default "none")
health_retries (int): the number of retries allowed before a healthcheck is considered
to be unhealthy (default 3)
health_start_period (str): the initialization time needed for a container to bootstrap
(default "0s")
health_startup_cmd (str): Set a startup healthcheck command for the container
health_startup_interval (str): Set an interval for the startup healthcheck. Changing
this setting resets the timer, depending on the state of the container.
(default "30s")
health_startup_retries (int): Set the maximum number of retries before the startup
healthcheck will restart the container
health_startup_success (int): Set the number of consecutive successes before the
startup healthcheck is marked as successful and the normal healthcheck begins
(0 indicates any success will start the regular healthcheck)
health_startup_timeout (str): Set the maximum amount of time that the startup
healthcheck may take before it is considered failed (default "30s")
health_timeout (str): the maximum time allowed to complete the healthcheck before an
interval is considered failed (default "30s")
no_healthcheck (bool): Disable healthchecks on container
hugepage_limits (list(dict)): Hugetlb limits (in bytes).
Default to reservation limits if supported.
Example:
huugepage_limits = [{"limit": 0, "pageSize": "string"}]
memory (dict): LinuxMemory for Linux cgroup 'memory' resource management
Example:
memory = {
"checkBeforeUpdate": True,
"disableOOMKiller": True,
"kernel": 0,
"kernelTCP": 0,
"limit": 0,
"reservation": 0,
"swap": 0,
"swappiness": 0,
"useHierarchy": True,
}
network (dict): LinuxNetwork identification and priority configuration
Example:
network = {
"classID": 0,
"priorities": {
"name": "string",
"priority": 0
}
)
pids (dict): LinuxPids for Linux cgroup 'pids' resource management (Linux 4.3)
Example:
pids = {
"limit": 0
}
rdma (dict): Rdma resource restriction configuration. Limits are a set of key value
pairs that define RDMA resource limits, where the key is device name and value
is resource limits.
Example:
rdma = {
"property1": {
"hcaHandles": 0
"hcaObjects": 0
},
"property2": {
"hcaHandles": 0
"hcaObjects": 0
},
...
}
unified (dict): Unified resources.
Example:
unified = {
"property1": "value1",
"property2": "value2",
...
}
Raises:
NotImplementedError: Podman service unsupported operation.
"""
data = {}
params = {}
health_commands_data = [
"health_cmd",
"health_interval",
"health_log_destination",
"health_max_log_count",
"health_max_logs_size",
"health_on_failure",
"health_retries",
"health_start_period",
"health_startup_cmd",
"health_startup_interval",
"health_startup_retries",
"health_startup_success",
"health_startup_timeout",
"health_timeout",
]
# the healthcheck section of parameters accepted can be either no_healthcheck or a series
# of healthcheck parameters
if kwargs.get("no_healthcheck"):
for command in health_commands_data:
if command in kwargs:
raise ValueError(f"Cannot set {command} when no_healthcheck is True")
data["no_healthcheck"] = kwargs.get("no_healthcheck")
else:
for hc in health_commands_data:
if hc in kwargs:
data[hc] = kwargs.get(hc)
data_mapping = {
"BlkIOWeightDevice": "blkio_weight_device",
"blockio": "blockIO",
"cpu": "cpu",
"device_read_bps": "DeviceReadBPs",
"device_read_iops": "DeviceReadIOps",
"device_write_bps": "DeviceWriteBPs",
"device_write_iops": "DeviceWriteIOps",
"devices": "devices",
"hugepage_limits": "hugepageLimits",
"memory": "memory",
"network": "network",
"pids": "pids",
"rdma": "rdma",
"unified": "unified",
}
for kwarg_key, data_key in data_mapping.items():
value = kwargs.get(kwarg_key)
if value is not None:
data[data_key] = value
if kwargs.get("restart_policy"):
params["restartPolicy"] = kwargs.get("restart_policy")
if kwargs.get("restart_retries"):
params["restartRetries"] = kwargs.get("restart_retries")
response = self.client.post(
f"/containers/{self.id}/update", params=params, data=json.dumps(data)
)
response.raise_for_status()
raise NotImplementedError("Container.update() is not supported by Podman service.")
def wait(self, **kwargs) -> int:
"""Block until the container enters given state.
Keyword Args:
condition (Union[str, list[str]]): Container state on which to release.
condition (Union[str, List[str]]): Container state on which to release.
One or more of: "configured", "created", "running", "stopped",
"paused", "exited", "removing", "stopping".
interval (int): Time interval to wait before polling for completion.

View File

@ -5,8 +5,7 @@ import copy
import logging
import re
from contextlib import suppress
from typing import Any, Union
from collections.abc import MutableMapping
from typing import Any, Dict, List, MutableMapping, Union
from podman import api
from podman.domain.containers import Container
@ -17,17 +16,12 @@ from podman.errors import ImageNotFound
logger = logging.getLogger("podman.containers")
NAMED_VOLUME_PATTERN = re.compile(r"[a-zA-Z0-9][a-zA-Z0-9_.-]*")
class CreateMixin: # pylint: disable=too-few-public-methods
"""Class providing create method for ContainersManager."""
def create(
self,
image: Union[Image, str],
command: Union[str, list[str], None] = None,
**kwargs,
self, image: Union[Image, str], command: Union[str, List[str], None] = None, **kwargs
) -> Container:
"""Create a container.
@ -38,12 +32,12 @@ class CreateMixin: # pylint: disable=too-few-public-methods
Keyword Args:
auto_remove (bool): Enable auto-removal of the container on daemon side when the
container's process exits.
blkio_weight_device (dict[str, Any]): Block IO weight (relative device weight)
blkio_weight_device (Dict[str, Any]): Block IO weight (relative device weight)
in the form of: [{"Path": "device_path", "Weight": weight}].
blkio_weight (int): Block IO weight (relative weight), accepts a weight value
between 10 and 1000.
cap_add (list[str]): Add kernel capabilities. For example: ["SYS_ADMIN", "MKNOD"]
cap_drop (list[str]): Drop kernel capabilities.
cap_add (List[str]): Add kernel capabilities. For example: ["SYS_ADMIN", "MKNOD"]
cap_drop (List[str]): Drop kernel capabilities.
cgroup_parent (str): Override the default parent cgroup.
cpu_count (int): Number of usable CPUs (Windows only).
cpu_percent (int): Usable percentage of the available CPUs (Windows only).
@ -56,32 +50,32 @@ class CreateMixin: # pylint: disable=too-few-public-methods
cpuset_mems (str): Memory nodes (MEMs) in which to allow execution (0-3, 0,1).
Only effective on NUMA systems.
detach (bool): Run container in the background and return a Container object.
device_cgroup_rules (list[str]): A list of cgroup rules to apply to the container.
device_cgroup_rules (List[str]): A list of cgroup rules to apply to the container.
device_read_bps: Limit read rate (bytes per second) from a device in the form of:
`[{"Path": "device_path", "Rate": rate}]`
device_read_iops: Limit read rate (IO per second) from a device.
device_write_bps: Limit write rate (bytes per second) from a device.
device_write_iops: Limit write rate (IO per second) from a device.
devices (list[str]): Expose host devices to the container, as a list[str] in the form
devices (List[str]): Expose host devices to the container, as a List[str] in the form
<path_on_host>:<path_in_container>:<cgroup_permissions>.
For example:
/dev/sda:/dev/xvda:rwm allows the container to have read-write access to the
host's /dev/sda via a node named /dev/xvda inside the container.
dns (list[str]): Set custom DNS servers.
dns_opt (list[str]): Additional options to be added to the container's resolv.conf file.
dns_search (list[str]): DNS search domains.
domainname (Union[str, list[str]]): Set custom DNS search domains.
entrypoint (Union[str, list[str]]): The entrypoint for the container.
environment (Union[dict[str, str], list[str]): Environment variables to set inside
the container, as a dictionary or a list[str] in the format
dns (List[str]): Set custom DNS servers.
dns_opt (List[str]): Additional options to be added to the container's resolv.conf file.
dns_search (List[str]): DNS search domains.
domainname (Union[str, List[str]]): Set custom DNS search domains.
entrypoint (Union[str, List[str]]): The entrypoint for the container.
environment (Union[Dict[str, str], List[str]): Environment variables to set inside
the container, as a dictionary or a List[str] in the format
["SOMEVARIABLE=xxx", "SOMEOTHERVARIABLE=xyz"].
extra_hosts (dict[str, str]): Additional hostnames to resolve inside the container,
extra_hosts (Dict[str, str]): Additional hostnames to resolve inside the container,
as a mapping of hostname to IP address.
group_add (list[str]): List of additional group names and/or IDs that the container
group_add (List[str]): List of additional group names and/or IDs that the container
process will run as.
healthcheck (dict[str,Any]): Specify a test to perform to check that the
healthcheck (Dict[str,Any]): Specify a test to perform to check that the
container is healthy.
health_check_on_failure_action (int): Specify an action if a healthcheck fails.
hostname (str): Optional hostname for the container.
@ -90,14 +84,14 @@ class CreateMixin: # pylint: disable=too-few-public-methods
ipc_mode (str): Set the IPC mode for the container.
isolation (str): Isolation technology to use. Default: `None`.
kernel_memory (int or str): Kernel memory limit
labels (Union[dict[str, str], list[str]): A dictionary of name-value labels (e.g.
labels (Union[Dict[str, str], List[str]): A dictionary of name-value labels (e.g.
{"label1": "value1", "label2": "value2"}) or a list of names of labels to set
with empty values (e.g. ["label1", "label2"])
links (Optional[dict[str, str]]): Mapping of links using the {'container': 'alias'}
links (Optional[Dict[str, str]]): Mapping of links using the {'container': 'alias'}
format. The alias is optional. Containers declared in this dict will be linked to
the new container using the provided alias. Default: None.
log_config (LogConfig): Logging configuration.
lxc_config (dict[str, str]): LXC config.
lxc_config (Dict[str, str]): LXC config.
mac_address (str): MAC address to assign to the container.
mem_limit (Union[int, str]): Memory limit. Accepts float values (which represent the
memory limit of the created container in bytes) or a string with a units
@ -108,7 +102,7 @@ class CreateMixin: # pylint: disable=too-few-public-methods
between 0 and 100.
memswap_limit (Union[int, str]): Maximum amount of memory + swap a container is allowed
to consume.
mounts (list[Mount]): Specification for mounts to be added to the container. More
mounts (List[Mount]): Specification for mounts to be added to the container. More
powerful alternative to volumes. Each item in the list is expected to be a
Mount object.
For example:
@ -154,7 +148,7 @@ class CreateMixin: # pylint: disable=too-few-public-methods
]
name (str): The name for this container.
nano_cpus (int): CPU quota in units of 1e-9 CPUs.
networks (dict[str, dict[str, Union[str, list[str]]):
networks (Dict[str, Dict[str, Union[str, List[str]]):
Networks which will be connected to container during container creation
Values of the network configuration can be :
@ -169,7 +163,6 @@ class CreateMixin: # pylint: disable=too-few-public-methods
- container:<name|id>: Reuse another container's network
stack.
- host: Use the host network stack.
- ns:<path>: User defined netns path.
Incompatible with network.
oom_kill_disable (bool): Whether to disable OOM killer.
@ -180,23 +173,7 @@ class CreateMixin: # pylint: disable=too-few-public-methods
pids_limit (int): Tune a container's pids limit. Set -1 for unlimited.
platform (str): Platform in the format os[/arch[/variant]]. Only used if the method
needs to pull the requested image.
ports (
dict[
Union[int, str],
Union[
int,
Tuple[str, int],
list[int],
dict[
str,
Union[
int,
Tuple[str, int],
list[int]
]
]
]
]): Ports to bind inside the container.
ports (Dict[str, Union[int, Tuple[str, int], List[int], Dict[str, Union[int, Tuple[str, int], List[int]]]]]): Ports to bind inside the container.
The keys of the dictionary are the ports to bind inside the container, either as an
integer or a string in the form port/protocol, where the protocol is either
@ -246,7 +223,7 @@ class CreateMixin: # pylint: disable=too-few-public-methods
read_write_tmpfs (bool): Mount temporary file systems as read write,
in case of read_only options set to True. Default: False
remove (bool): Remove the container when it has finished running. Default: False.
restart_policy (dict[str, Union[str, int]]): Restart the container when it exits.
restart_policy (Dict[str, Union[str, int]]): Restart the container when it exits.
Configured as a dictionary with keys:
- Name: One of on-failure, or always.
@ -254,7 +231,7 @@ class CreateMixin: # pylint: disable=too-few-public-methods
For example: {"Name": "on-failure", "MaximumRetryCount": 5}
runtime (str): Runtime to use with this container.
secrets (list[Union[str, Secret, dict[str, Union[str, int]]]]): Secrets to
secrets (List[Union[str, Secret, Dict[str, Union[str, int]]]]): Secrets to
mount to this container.
For example:
@ -288,44 +265,42 @@ class CreateMixin: # pylint: disable=too-few-public-methods
},
]
secret_env (dict[str, str]): Secrets to add as environment variables available in the
secret_env (Dict[str, str]): Secrets to add as environment variables available in the
container.
For example: {"VARIABLE1": "NameOfSecret", "VARIABLE2": "NameOfAnotherSecret"}
security_opt (list[str]): A list[str]ing values to customize labels for MLS systems,
security_opt (List[str]): A List[str]ing values to customize labels for MLS systems,
such as SELinux.
shm_size (Union[str, int]): Size of /dev/shm (e.g. 1G).
stdin_open (bool): Keep STDIN open even if not attached.
stdout (bool): Return logs from STDOUT when detach=False. Default: True.
stderr (bool): Return logs from STDERR when detach=False. Default: False.
stop_signal (str): The stop signal to use to stop the container (e.g. SIGINT).
storage_opt (dict[str, str]): Storage driver options per container as a
storage_opt (Dict[str, str]): Storage driver options per container as a
key-value mapping.
stream (bool): If true and detach is false, return a log generator instead of a string.
Ignored if detach is true. Default: False.
sysctls (dict[str, str]): Kernel parameters to set in the container.
tmpfs (dict[str, str]): Temporary filesystems to mount, as a dictionary mapping a
sysctls (Dict[str, str]): Kernel parameters to set in the container.
tmpfs (Dict[str, str]): Temporary filesystems to mount, as a dictionary mapping a
path inside the container to options for that path.
For example: {'/mnt/vol2': '', '/mnt/vol1': 'size=3G,uid=1000'}
tty (bool): Allocate a pseudo-TTY.
ulimits (list[Ulimit]): Ulimits to set inside the container.
ulimits (List[Ulimit]): Ulimits to set inside the container.
use_config_proxy (bool): If True, and if the docker client configuration
file (~/.config/containers/config.json by default) contains a proxy configuration,
the corresponding environment variables will be set in the container being built.
user (Union[str, int]): Username or UID to run commands as inside the container.
userns_mode (str): Sets the user namespace mode for the container when user namespace
remapping option is enabled. Supported values documented
`here <https://docs.podman.io/en/latest/markdown/options/userns.container.html#userns-mode>`_
remapping option is enabled. Supported values documented `here <https://docs.podman.io/en/latest/markdown/options/userns.container.html#userns-mode>`_
uts_mode (str): Sets the UTS namespace mode for the container.
`These <https://docs.podman.io/en/latest/markdown/options/uts.container.html>`_
are the supported values.
`These <https://docs.podman.io/en/latest/markdown/options/uts.container.html>`_ are the supported values.
version (str): The version of the API to use. Set to auto to automatically detect
the server's version. Default: 3.0.0
volume_driver (str): The name of a volume driver/plugin.
volumes (dict[str, dict[str, Union[str, list]]]): A dictionary to configure
volumes (Dict[str, Dict[str, Union[str, list]]]): A dictionary to configure
volumes mounted inside the container.
The key is either the host path or a volume name, and the value is
a dictionary with the keys:
@ -353,9 +328,8 @@ class CreateMixin: # pylint: disable=too-few-public-methods
}
volumes_from (list[str]): List of container names or IDs to get volumes from.
volumes_from (List[str]): List of container names or IDs to get volumes from.
working_dir (str): Path to the working directory.
workdir (str): Alias of working_dir - Path to the working directory.
Returns:
A Container object.
@ -366,8 +340,6 @@ class CreateMixin: # pylint: disable=too-few-public-methods
"""
if isinstance(image, Image):
image = image.id
if isinstance(command, str):
command = [command]
payload = {"image": image, "command": command}
payload.update(kwargs)
@ -375,9 +347,7 @@ class CreateMixin: # pylint: disable=too-few-public-methods
payload = api.prepare_body(payload)
response = self.client.post(
"/containers/create",
headers={"content-type": "application/json"},
data=payload,
"/containers/create", headers={"content-type": "application/json"}, data=payload
)
response.raise_for_status(not_found=ImageNotFound)
@ -385,51 +355,9 @@ class CreateMixin: # pylint: disable=too-few-public-methods
return self.get(container_id)
@staticmethod
def _convert_env_list_to_dict(env_list):
"""Convert a list of environment variables to a dictionary.
Args:
env_list (List[str]): List of environment variables in the format ["KEY=value"]
Returns:
Dict[str, str]: Dictionary of environment variables
Raises:
ValueError: If any environment variable is not in the correct format
"""
if not isinstance(env_list, list):
raise TypeError(f"Expected list, got {type(env_list).__name__}")
env_dict = {}
for env_var in env_list:
if not isinstance(env_var, str):
raise TypeError(
f"Environment variable must be a string, "
f"got {type(env_var).__name__}: {repr(env_var)}"
)
# Handle empty strings
if not env_var.strip():
raise ValueError("Environment variable cannot be empty")
if "=" not in env_var:
raise ValueError(
f"Environment variable '{env_var}' is not in the correct format. "
"Expected format: 'KEY=value'"
)
key, value = env_var.split("=", 1) # Split on first '=' only
# Validate key is not empty
if not key.strip():
raise ValueError(f"Environment variable has empty key: '{env_var}'")
env_dict[key] = value
return env_dict
# pylint: disable=too-many-locals,too-many-statements,too-many-branches
@staticmethod
def _render_payload(kwargs: MutableMapping[str, Any]) -> dict[str, Any]:
def _render_payload(kwargs: MutableMapping[str, Any]) -> Dict[str, Any]:
"""Map create/run kwargs into body parameters."""
args = copy.copy(kwargs)
@ -454,40 +382,21 @@ class CreateMixin: # pylint: disable=too-few-public-methods
with suppress(KeyError):
del args[key]
# Handle environment variables
environment = args.pop("environment", None)
if environment is not None:
if isinstance(environment, list):
try:
environment = CreateMixin._convert_env_list_to_dict(environment)
except ValueError as e:
raise ValueError(
"Failed to convert environment variables list to dictionary. "
f"Error: {str(e)}"
) from e
elif not isinstance(environment, dict):
raise TypeError(
"Environment variables must be provided as either a dictionary "
"or a list of strings in the format ['KEY=value']"
)
# These keywords are not supported for various reasons.
unsupported_keys = set(args.keys()).intersection(
(
"blkio_weight",
"blkio_weight_device", # FIXME In addition to device Major/Minor include path
"device_cgroup_rules", # FIXME Where to map for Podman API?
"device_read_bps", # FIXME In addition to device Major/Minor include path
"device_read_iops", # FIXME In addition to device Major/Minor include path
"device_requests", # FIXME In addition to device Major/Minor include path
"device_write_bps", # FIXME In addition to device Major/Minor include path
"device_write_iops", # FIXME In addition to device Major/Minor include path
"domainname",
"network_disabled", # FIXME Where to map for Podman API?
"storage_opt", # FIXME Where to map for Podman API?
"tmpfs", # FIXME Where to map for Podman API?
)
)
unsupported_keys = set(args.keys()).intersection((
"blkio_weight",
"blkio_weight_device", # FIXME In addition to device Major/Minor include path
"device_cgroup_rules", # FIXME Where to map for Podman API?
"device_read_bps", # FIXME In addition to device Major/Minor include path
"device_read_iops", # FIXME In addition to device Major/Minor include path
"device_requests", # FIXME In addition to device Major/Minor include path
"device_write_bps", # FIXME In addition to device Major/Minor include path
"device_write_iops", # FIXME In addition to device Major/Minor include path
"domainname",
"network_disabled", # FIXME Where to map for Podman API?
"storage_opt", # FIXME Where to map for Podman API?
"tmpfs", # FIXME Where to map for Podman API?
))
if len(unsupported_keys) > 0:
raise TypeError(
f"""Keyword(s) '{" ,".join(unsupported_keys)}' are"""
@ -497,13 +406,6 @@ class CreateMixin: # pylint: disable=too-few-public-methods
def pop(k):
return args.pop(k, None)
def normalize_nsmode(
mode: Union[str, MutableMapping[str, str]],
) -> dict[str, str]:
if isinstance(mode, dict):
return mode
return {"nsmode": mode}
def to_bytes(size: Union[int, str, None]) -> Union[int, None]:
"""
Converts str or int to bytes.
@ -527,9 +429,9 @@ class CreateMixin: # pylint: disable=too-few-public-methods
try:
return int(size)
except ValueError as bad_size:
mapping = {"b": 0, "k": 1, "m": 2, "g": 3}
mapping_regex = "".join(mapping.keys())
search = re.search(rf"^(\d+)([{mapping_regex}])$", size.lower())
mapping = {'b': 0, 'k': 1, 'm': 2, 'g': 3}
mapping_regex = ''.join(mapping.keys())
search = re.search(rf'^(\d+)([{mapping_regex}])$', size.lower())
if search:
return int(search.group(1)) * (1024 ** mapping[search.group(2)])
raise TypeError(
@ -554,11 +456,11 @@ class CreateMixin: # pylint: disable=too-few-public-methods
"conmon_pid_file": pop("conmon_pid_file"), # TODO document, podman only
"containerCreateCommand": pop("containerCreateCommand"), # TODO document, podman only
"devices": [],
"dns_option": pop("dns_opt"),
"dns_options": pop("dns_opt"),
"dns_search": pop("dns_search"),
"dns_server": pop("dns"),
"entrypoint": pop("entrypoint"),
"env": environment,
"env": pop("environment"),
"env_host": pop("env_host"), # TODO document, podman only
"expose": {},
"groups": pop("group_add"),
@ -622,7 +524,7 @@ class CreateMixin: # pylint: disable=too-few-public-methods
"version": pop("version"),
"volumes": [],
"volumes_from": pop("volumes_from"),
"work_dir": pop("workdir") or pop("working_dir"),
"work_dir": pop("working_dir"),
}
for device in args.pop("devices", []):
@ -645,12 +547,11 @@ class CreateMixin: # pylint: disable=too-few-public-methods
args.pop("log_config")
for item in args.pop("mounts", []):
normalized_item = {key.lower(): value for key, value in item.items()}
mount_point = {
"destination": normalized_item.get("target"),
"destination": item.get("target"),
"options": [],
"source": normalized_item.get("source"),
"type": normalized_item.get("type"),
"source": item.get("source"),
"type": item.get("type"),
}
# some names are different for podman-py vs REST API due to compatibility with docker
@ -663,13 +564,12 @@ class CreateMixin: # pylint: disable=too-few-public-methods
regular_options = ["consistency", "mode", "size"]
for k, v in item.items():
_k = k.lower()
option_name = names_dict.get(_k, _k)
if _k in bool_options and v is True:
option_name = names_dict.get(k, k)
if k in bool_options and v is True:
options.append(option_name)
elif _k in regular_options:
options.append(f"{option_name}={v}")
elif _k in simple_options:
elif k in regular_options:
options.append(f'{option_name}={v}')
elif k in simple_options:
options.append(v)
mount_point["options"] = options
@ -718,15 +618,10 @@ class CreateMixin: # pylint: disable=too-few-public-methods
return result
for container, host in args.pop("ports", {}).items():
# avoid redefinition of the loop variable, then ensure it's a string
str_container = container
if isinstance(str_container, int):
str_container = str(str_container)
if "/" in str_container:
container_port, protocol = str_container.split("/")
if "/" in container:
container_port, protocol = container.split("/")
else:
container_port, protocol = str_container, "tcp"
container_port, protocol = container, "tcp"
port_map_list = parse_host_port(container_port, protocol, host)
params["portmappings"].extend(port_map_list)
@ -760,42 +655,27 @@ class CreateMixin: # pylint: disable=too-few-public-methods
}
for item in args.pop("ulimits", []):
params["r_limits"].append(
{
"type": item["Name"],
"hard": item["Hard"],
"soft": item["Soft"],
}
)
params["r_limits"].append({
"type": item["Name"],
"hard": item["Hard"],
"soft": item["Soft"],
})
for item in args.pop("volumes", {}).items():
key, value = item
extended_mode = value.get("extended_mode", [])
extended_mode = value.get('extended_mode', [])
if not isinstance(extended_mode, list):
raise ValueError("'extended_mode' value should be a list")
options = extended_mode
mode = value.get("mode")
mode = value.get('mode')
if mode is not None:
if not isinstance(mode, str):
raise ValueError("'mode' value should be a str")
options.append(mode)
# The Podman API only supports named volumes through the ``volume`` parameter. Directory
# mounting needs to happen through the ``mounts`` parameter. Luckily the translation
# isn't too complicated so we can just do it for the user if we suspect that the key
# isn't a named volume.
if NAMED_VOLUME_PATTERN.match(key):
volume = {"Name": key, "Dest": value["bind"], "Options": options}
params["volumes"].append(volume)
else:
mount_point = {
"destination": value["bind"],
"options": options,
"source": key,
"type": "bind",
}
params["mounts"].append(mount_point)
volume = {"Name": key, "Dest": value["bind"], "Options": options}
params["volumes"].append(volume)
for item in args.pop("secrets", []):
if isinstance(item, Secret):
@ -814,27 +694,22 @@ class CreateMixin: # pylint: disable=too-few-public-methods
params["secret_env"] = args.pop("secret_env", {})
if "cgroupns" in args:
params["cgroupns"] = normalize_nsmode(args.pop("cgroupns"))
params["cgroupns"] = {"nsmode": args.pop("cgroupns")}
if "ipc_mode" in args:
params["ipcns"] = normalize_nsmode(args.pop("ipc_mode"))
params["ipcns"] = {"nsmode": args.pop("ipc_mode")}
if "network_mode" in args:
network_mode = args.pop("network_mode")
details = network_mode.split(":")
if len(details) == 2 and details[0] == "ns":
params["netns"] = {"nsmode": "path", "value": details[1]}
else:
params["netns"] = {"nsmode": network_mode}
params["netns"] = {"nsmode": args.pop("network_mode")}
if "pid_mode" in args:
params["pidns"] = normalize_nsmode(args.pop("pid_mode"))
params["pidns"] = {"nsmode": args.pop("pid_mode")}
if "userns_mode" in args:
params["userns"] = normalize_nsmode(args.pop("userns_mode"))
params["userns"] = {"nsmode": args.pop("userns_mode")}
if "uts_mode" in args:
params["utsns"] = normalize_nsmode(args.pop("uts_mode"))
params["utsns"] = {"nsmode": args.pop("uts_mode")}
if len(args) > 0:
raise TypeError(

View File

@ -2,8 +2,7 @@
import logging
import urllib
from collections.abc import Mapping
from typing import Any, Union
from typing import Any, Dict, List, Mapping, Union
from podman import api
from podman.domain.containers import Container
@ -27,14 +26,11 @@ class ContainersManager(RunMixin, CreateMixin, Manager):
response = self.client.get(f"/containers/{key}/exists")
return response.ok
def get(self, key: str, **kwargs) -> Container:
def get(self, key: str) -> Container:
"""Get container by name or id.
Args:
key: Container name or id.
Keyword Args:
compatible (bool): Use Docker compatibility endpoint
container_id: Container name or id.
Returns:
A `Container` object corresponding to `key`.
@ -43,14 +39,12 @@ class ContainersManager(RunMixin, CreateMixin, Manager):
NotFound: when Container does not exist
APIError: when an error return by service
"""
compatible = kwargs.get("compatible", False)
container_id = urllib.parse.quote_plus(key)
response = self.client.get(f"/containers/{container_id}/json", compatible=compatible)
response = self.client.get(f"/containers/{container_id}/json")
response.raise_for_status()
return self.prepare_model(attrs=response.json())
def list(self, **kwargs) -> list[Container]:
def list(self, **kwargs) -> List[Container]:
"""Report on containers.
Keyword Args:
@ -63,7 +57,7 @@ class ContainersManager(RunMixin, CreateMixin, Manager):
- exited (int): Only containers with specified exit code
- status (str): One of restarting, running, paused, exited
- label (Union[str, list[str]]): Format either "key", "key=value" or a list of such.
- label (Union[str, List[str]]): Format either "key", "key=value" or a list of such.
- id (str): The id of the container.
- name (str): The name of the container.
- ancestor (str): Filter by container ancestor. Format of
@ -72,26 +66,12 @@ class ContainersManager(RunMixin, CreateMixin, Manager):
Give the container name or id.
- since (str): Only containers created after a particular container.
Give container name or id.
sparse: If False, return basic container information without additional
inspection requests. This improves performance when listing many containers
but might provide less detail. You can call Container.reload() on individual
containers later to retrieve complete attributes. Default: True.
When Docker compatibility is enabled with `compatible=True`: Default: False.
sparse: Ignored
ignore_removed: If True, ignore failures due to missing containers.
Raises:
APIError: when service returns an error
"""
compatible = kwargs.get("compatible", False)
# Set sparse default based on mode:
# Libpod behavior: default is sparse=True (faster, requires reload for full details)
# Docker behavior: default is sparse=False (full details immediately, compatible)
if "sparse" in kwargs:
sparse = kwargs["sparse"]
else:
sparse = not compatible # True for libpod, False for compat
params = {
"all": kwargs.get("all"),
"filters": kwargs.get("filters", {}),
@ -105,33 +85,22 @@ class ContainersManager(RunMixin, CreateMixin, Manager):
# filters formatted last because some kwargs may need to be mapped into filters
params["filters"] = api.prepare_filters(params["filters"])
response = self.client.get("/containers/json", params=params, compatible=compatible)
response = self.client.get("/containers/json", params=params)
response.raise_for_status()
containers: list[Container] = [self.prepare_model(attrs=i) for i in response.json()]
return [self.prepare_model(attrs=i) for i in response.json()]
# If sparse is False, reload each container to get full details
if not sparse:
for container in containers:
try:
container.reload(compatible=compatible)
except APIError:
# Skip containers that might have been removed
pass
return containers
def prune(self, filters: Mapping[str, str] = None) -> dict[str, Any]:
def prune(self, filters: Mapping[str, str] = None) -> Dict[str, Any]:
"""Delete stopped containers.
Args:
filters: Criteria for determining containers to remove. Available keys are:
- until (str): Delete containers before this time
- label (list[str]): Labels associated with containers
- label (List[str]): Labels associated with containers
Returns:
Keys:
- ContainersDeleted (list[str]): Identifiers of deleted containers.
- ContainersDeleted (List[str]): Identifiers of deleted containers.
- SpaceReclaimed (int): Amount of disk space reclaimed in bytes.
Raises:
@ -150,7 +119,7 @@ class ContainersManager(RunMixin, CreateMixin, Manager):
explanation=f"""Failed to prune container '{entry["Id"]}'""",
)
results["ContainersDeleted"].append(entry["Id"]) # type: ignore[attr-defined]
results["ContainersDeleted"].append(entry["Id"])
results["SpaceReclaimed"] += entry["Size"]
return results
@ -170,8 +139,10 @@ class ContainersManager(RunMixin, CreateMixin, Manager):
if isinstance(container_id, Container):
container_id = container_id.id
# v is used for the compat endpoint while volumes is used for the libpod endpoint
params = {"v": kwargs.get("v"), "force": kwargs.get("force"), "volumes": kwargs.get("v")}
params = {
"v": kwargs.get("v"),
"force": kwargs.get("force"),
}
response = self.client.delete(f"/containers/{container_id}", params=params)
response.raise_for_status()

View File

@ -1,10 +1,8 @@
"""Mixin to provide Container run() method."""
import logging
import threading
from contextlib import suppress
from typing import Union
from collections.abc import Generator, Iterator
from typing import Generator, Iterator, List, Union
from podman.domain.containers import Container
from podman.domain.images import Image
@ -19,8 +17,7 @@ class RunMixin: # pylint: disable=too-few-public-methods
def run(
self,
image: Union[str, Image],
command: Union[str, list[str], None] = None,
*,
command: Union[str, List[str], None] = None,
stdout=True,
stderr=False,
remove: bool = False,
@ -31,27 +28,17 @@ class RunMixin: # pylint: disable=too-few-public-methods
By default, run() will wait for the container to finish and return its logs.
If detach=True, run() will start the container and return a Container object rather
than logs. In this case, if remove=True, run() will monitor and remove the
container after it finishes running; the logs will be lost in this case.
than logs.
Args:
image: Image to run.
command: Command to run in the container.
stdout: Include stdout. Default: True.
stderr: Include stderr. Default: False.
remove: Delete container on the client side when the container's processes exit.
The `auto_remove` flag is also available to manage the removal on the daemon
side. Default: False.
remove: Delete container when the container's processes exit. Default: False.
Keyword Args:
- These args are directly used to pull an image when the image is not found.
auth_config (Mapping[str, str]): Override the credentials that are found in the
config for this request. auth_config should contain the username and password
keys to be valid.
platform (str): Platform in the format os[/arch[/variant]]
policy (str): Pull policy. "missing" (default), "always", "never", "newer"
- See the create() method for other keyword arguments.
- See the create() method for keyword arguments.
Returns:
- When detach is True, return a Container
@ -73,30 +60,14 @@ class RunMixin: # pylint: disable=too-few-public-methods
try:
container = self.create(image=image, command=command, **kwargs)
except ImageNotFound:
self.podman_client.images.pull(
image,
auth_config=kwargs.get("auth_config"),
platform=kwargs.get("platform"),
policy=kwargs.get("policy", "missing"),
)
self.client.images.pull(image, platform=kwargs.get("platform"))
container = self.create(image=image, command=command, **kwargs)
container.start()
container.wait(condition=["running", "exited"])
container.reload()
def remove_container(container_object: Container) -> None:
"""
Wait the container to finish and remove it.
Args:
container_object: Container object
"""
container_object.wait() # Wait for the container to finish
container_object.remove() # Remove the container
if kwargs.get("detach", False):
if remove:
# Start a background thread to remove the container after finishing
threading.Thread(target=remove_container, args=(container,)).start()
return container
with suppress(KeyError):

View File

@ -3,8 +3,7 @@
import json
import logging
from datetime import datetime
from typing import Any, Optional, Union
from collections.abc import Iterator
from typing import Any, Dict, Optional, Union, Iterator
from podman import api
from podman.api.client import APIClient
@ -27,9 +26,9 @@ class EventsManager: # pylint: disable=too-few-public-methods
self,
since: Union[datetime, int, None] = None,
until: Union[datetime, int, None] = None,
filters: Optional[dict[str, Any]] = None,
filters: Optional[Dict[str, Any]] = None,
decode: bool = False,
) -> Iterator[Union[str, dict[str, Any]]]:
) -> Iterator[Union[str, Dict[str, Any]]]:
"""Report on networks.
Args:
@ -39,7 +38,7 @@ class EventsManager: # pylint: disable=too-few-public-methods
until: Get events older than this time.
Yields:
When decode is True, Iterator[dict[str, Any]]
When decode is True, Iterator[Dict[str, Any]]
When decode is False, Iterator[str]
"""

View File

@ -1,17 +1,11 @@
"""Model and Manager for Image resources."""
import logging
from typing import Any, Optional, Literal, Union, TYPE_CHECKING
from collections.abc import Iterator
from typing import Any, Dict, Iterator, List, Optional, Union
import urllib.parse
from podman.api import DEFAULT_CHUNK_SIZE
from podman import api
from podman.domain.manager import PodmanResource
from podman.errors import ImageNotFound, InvalidArgument
if TYPE_CHECKING:
from podman.domain.images_manager import ImagesManager
from podman.errors import ImageNotFound
logger = logging.getLogger("podman.images")
@ -19,8 +13,6 @@ logger = logging.getLogger("podman.images")
class Image(PodmanResource):
"""Details and configuration for an Image managed by the Podman service."""
manager: "ImagesManager"
def __repr__(self) -> str:
return f"""<{self.__class__.__name__}: '{"', '".join(self.tags)}'>"""
@ -42,7 +34,7 @@ class Image(PodmanResource):
return [tag for tag in repo_tags if tag != "<none>:<none>"]
def history(self) -> list[dict[str, Any]]:
def history(self) -> List[Dict[str, Any]]:
"""Returns history of the Image.
Raises:
@ -55,7 +47,7 @@ class Image(PodmanResource):
def remove(
self, **kwargs
) -> list[dict[Literal["Deleted", "Untagged", "Errors", "ExitCode"], Union[str, int]]]:
) -> List[Dict[api.Literal["Deleted", "Untagged", "Errors", "ExitCode"], Union[str, int]]]:
"""Delete image from Podman service.
Podman only
@ -75,8 +67,8 @@ class Image(PodmanResource):
def save(
self,
chunk_size: Optional[int] = DEFAULT_CHUNK_SIZE,
named: Union[str, bool] = False,
chunk_size: Optional[int] = api.DEFAULT_CHUNK_SIZE,
named: Union[str, bool] = False, # pylint: disable=unused-argument
) -> Iterator[bytes]:
"""Returns Image as tarball.
@ -85,28 +77,13 @@ class Image(PodmanResource):
Args:
chunk_size: If None, data will be streamed in received buffer size.
If not None, data will be returned in sized buffers. Default: 2MB
named (str or bool): If ``False`` (default), the tarball will not
retain repository and tag information for this image. If set
to ``True``, the first tag in the :py:attr:`~tags` list will
be used to identify the image. Alternatively, any element of
the :py:attr:`~tags` list can be used as an argument to use
that specific tag as the saved identifier.
named: Ignored.
Raises:
APIError: When service returns an error
InvalidArgument: When the provided Tag name is not valid for the image.
APIError: when service returns an error
"""
img = self.id
if named:
img = urllib.parse.quote(self.tags[0] if self.tags else img)
if isinstance(named, str):
if named not in self.tags:
raise InvalidArgument(f"'{named}' is not a valid tag for this image")
img = urllib.parse.quote(named)
response = self.client.get(
f"/images/{img}/get", params={"format": ["docker-archive"]}, stream=True
f"/images/{self.id}/get", params={"format": ["docker-archive"]}, stream=True
)
response.raise_for_status(not_found=ImageNotFound)
return response.iter_content(chunk_size=chunk_size)

View File

@ -7,8 +7,7 @@ import random
import re
import shutil
import tempfile
from typing import Any
from collections.abc import Iterator
from typing import Any, Dict, Iterator, List, Tuple
import itertools
@ -23,7 +22,7 @@ class BuildMixin:
"""Class providing build method for ImagesManager."""
# pylint: disable=too-many-locals,too-many-branches,too-few-public-methods,too-many-statements
def build(self, **kwargs) -> tuple[Image, Iterator[bytes]]:
def build(self, **kwargs) -> Tuple[Image, Iterator[bytes]]:
"""Returns built image.
Keyword Args:
@ -34,13 +33,13 @@ class BuildMixin:
nocache (bool) Dont use the cache when set to True
rm (bool) Remove intermediate containers. Default True
timeout (int) HTTP timeout
custom_context (bool) Optional if using fileobj
custom_context (bool) Optional if using fileobj (ignored)
encoding (str) The encoding for a stream. Set to gzip for compressing (ignored)
pull (bool) Downloads any updates to the FROM image in Dockerfile
forcerm (bool) Always remove intermediate containers, even after unsuccessful builds
dockerfile (str) full path to the Dockerfile / Containerfile
buildargs (Mapping[str,str) A dictionary of build arguments
container_limits (dict[str, Union[int,str]])
container_limits (Dict[str, Union[int,str]])
A dictionary of limits applied to each container created by the build process.
Valid keys:
@ -53,11 +52,11 @@ class BuildMixin:
shmsize (int) Size of /dev/shm in bytes. The size must be greater than 0.
If omitted the system uses 64MB
labels (Mapping[str,str]) A dictionary of labels to set on the image
cache_from (list[str]) A list of image's identifier used for build cache resolution
cache_from (List[str]) A list of image's identifier used for build cache resolution
target (str) Name of the build-stage to build in a multi-stage Dockerfile
network_mode (str) networking mode for the run commands during build
squash (bool) Squash the resulting images layers into a single layer.
extra_hosts (dict[str,str]) Extra hosts to add to /etc/hosts in building
extra_hosts (Dict[str,str]) Extra hosts to add to /etc/hosts in building
containers, as a mapping of hostname to IP address.
platform (str) Platform in the format os[/arch[/variant]].
isolation (str) Isolation technology used during build. (ignored)
@ -82,23 +81,7 @@ class BuildMixin:
body = None
path = None
if kwargs.get("custom_context"):
if "fileobj" not in kwargs:
raise PodmanError(
"Custom context requires fileobj to be set to a binary file-like object "
"containing a build-directory tarball."
)
if "dockerfile" not in kwargs:
# TODO: Scan the tarball for either a Dockerfile or a Containerfile.
# This could be slow if the tarball is large,
# and could require buffering/copying the tarball if `fileobj` is not seekable.
# As a workaround for now, don't support omitting the filename.
raise PodmanError(
"Custom context requires specifying the name of the Dockerfile "
"(typically 'Dockerfile' or 'Containerfile')."
)
body = kwargs["fileobj"]
elif "fileobj" in kwargs:
if "fileobj" in kwargs:
path = tempfile.TemporaryDirectory() # pylint: disable=consider-using-with
filename = pathlib.Path(path.name) / params["dockerfile"]
@ -157,7 +140,7 @@ class BuildMixin:
raise BuildError(unknown or "Unknown", report_stream)
@staticmethod
def _render_params(kwargs) -> dict[str, list[Any]]:
def _render_params(kwargs) -> Dict[str, List[Any]]:
"""Map kwargs to query parameters.
All unsupported kwargs are silently ignored.

View File

@ -1,35 +1,21 @@
"""PodmanResource manager subclassed for Images."""
import builtins
import io
import json
import logging
import os
import urllib.parse
from typing import Any, Literal, Optional, Union
from collections.abc import Iterator, Mapping, Generator
from pathlib import Path
from typing import Any, Dict, Generator, Iterator, List, Mapping, Optional, Union
import requests
from rich.progress import Progress, TextColumn, BarColumn, TaskProgressColumn, TimeRemainingColumn
from podman import api
from podman.api.parse_utils import parse_repository
from podman.api import Literal
from podman.api.http_utils import encode_auth_header
from podman.domain.images import Image
from podman.domain.images_build import BuildMixin
from podman.domain.json_stream import json_stream
from podman.domain.manager import Manager
from podman.domain.registry_data import RegistryData
from podman.errors import APIError, ImageNotFound, PodmanError
try:
from rich.progress import (
Progress,
TextColumn,
BarColumn,
TaskProgressColumn,
TimeRemainingColumn,
)
except (ImportError, ModuleNotFoundError):
Progress = None
from podman.errors import APIError, ImageNotFound
logger = logging.getLogger("podman.images")
@ -48,28 +34,25 @@ class ImagesManager(BuildMixin, Manager):
response = self.client.get(f"/images/{key}/exists")
return response.ok
def list(self, **kwargs) -> builtins.list[Image]:
def list(self, **kwargs) -> List[Image]:
"""Report on images.
Keyword Args:
name (str) Only show images belonging to the repository name
all (bool) Show intermediate image layers. By default, these are filtered out.
filters (Mapping[str, Union[str, list[str]]) Filters to be used on the image list.
filters (Mapping[str, Union[str, List[str]]) Filters to be used on the image list.
Available filters:
- dangling (bool)
- label (Union[str, list[str]]): format either "key" or "key=value"
- label (Union[str, List[str]]): format either "key" or "key=value"
Raises:
APIError: when service returns an error
"""
filters = kwargs.get("filters", {}).copy()
if name := kwargs.get("name"):
filters["reference"] = name
params = {
"all": kwargs.get("all"),
"filters": api.prepare_filters(filters=filters),
"name": kwargs.get("name"),
"filters": api.prepare_filters(kwargs.get("filters")),
}
response = self.client.get("/images/json", params=params)
if response.status_code == requests.codes.not_found:
@ -120,107 +103,60 @@ class ImagesManager(BuildMixin, Manager):
collection=self,
)
def load(
self, data: Optional[bytes] = None, file_path: Optional[os.PathLike] = None
) -> Generator[Image, None, None]:
def load(self, data: bytes) -> Generator[Image, None, None]:
"""Restore an image previously saved.
Args:
data: Image to be loaded in tarball format.
file_path: Path of the Tarball.
It works with both str and Path-like objects
Raises:
APIError: When service returns an error.
PodmanError: When the arguments are not set correctly.
APIError: when service returns an error
"""
# TODO fix podman swagger cannot use this header!
# headers = {"Content-type": "application/x-www-form-urlencoded"}
# Check that exactly one of the data or file_path is provided
if not data and not file_path:
raise PodmanError("The 'data' or 'file_path' parameter should be set.")
if data and file_path:
raise PodmanError(
"Only one parameter should be set from 'data' and 'file_path' parameters."
)
post_data = data
if file_path:
# Convert to Path if file_path is a string
file_path_object = Path(file_path)
post_data = file_path_object.read_bytes() # Read the tarball file as bytes
# Make the client request before entering the generator
response = self.client.post(
"/images/load", data=post_data, headers={"Content-type": "application/x-tar"}
"/images/load", data=data, headers={"Content-type": "application/x-tar"}
)
response.raise_for_status() # Catch any errors before proceeding
response.raise_for_status()
def _generator(body: dict) -> Generator[Image, None, None]:
# Iterate and yield images from response body
for item in body["Names"]:
yield self.get(item)
# Pass the response body to the generator
return _generator(response.json())
body = response.json()
for item in body["Names"]:
yield self.get(item)
def prune(
self,
all: Optional[bool] = False, # pylint: disable=redefined-builtin
external: Optional[bool] = False,
filters: Optional[Mapping[str, Any]] = None,
) -> dict[Literal["ImagesDeleted", "SpaceReclaimed"], Any]:
self, filters: Optional[Mapping[str, Any]] = None
) -> Dict[Literal["ImagesDeleted", "SpaceReclaimed"], Any]:
"""Delete unused images.
The Untagged keys will always be "".
Args:
all: Remove all images not in use by containers, not just dangling ones.
external: Remove images even when they are used by external containers
(e.g, by build containers).
filters: Qualify Images to prune. Available filters:
- dangling (bool): when true, only delete unused and untagged images.
- label: (dict): filter by label.
Examples:
filters={"label": {"key": "value"}}
filters={"label!": {"key": "value"}}
- until (str): Delete images older than this timestamp.
Raises:
APIError: when service returns an error
"""
params = {
"all": all,
"external": external,
"filters": api.prepare_filters(filters),
}
response = self.client.post("/images/prune", params=params)
response = self.client.post(
"/images/prune", params={"filters": api.prepare_filters(filters)}
)
response.raise_for_status()
deleted: builtins.list[dict[str, str]] = []
error: builtins.list[str] = []
deleted: List[Dict[str, str]] = []
error: List[str] = []
reclaimed: int = 0
# If the prune doesn't remove images, the API returns "null"
# and it's interpreted as None (NoneType)
# so the for loop throws "TypeError: 'NoneType' object is not iterable".
# The below if condition fixes this issue.
if response.json() is not None:
for element in response.json():
if "Err" in element and element["Err"] is not None:
error.append(element["Err"])
else:
reclaimed += element["Size"]
deleted.append(
{
"Deleted": element["Id"],
"Untagged": "",
}
)
for element in response.json():
if "Err" in element and element["Err"] is not None:
error.append(element["Err"])
else:
reclaimed += element["Size"]
deleted.append({
"Deleted": element["Id"],
"Untagged": "",
})
if len(error) > 0:
raise APIError(response.url, response=response, explanation="; ".join(error))
@ -229,7 +165,7 @@ class ImagesManager(BuildMixin, Manager):
"SpaceReclaimed": reclaimed,
}
def prune_builds(self) -> dict[Literal["CachesDeleted", "SpaceReclaimed"], Any]:
def prune_builds(self) -> Dict[Literal["CachesDeleted", "SpaceReclaimed"], Any]:
"""Delete builder cache.
Method included to complete API, the operation always returns empty
@ -239,7 +175,7 @@ class ImagesManager(BuildMixin, Manager):
def push(
self, repository: str, tag: Optional[str] = None, **kwargs
) -> Union[str, Iterator[Union[str, dict[str, Any]]]]:
) -> Union[str, Iterator[Union[str, Dict[str, Any]]]]:
"""Push Image or repository to the registry.
Args:
@ -249,37 +185,29 @@ class ImagesManager(BuildMixin, Manager):
Keyword Args:
auth_config (Mapping[str, str]: Override configured credentials. Must include
username and password keys.
decode (bool): return data from server as dict[str, Any]. Ignored unless stream=True.
decode (bool): return data from server as Dict[str, Any]. Ignored unless stream=True.
destination (str): alternate destination for image. (Podman only)
stream (bool): return output as blocking generator. Default: False.
tlsVerify (bool): Require TLS verification.
format (str): Manifest type (oci, v2s1, or v2s2) to use when pushing an image.
Default is manifest type of source, with fallbacks.
Raises:
APIError: when service returns an error
"""
auth_config: Optional[dict[str, str]] = kwargs.get("auth_config")
auth_config: Optional[Dict[str, str]] = kwargs.get("auth_config")
headers = {
# A base64url-encoded auth configuration
"X-Registry-Auth": api.encode_auth_header(auth_config) if auth_config else ""
"X-Registry-Auth": encode_auth_header(auth_config) if auth_config else ""
}
params = {
"destination": kwargs.get("destination"),
"tlsVerify": kwargs.get("tlsVerify"),
"format": kwargs.get("format"),
}
stream = kwargs.get("stream", False)
decode = kwargs.get("decode", False)
name = f'{repository}:{tag}' if tag else repository
name = urllib.parse.quote_plus(name)
response = self.client.post(
f"/images/{name}/push", params=params, stream=stream, headers=headers
)
response = self.client.post(f"/images/{name}/push", params=params, headers=headers)
response.raise_for_status(not_found=ImageNotFound)
tag_count = 0 if tag is None else 1
@ -294,6 +222,8 @@ class ImagesManager(BuildMixin, Manager):
},
]
stream = kwargs.get("stream", False)
decode = kwargs.get("decode", False)
if stream:
return self._push_helper(decode, body)
@ -304,8 +234,8 @@ class ImagesManager(BuildMixin, Manager):
@staticmethod
def _push_helper(
decode: bool, body: builtins.list[dict[str, Any]]
) -> Iterator[Union[str, dict[str, Any]]]:
decode: bool, body: List[Dict[str, Any]]
) -> Iterator[Union[str, Dict[str, Any]]]:
"""Helper needed to allow push() to return either a generator or a str."""
for entry in body:
if decode:
@ -315,12 +245,8 @@ class ImagesManager(BuildMixin, Manager):
# pylint: disable=too-many-locals,too-many-branches
def pull(
self,
repository: str,
tag: Optional[str] = None,
all_tags: bool = False,
**kwargs,
) -> Union[Image, builtins.list[Image], Iterator[str]]:
self, repository: str, tag: Optional[str] = None, all_tags: bool = False, **kwargs
) -> Union[Image, List[Image], Iterator[str]]:
"""Request Podman service to pull image(s) from repository.
Args:
@ -332,12 +258,7 @@ class ImagesManager(BuildMixin, Manager):
auth_config (Mapping[str, str]) Override the credentials that are found in the
config for this request. auth_config should contain the username and password
keys to be valid.
compatMode (bool) Return the same JSON payload as the Docker-compat endpoint.
Default: True.
decode (bool) Decode the JSON data from the server into dicts.
Only applies with ``stream=True``
platform (str) Platform in the format os[/arch[/variant]]
policy (str) - Pull policy. "always" (default), "missing", "never", "newer"
progress_bar (bool) - Display a progress bar with the image pull progress (uses
the compat endpoint). Default: False
tls_verify (bool) - Require TLS verification. Default: True.
@ -352,24 +273,23 @@ class ImagesManager(BuildMixin, Manager):
APIError: when service returns an error
"""
if tag is None or len(tag) == 0:
repository, parsed_tag = parse_repository(repository)
if parsed_tag is not None:
tag = parsed_tag
tokens = repository.split(":")
if len(tokens) == 2:
repository = tokens[0]
tag = tokens[1]
else:
tag = "latest"
auth_config: Optional[dict[str, str]] = kwargs.get("auth_config")
auth_config: Optional[Dict[str, str]] = kwargs.get("auth_config")
headers = {
# A base64url-encoded auth configuration
"X-Registry-Auth": api.encode_auth_header(auth_config) if auth_config else ""
"X-Registry-Auth": encode_auth_header(auth_config) if auth_config else ""
}
params = {
"policy": kwargs.get("policy", "always"),
"reference": repository,
"tlsVerify": kwargs.get("tls_verify", True),
"compatMode": kwargs.get("compatMode", True),
"tlsVerify": kwargs.get("tls_verify"),
}
if all_tags:
@ -377,8 +297,7 @@ class ImagesManager(BuildMixin, Manager):
else:
params["reference"] = f"{repository}:{tag}"
# Check if "platform" in kwargs AND it has value.
if "platform" in kwargs and kwargs["platform"]:
if "platform" in kwargs:
tokens = kwargs.get("platform").split("/")
if 1 < len(tokens) > 3:
raise ValueError(f'\'{kwargs.get("platform")}\' is not a legal platform.')
@ -395,8 +314,6 @@ class ImagesManager(BuildMixin, Manager):
# progress bar
progress_bar = kwargs.get("progress_bar", False)
if progress_bar:
if Progress is None:
raise ModuleNotFoundError('progress_bar requires \'rich.progress\' module')
params["compatMode"] = True
stream = True
@ -419,12 +336,12 @@ class ImagesManager(BuildMixin, Manager):
return None
if stream:
return self._stream_helper(response, decode=kwargs.get("decode"))
return response.iter_lines()
for item in reversed(list(response.iter_lines())):
for item in response.iter_lines():
obj = json.loads(item)
if all_tags and "images" in obj:
images: builtins.list[Image] = []
images: List[Image] = []
for name in obj["images"]:
images.append(self.get(name))
return images
@ -469,7 +386,7 @@ class ImagesManager(BuildMixin, Manager):
image: Union[Image, str],
force: Optional[bool] = None,
noprune: bool = False, # pylint: disable=unused-argument
) -> builtins.list[dict[Literal["Deleted", "Untagged", "Errors", "ExitCode"], Union[str, int]]]:
) -> List[Dict[Literal["Deleted", "Untagged", "Errors", "ExitCode"], Union[str, int]]]:
"""Delete image from Podman service.
Args:
@ -488,7 +405,7 @@ class ImagesManager(BuildMixin, Manager):
response.raise_for_status(not_found=ImageNotFound)
body = response.json()
results: builtins.list[dict[str, Union[int, str]]] = []
results: List[Dict[str, Union[int, str]]] = []
for key in ("Deleted", "Untagged", "Errors"):
if key in body:
for element in body[key]:
@ -496,14 +413,14 @@ class ImagesManager(BuildMixin, Manager):
results.append({"ExitCode": body["ExitCode"]})
return results
def search(self, term: str, **kwargs) -> builtins.list[dict[str, Any]]:
def search(self, term: str, **kwargs) -> List[Dict[str, Any]]:
"""Search Images on registries.
Args:
term: Used to target Image results.
Keyword Args:
filters (Mapping[str, list[str]): Refine results of search. Available filters:
filters (Mapping[str, List[str]): Refine results of search. Available filters:
- is-automated (bool): Image build is automated.
- is-official (bool): Image build is owned by product provider.
@ -556,24 +473,3 @@ class ImagesManager(BuildMixin, Manager):
response = self.client.post(f"/images/scp/{source}", params=params)
response.raise_for_status()
return response.json()
def _stream_helper(self, response, decode=False):
"""Generator for data coming from a chunked-encoded HTTP response."""
if response.raw._fp.chunked:
if decode:
yield from json_stream(self._stream_helper(response, False))
else:
reader = response.raw
while not reader.closed:
# this read call will block until we get a chunk
data = reader.read(1)
if not data:
break
if reader._fp.chunk_left:
data += reader.read(reader._fp.chunk_left)
yield data
else:
# Response isn't chunked, meaning we probably
# encountered an error immediately
yield self._result(response, json=decode)

View File

@ -3,8 +3,7 @@
Provided for compatibility
"""
from typing import Any, Optional
from collections.abc import Mapping
from typing import Any, List, Mapping, Optional
class IPAMPool(dict):
@ -26,14 +25,12 @@ class IPAMPool(dict):
aux_addresses: Ignored.
"""
super().__init__()
self.update(
{
"AuxiliaryAddresses": aux_addresses,
"Gateway": gateway,
"IPRange": iprange,
"Subnet": subnet,
}
)
self.update({
"AuxiliaryAddresses": aux_addresses,
"Gateway": gateway,
"IPRange": iprange,
"Subnet": subnet,
})
class IPAMConfig(dict):
@ -41,8 +38,8 @@ class IPAMConfig(dict):
def __init__(
self,
driver: Optional[str] = "host-local",
pool_configs: Optional[list[IPAMPool]] = None,
driver: Optional[str] = "default",
pool_configs: Optional[List[IPAMPool]] = None,
options: Optional[Mapping[str, Any]] = None,
):
"""Create IPAMConfig.
@ -53,10 +50,8 @@ class IPAMConfig(dict):
options: Options to provide to the Network driver.
"""
super().__init__()
self.update(
{
"Config": pool_configs or [],
"Driver": driver,
"Options": options or {},
}
)
self.update({
"Config": pool_configs or [],
"Driver": driver,
"Options": options or {},
})

View File

@ -1,75 +0,0 @@
import json
import json.decoder
from podman.errors import StreamParseError
json_decoder = json.JSONDecoder()
def stream_as_text(stream):
"""
Given a stream of bytes or text, if any of the items in the stream
are bytes convert them to text.
This function can be removed once we return text streams
instead of byte streams.
"""
for data in stream:
_data = data
if not isinstance(data, str):
_data = data.decode('utf-8', 'replace')
yield _data
def json_splitter(buffer):
"""Attempt to parse a json object from a buffer. If there is at least one
object, return it and the rest of the buffer, otherwise return None.
"""
buffer = buffer.strip()
try:
obj, index = json_decoder.raw_decode(buffer)
rest = buffer[json.decoder.WHITESPACE.match(buffer, index).end() :]
return obj, rest
except ValueError:
return None
def json_stream(stream):
"""Given a stream of text, return a stream of json objects.
This handles streams which are inconsistently buffered (some entries may
be newline delimited, and others are not).
"""
return split_buffer(stream, json_splitter, json_decoder.decode)
def line_splitter(buffer, separator='\n'):
index = buffer.find(str(separator))
if index == -1:
return None
return buffer[: index + 1], buffer[index + 1 :]
def split_buffer(stream, splitter=None, decoder=lambda a: a):
"""Given a generator which yields strings and a splitter function,
joins all input, splits on the separator and yields each chunk.
Unlike string.split(), each chunk includes the trailing
separator, except for the last one if none was found on the end
of the input.
"""
splitter = splitter or line_splitter
buffered = ''
for data in stream_as_text(stream):
buffered += data
while True:
buffer_split = splitter(buffered)
if buffer_split is None:
break
item, buffered = buffer_split
yield item
if buffered:
try:
yield decoder(buffered)
except Exception as e:
raise StreamParseError(e) from e

View File

@ -2,19 +2,15 @@
from abc import ABC, abstractmethod
from collections import abc
from typing import Any, Optional, TypeVar, Union, TYPE_CHECKING
from collections.abc import Mapping
from typing import Any, List, Mapping, Optional, TypeVar, Union
from podman.api.client import APIClient
if TYPE_CHECKING:
from podman import PodmanClient
# Methods use this Type when a subclass of PodmanResource is expected.
PodmanResourceType: TypeVar = TypeVar("PodmanResourceType", bound="PodmanResource")
class PodmanResource(ABC): # noqa: B024
class PodmanResource(ABC):
"""Base class for representing resource of a Podman service.
Attributes:
@ -26,7 +22,6 @@ class PodmanResource(ABC): # noqa: B024
attrs: Optional[Mapping[str, Any]] = None,
client: Optional[APIClient] = None,
collection: Optional["Manager"] = None,
podman_client: Optional["PodmanClient"] = None,
):
"""Initialize base class for PodmanResource's.
@ -34,12 +29,10 @@ class PodmanResource(ABC): # noqa: B024
attrs: Mapping of attributes for resource from Podman service.
client: Configured connection to a Podman service.
collection: Manager of this category of resource, named `collection` for compatibility
podman_client: PodmanClient() configured to connect to Podman object.
"""
super().__init__()
self.client = client
self.manager = collection
self.podman_client = podman_client
self.attrs = {}
if attrs is not None:
@ -70,13 +63,9 @@ class PodmanResource(ABC): # noqa: B024
return self.id[:17]
return self.id[:10]
def reload(self, **kwargs) -> None:
"""Refresh this object's data from the service.
Keyword Args:
compatible (bool): Use Docker compatibility endpoint
"""
latest = self.manager.get(self.id, **kwargs)
def reload(self) -> None:
"""Refresh this object's data from the service."""
latest = self.manager.get(self.id)
self.attrs = latest.attrs
@ -88,18 +77,14 @@ class Manager(ABC):
def resource(self):
"""Type[PodmanResource]: Class which the factory method prepare_model() will use."""
def __init__(
self, client: Optional[APIClient] = None, podman_client: Optional["PodmanClient"] = None
) -> None:
def __init__(self, client: APIClient = None) -> None:
"""Initialize Manager() object.
Args:
client: APIClient() configured to connect to Podman service.
podman_client: PodmanClient() configured to connect to Podman object.
"""
super().__init__()
self.client = client
self.podman_client = podman_client
@abstractmethod
def exists(self, key: str) -> bool:
@ -116,7 +101,7 @@ class Manager(ABC):
"""Returns representation of resource."""
@abstractmethod
def list(self, **kwargs) -> list[PodmanResourceType]:
def list(self, **kwargs) -> List[PodmanResourceType]:
"""Returns list of resources."""
def prepare_model(self, attrs: Union[PodmanResource, Mapping[str, Any]]) -> PodmanResourceType:
@ -125,7 +110,6 @@ class Manager(ABC):
# Refresh existing PodmanResource.
if isinstance(attrs, PodmanResource):
attrs.client = self.client
attrs.podman_client = self.podman_client
attrs.collection = self
return attrs
@ -133,9 +117,7 @@ class Manager(ABC):
if isinstance(attrs, abc.Mapping):
# TODO Determine why pylint is reporting typing.Type not callable
# pylint: disable=not-callable
return self.resource(
attrs=attrs, client=self.client, podman_client=self.podman_client, collection=self
)
return self.resource(attrs=attrs, client=self.client, collection=self)
# pylint: disable=broad-exception-raised
raise Exception(f"Can't create {self.resource.__name__} from {attrs}")

View File

@ -3,7 +3,7 @@
import logging
import urllib.parse
from contextlib import suppress
from typing import Any, Optional, Union
from typing import Any, Dict, List, Optional, Union
from podman import api
from podman.domain.images import Image
@ -38,7 +38,7 @@ class Manifest(PodmanResource):
@property
def names(self):
"""list[str]: Returns the identifier of the manifest."""
"""List[str]: Returns the identifier of the manifest."""
return self.name
@property
@ -51,7 +51,7 @@ class Manifest(PodmanResource):
"""int: Returns the schema version type for this manifest."""
return self.attrs.get("schemaVersion")
def add(self, images: list[Union[Image, str]], **kwargs) -> None:
def add(self, images: List[Union[Image, str]], **kwargs) -> None:
"""Add Image to manifest list.
Args:
@ -59,9 +59,9 @@ class Manifest(PodmanResource):
Keyword Args:
all (bool):
annotation (dict[str, str]):
annotation (Dict[str, str]):
arch (str):
features (list[str]):
features (List[str]):
os (str):
os_version (str):
variant (str):
@ -82,11 +82,9 @@ class Manifest(PodmanResource):
"operation": "update",
}
for item in images:
# avoid redefinition of the loop variable, then ensure it's an image
img_item = item
if isinstance(img_item, Image):
img_item = img_item.attrs["RepoTags"][0]
data["images"].append(img_item)
if isinstance(item, Image):
item = item.attrs["RepoTags"][0]
data["images"].append(item)
data = api.prepare_body(data)
response = self.client.put(f"/manifests/{self.quoted_name}", data=data)
@ -97,7 +95,6 @@ class Manifest(PodmanResource):
self,
destination: str,
all: Optional[bool] = None, # pylint: disable=redefined-builtin
**kwargs,
) -> None:
"""Push a manifest list or image index to a registry.
@ -105,32 +102,15 @@ class Manifest(PodmanResource):
destination: Target for push.
all: Push all images.
Keyword Args:
auth_config (Mapping[str, str]: Override configured credentials. Must include
username and password keys.
Raises:
NotFound: when the Manifest could not be found
APIError: when service reports an error
"""
auth_config: Optional[dict[str, str]] = kwargs.get("auth_config")
headers = {
# A base64url-encoded auth configuration
"X-Registry-Auth": api.encode_auth_header(auth_config) if auth_config else ""
}
params = {
"all": all,
"destination": destination,
}
destination_quoted = urllib.parse.quote_plus(destination)
response = self.client.post(
f"/manifests/{self.quoted_name}/registry/{destination_quoted}",
params=params,
headers=headers,
)
response = self.client.post(f"/manifests/{self.quoted_name}/push", params=params)
response.raise_for_status()
def remove(self, digest: str) -> None:
@ -171,7 +151,7 @@ class ManifestsManager(Manager):
def create(
self,
name: str,
images: Optional[list[Union[Image, str]]] = None,
images: Optional[List[Union[Image, str]]] = None,
all: Optional[bool] = None, # pylint: disable=redefined-builtin
) -> Manifest:
"""Create a Manifest.
@ -185,15 +165,13 @@ class ManifestsManager(Manager):
ValueError: when no names are provided
NotFoundImage: when a given image does not exist
"""
params: dict[str, Any] = {}
params: Dict[str, Any] = {}
if images is not None:
params["images"] = []
for item in images:
# avoid redefinition of the loop variable, then ensure it's an image
img_item = item
if isinstance(img_item, Image):
img_item = img_item.attrs["RepoTags"][0]
params["images"].append(img_item)
if isinstance(item, Image):
item = item.attrs["RepoTags"][0]
params["images"].append(item)
if all is not None:
params["all"] = all
@ -237,12 +215,12 @@ class ManifestsManager(Manager):
body["names"] = key
return self.prepare_model(attrs=body)
def list(self, **kwargs) -> list[Manifest]:
def list(self, **kwargs) -> List[Manifest]:
"""Not Implemented."""
raise NotImplementedError("Podman service currently does not support listing manifests.")
def remove(self, name: Union[Manifest, str]) -> dict[str, Any]:
def remove(self, name: Union[Manifest, str]) -> Dict[str, Any]:
"""Delete the manifest list from the Podman service."""
if isinstance(name, Manifest):
name = name.name

View File

@ -24,7 +24,7 @@ class Network(PodmanResource):
"""Details and configuration for a networks managed by the Podman service.
Attributes:
attrs (dict[str, Any]): Attributes of Network reported from Podman service
attrs (Dict[str, Any]): Attributes of Network reported from Podman service
"""
@property
@ -41,7 +41,7 @@ class Network(PodmanResource):
@property
def containers(self):
"""list[Container]: Returns list of Containers connected to network."""
"""List[Container]: Returns list of Containers connected to network."""
with suppress(KeyError):
container_manager = ContainersManager(client=self.client)
return [container_manager.get(ident) for ident in self.attrs["Containers"].keys()]
@ -71,12 +71,12 @@ class Network(PodmanResource):
container: To add to this Network
Keyword Args:
aliases (list[str]): Aliases to add for this endpoint
driver_opt (dict[str, Any]): Options to provide to network driver
aliases (List[str]): Aliases to add for this endpoint
driver_opt (Dict[str, Any]): Options to provide to network driver
ipv4_address (str): IPv4 address for given Container on this network
ipv6_address (str): IPv6 address for given Container on this network
link_local_ips (list[str]): list of link-local addresses
links (list[Union[str, Containers]]): Ignored
link_local_ips (List[str]): list of link-local addresses
links (List[Union[str, Containers]]): Ignored
Raises:
APIError: when Podman service reports an error
@ -111,7 +111,6 @@ class Network(PodmanResource):
f"/networks/{self.name}/connect",
data=json.dumps(data),
headers={"Content-type": "application/json"},
**kwargs,
)
response.raise_for_status()

View File

@ -12,9 +12,10 @@ Example:
import ipaddress
import logging
from contextlib import suppress
from typing import Any, Optional, Literal, Union
from typing import Any, Dict, List, Optional
from podman.api import http_utils, prepare_filters
from podman import api
from podman.api import http_utils
from podman.domain.manager import Manager
from podman.domain.networks import Network
from podman.errors import APIError
@ -45,8 +46,8 @@ class NetworksManager(Manager):
ingress (bool): Ignored, always False.
internal (bool): Restrict external access to the network.
ipam (IPAMConfig): Optional custom IP scheme for the network.
labels (dict[str, str]): Map of labels to set on the network.
options (dict[str, Any]): Driver options.
labels (Dict[str, str]): Map of labels to set on the network.
options (Dict[str, Any]): Driver options.
scope (str): Ignored, always "local".
Raises:
@ -74,10 +75,7 @@ class NetworksManager(Manager):
response.raise_for_status()
return self.prepare_model(attrs=response.json())
def _prepare_ipam(self, data: dict[str, Any], ipam: dict[str, Any]):
if "Driver" in ipam:
data["ipam_options"] = {"driver": ipam["Driver"]}
def _prepare_ipam(self, data: Dict[str, Any], ipam: Dict[str, Any]):
if "Config" not in ipam:
return
@ -116,23 +114,23 @@ class NetworksManager(Manager):
return self.prepare_model(attrs=response.json())
def list(self, **kwargs) -> list[Network]:
def list(self, **kwargs) -> List[Network]:
"""Report on networks.
Keyword Args:
names (list[str]): List of names to filter by.
ids (list[str]): List of identifiers to filter by.
names (List[str]): List of names to filter by.
ids (List[str]): List of identifiers to filter by.
filters (Mapping[str,str]): Criteria for listing networks. Available filters:
- driver="bridge": Matches a network's driver. Only "bridge" is supported.
- label=(Union[str, list[str]]): format either "key", "key=value"
- label=(Union[str, List[str]]): format either "key", "key=value"
or a list of such.
- type=(str): Filters networks by type, legal values are:
- "custom"
- "builtin"
- plugin=(list[str]]): Matches CNI plugins included in a network, legal
- plugin=(List[str]]): Matches CNI plugins included in a network, legal
values are (Podman only):
- bridge
@ -151,7 +149,7 @@ class NetworksManager(Manager):
filters = kwargs.get("filters", {})
filters["name"] = kwargs.get("names")
filters["id"] = kwargs.get("ids")
filters = prepare_filters(filters)
filters = api.prepare_filters(filters)
params = {"filters": filters}
response = self.client.get("/networks/json", params=params)
@ -160,8 +158,8 @@ class NetworksManager(Manager):
return [self.prepare_model(i) for i in response.json()]
def prune(
self, filters: Optional[dict[str, Any]] = None
) -> dict[Literal["NetworksDeleted", "SpaceReclaimed"], Any]:
self, filters: Optional[Dict[str, Any]] = None
) -> Dict[api.Literal["NetworksDeleted", "SpaceReclaimed"], Any]:
"""Delete unused Networks.
SpaceReclaimed always reported as 0
@ -172,11 +170,11 @@ class NetworksManager(Manager):
Raises:
APIError: when service reports error
"""
params = {"filters": prepare_filters(filters)}
params = {"filters": api.prepare_filters(filters)}
response = self.client.post("/networks/prune", params=params)
response.raise_for_status()
deleted: list[str] = []
deleted: List[str] = []
for item in response.json():
if item["Error"] is not None:
raise APIError(
@ -188,7 +186,7 @@ class NetworksManager(Manager):
return {"NetworksDeleted": deleted, "SpaceReclaimed": 0}
def remove(self, name: Union[Network, str], force: Optional[bool] = None) -> None:
def remove(self, name: [Network, str], force: Optional[bool] = None) -> None:
"""Remove Network resource.
Args:

View File

@ -1,14 +1,11 @@
"""Model and Manager for Pod resources."""
import logging
from typing import Any, Optional, Union, TYPE_CHECKING
from typing import Any, Dict, Optional, Tuple, Union
from podman.domain.manager import PodmanResource
if TYPE_CHECKING:
from podman.domain.pods_manager import PodsManager
_Timeout = Union[None, int, tuple[int, int], tuple[int, None]]
_Timeout = Union[None, float, Tuple[float, float], Tuple[float, None]]
logger = logging.getLogger("podman.pods")
@ -16,8 +13,6 @@ logger = logging.getLogger("podman.pods")
class Pod(PodmanResource):
"""Details and configuration for a pod managed by the Podman service."""
manager: "PodsManager"
@property
def id(self): # pylint: disable=invalid-name
return self.attrs.get("ID", self.attrs.get("Id"))
@ -93,7 +88,7 @@ class Pod(PodmanResource):
response = self.client.post(f"/pods/{self.id}/stop", params=params)
response.raise_for_status()
def top(self, **kwargs) -> dict[str, Any]:
def top(self, **kwargs) -> Dict[str, Any]:
"""Report on running processes in pod.
Keyword Args:

View File

@ -1,10 +1,8 @@
"""PodmanResource manager subclassed for Networks."""
import builtins
import json
import logging
from typing import Any, Optional, Union
from collections.abc import Iterator
from typing import Any, Dict, List, Optional, Union, Iterator
from podman import api
from podman.domain.manager import Manager
@ -59,24 +57,24 @@ class PodsManager(Manager):
response.raise_for_status()
return self.prepare_model(attrs=response.json())
def list(self, **kwargs) -> builtins.list[Pod]:
def list(self, **kwargs) -> List[Pod]:
"""Report on pods.
Keyword Args:
filters (Mapping[str, str]): Criteria for listing pods. Available filters:
- ctr-ids (list[str]): list of container ids to filter by.
- ctr-names (list[str]): list of container names to filter by.
- ctr-number (list[int]): list pods with given number of containers.
- ctr-status (list[str]): list pods with containers in given state.
- ctr-ids (List[str]): List of container ids to filter by.
- ctr-names (List[str]): List of container names to filter by.
- ctr-number (List[int]): list pods with given number of containers.
- ctr-status (List[str]): List pods with containers in given state.
Legal values are: "created", "running", "paused", "stopped",
"exited", or "unknown"
- id (str) - List pod with this id.
- name (str) - List pod with this name.
- status (list[str]): List pods in given state. Legal values are:
- status (List[str]): List pods in given state. Legal values are:
"created", "running", "paused", "stopped", "exited", or "unknown"
- label (list[str]): List pods with given labels.
- network (list[str]): List pods associated with given Network Ids (not Names).
- label (List[str]): List pods with given labels.
- network (List[str]): List pods associated with given Network Ids (not Names).
Raises:
APIError: when an error returned by service
@ -86,12 +84,12 @@ class PodsManager(Manager):
response.raise_for_status()
return [self.prepare_model(attrs=i) for i in response.json()]
def prune(self, filters: Optional[dict[str, str]] = None) -> dict[str, Any]:
def prune(self, filters: Optional[Dict[str, str]] = None) -> Dict[str, Any]:
"""Delete unused Pods.
Returns:
Dictionary Keys:
- PodsDeleted (list[str]): List of pod ids deleted.
- PodsDeleted (List[str]): List of pod ids deleted.
- SpaceReclaimed (int): Always zero.
Raises:
@ -100,7 +98,7 @@ class PodsManager(Manager):
response = self.client.post("/pods/prune", params={"filters": api.prepare_filters(filters)})
response.raise_for_status()
deleted: builtins.list[str] = []
deleted: List[str] = []
for item in response.json():
if item["Err"] is not None:
raise APIError(
@ -131,14 +129,12 @@ class PodsManager(Manager):
response = self.client.delete(f"/pods/{pod_id}", params={"force": force})
response.raise_for_status()
def stats(
self, **kwargs
) -> Union[builtins.list[dict[str, Any]], Iterator[builtins.list[dict[str, Any]]]]:
def stats(self, **kwargs) -> Union[List[Dict[str, Any]], Iterator[List[Dict[str, Any]]]]:
"""Resource usage statistics for the containers in pods.
Keyword Args:
all (bool): Provide statistics for all running pods.
name (Union[str, list[str]]): Pods to include in report.
name (Union[str, List[str]]): Pods to include in report.
stream (bool): Stream statistics until cancelled. Default: False.
decode (bool): If True, response will be decoded into dict. Default: False.

View File

@ -1,8 +1,7 @@
"""Module for tracking registry metadata."""
import logging
from typing import Any, Optional, Union
from collections.abc import Mapping
from typing import Any, Mapping, Optional, Union
from podman import api
from podman.domain.images import Image
@ -40,7 +39,7 @@ class RegistryData(PodmanResource):
Args:
platform: Platform for which to pull Image. Default: None (all platforms.)
"""
repository, _ = api.parse_repository(self.image_name)
repository = api.parse_repository(self.image_name)
return self.manager.pull(repository, tag=self.id, platform=platform)
def has_platform(self, platform: Union[str, Mapping[str, Any]]) -> bool:

View File

@ -1,8 +1,7 @@
"""Model and Manager for Secrets resources."""
from contextlib import suppress
from typing import Any, Optional, Union
from collections.abc import Mapping
from typing import Any, List, Mapping, Optional, Union
from podman.api import APIClient
from podman.domain.manager import Manager, PodmanResource
@ -76,11 +75,11 @@ class SecretsManager(Manager):
response.raise_for_status()
return self.prepare_model(attrs=response.json())
def list(self, **kwargs) -> list[Secret]:
def list(self, **kwargs) -> List[Secret]:
"""Report on Secrets.
Keyword Args:
filters (dict[str, Any]): Ignored.
filters (Dict[str, Any]): Ignored.
Raises:
APIError: when error returned by service

View File

@ -1,7 +1,7 @@
"""SystemManager to provide system level information from Podman service."""
import logging
from typing import Any, Optional, Union
from typing import Any, Dict, Optional
from podman.api.client import APIClient
from podman import api
@ -20,7 +20,7 @@ class SystemManager:
"""
self.client = client
def df(self) -> dict[str, Any]: # pylint: disable=invalid-name
def df(self) -> Dict[str, Any]: # pylint: disable=invalid-name
"""Disk usage by Podman resources.
Returns:
@ -30,25 +30,21 @@ class SystemManager:
response.raise_for_status()
return response.json()
def info(self, *_, **__) -> dict[str, Any]:
def info(self, *_, **__) -> Dict[str, Any]:
"""Returns information on Podman service."""
response = self.client.get("/info")
response.raise_for_status()
return response.json()
def login( # pylint: disable=too-many-arguments,too-many-positional-arguments,unused-argument
def login(
self,
username: str,
password: Optional[str] = None,
email: Optional[str] = None,
registry: Optional[str] = None,
reauth: Optional[bool] = False,
dockercfg_path: Optional[str] = None,
auth: Optional[str] = None,
identitytoken: Optional[str] = None,
registrytoken: Optional[str] = None,
tls_verify: Optional[Union[bool, str]] = None,
) -> dict[str, Any]:
reauth: Optional[bool] = False, # pylint: disable=unused-argument
dockercfg_path: Optional[str] = None, # pylint: disable=unused-argument
) -> Dict[str, Any]:
"""Log into Podman service.
Args:
@ -56,14 +52,9 @@ class SystemManager:
password: Registry plaintext password
email: Registry account email address
registry: URL for registry access. For example,
https://quay.io/v2
reauth: Ignored: If True, refresh existing authentication. Default: False
dockercfg_path: Ignored: Path to custom configuration file.
auth: TODO: Add description based on the source code of Podman.
identitytoken: IdentityToken is used to authenticate the user and
get an access token for the registry.
registrytoken: RegistryToken is a bearer token to be sent to a registry
tls_verify: Whether to verify TLS certificates.
https://quay.io/v2
"""
payload = {
@ -71,9 +62,6 @@ class SystemManager:
"password": password,
"email": email,
"serveraddress": registry,
"auth": auth,
"identitytoken": identitytoken,
"registrytoken": registrytoken,
}
payload = api.prepare_body(payload)
response = self.client.post(
@ -81,7 +69,6 @@ class SystemManager:
headers={"Content-type": "application/json"},
data=payload,
compatible=True,
verify=tls_verify, # Pass tls_verify to the client
)
response.raise_for_status()
return response.json()
@ -91,7 +78,7 @@ class SystemManager:
response = self.client.head("/_ping")
return response.ok
def version(self, **kwargs) -> dict[str, Any]:
def version(self, **kwargs) -> Dict[str, Any]:
"""Get version information from service.
Keyword Args:

View File

@ -1,11 +1,12 @@
"""Model and Manager for Volume resources."""
import logging
from typing import Any, Literal, Optional, Union
from typing import Any, Dict, List, Optional, Union
import requests
from podman import api
from podman.api import Literal
from podman.domain.manager import Manager, PodmanResource
from podman.errors import APIError
@ -35,23 +36,6 @@ class Volume(PodmanResource):
"""
self.manager.remove(self.name, force=force)
def inspect(self, **kwargs) -> dict:
"""Inspect this volume
Keyword Args:
tls_verify (bool) - Require TLS verification. Default: True.
Returns:
Display attributes of volume.
Raises:
APIError: when service reports an error
"""
params = {"tlsVerify": kwargs.get("tls_verify", True)}
response = self.client.get(f"/volumes/{self.id}/json", params=params)
response.raise_for_status()
return response.json()
class VolumesManager(Manager):
"""Specialized Manager for Volume resources."""
@ -69,8 +53,8 @@ class VolumesManager(Manager):
Keyword Args:
driver (str): Volume driver to use
driver_opts (dict[str, str]): Options to use with driver
labels (dict[str, str]): Labels to apply to volume
driver_opts (Dict[str, str]): Options to use with driver
labels (Dict[str, str]): Labels to apply to volume
Raises:
APIError: when service reports error
@ -108,14 +92,14 @@ class VolumesManager(Manager):
response.raise_for_status()
return self.prepare_model(attrs=response.json())
def list(self, *_, **kwargs) -> list[Volume]:
def list(self, *_, **kwargs) -> List[Volume]:
"""Report on volumes.
Keyword Args:
filters (dict[str, str]): criteria to filter Volume list
filters (Dict[str, str]): criteria to filter Volume list
- driver (str): filter volumes by their driver
- label (dict[str, str]): filter by label and/or value
- label (Dict[str, str]): filter by label and/or value
- name (str): filter by volume's name
"""
filters = api.prepare_filters(kwargs.get("filters"))
@ -128,9 +112,8 @@ class VolumesManager(Manager):
return [self.prepare_model(i) for i in response.json()]
def prune(
self,
filters: Optional[dict[str, str]] = None, # pylint: disable=unused-argument
) -> dict[Literal["VolumesDeleted", "SpaceReclaimed"], Any]:
self, filters: Optional[Dict[str, str]] = None # pylint: disable=unused-argument
) -> Dict[Literal["VolumesDeleted", "SpaceReclaimed"], Any]:
"""Delete unused volumes.
Args:
@ -143,7 +126,7 @@ class VolumesManager(Manager):
data = response.json()
response.raise_for_status()
volumes: list[str] = []
volumes: List[str] = []
space_reclaimed = 0
for item in data:
if "Err" in item:

View File

@ -21,7 +21,6 @@ __all__ = [
'NotFound',
'NotFoundError',
'PodmanError',
'StreamParseError',
]
try:
@ -33,7 +32,6 @@ try:
InvalidArgument,
NotFound,
PodmanError,
StreamParseError,
)
except ImportError:
pass
@ -48,9 +46,7 @@ class NotFoundError(HTTPException):
def __init__(self, message, response=None):
super().__init__(message)
self.response = response
warnings.warn(
"APIConnection() and supporting classes.", PendingDeprecationWarning, stacklevel=2
)
warnings.warn("APIConnection() and supporting classes.", PendingDeprecationWarning)
# If found, use new ImageNotFound otherwise old class
@ -58,7 +54,7 @@ try:
from .exceptions import ImageNotFound
except ImportError:
class ImageNotFound(NotFoundError): # type: ignore[no-redef]
class ImageNotFound(NotFoundError):
"""HTTP request returned a http.HTTPStatus.NOT_FOUND.
Specialized for Image not found. Deprecated.
@ -102,9 +98,7 @@ class RequestError(HTTPException):
def __init__(self, message, response=None):
super().__init__(message)
self.response = response
warnings.warn(
"APIConnection() and supporting classes.", PendingDeprecationWarning, stacklevel=2
)
warnings.warn("APIConnection() and supporting classes.", PendingDeprecationWarning)
class InternalServerError(HTTPException):
@ -116,6 +110,4 @@ class InternalServerError(HTTPException):
def __init__(self, message, response=None):
super().__init__(message)
self.response = response
warnings.warn(
"APIConnection() and supporting classes.", PendingDeprecationWarning, stacklevel=2
)
warnings.warn("APIConnection() and supporting classes.", PendingDeprecationWarning)

View File

@ -1,7 +1,6 @@
"""Podman API Errors."""
from typing import Optional, Union, TYPE_CHECKING
from collections.abc import Iterable
from typing import Iterable, List, Optional, Union, TYPE_CHECKING
from requests import Response
from requests.exceptions import HTTPError
@ -113,10 +112,10 @@ class ContainerError(PodmanError):
self,
container: "Container",
exit_status: int,
command: Union[str, list[str]],
command: Union[str, List[str]],
image: str,
stderr: Optional[Iterable[str]] = None,
): # pylint: disable=too-many-positional-arguments
):
"""Initialize ContainerError.
Args:
@ -143,8 +142,3 @@ class ContainerError(PodmanError):
class InvalidArgument(PodmanError):
"""Parameter to method/function was not valid."""
class StreamParseError(RuntimeError):
def __init__(self, reason):
self.msg = reason

View File

View File

@ -7,3 +7,4 @@
## Coverage Reporting Framework
`coverage.py` see https://coverage.readthedocs.io/en/coverage-5.0.3/#quick-start

View File

@ -3,5 +3,5 @@
# Do not auto-update these from version.py,
# as test code should be changed to reflect changes in Podman API versions
BASE_SOCK = "unix:///run/api.sock"
LIBPOD_URL = "http://%2Frun%2Fapi.sock/v5.6.0/libpod"
LIBPOD_URL = "http://%2Frun%2Fapi.sock/v4.8.1/libpod"
COMPATIBLE_URL = "http://%2Frun%2Fapi.sock/v1.40"

View File

@ -1,21 +0,0 @@
import pytest
def pytest_addoption(parser):
parser.addoption(
"--pnext", action="store_true", default=False, help="run tests against podman_next copr"
)
def pytest_configure(config):
config.addinivalue_line("markers", "pnext: mark test as run against podman_next")
def pytest_collection_modifyitems(config, items):
if config.getoption("--pnext"):
# --pnext given in cli: run tests marked as pnext
return
podman_next = pytest.mark.skip(reason="need --pnext option to run")
for item in items:
if "pnext" in item.keywords:
item.add_marker(podman_next)

View File

@ -13,7 +13,6 @@
# under the License.
#
"""Base integration test code"""
import logging
import os
import shutil

View File

@ -39,10 +39,10 @@ class AdapterIntegrationTest(base.IntegrationTest):
podman.start(check_socket=False)
time.sleep(0.5)
with PodmanClient(base_url="tcp:localhost:8889") as client:
with PodmanClient(base_url=f"tcp:localhost:8889") as client:
self.assertTrue(client.ping())
with PodmanClient(base_url="http://localhost:8889") as client:
with PodmanClient(base_url=f"http://localhost:8889") as client:
self.assertTrue(client.ping())
finally:
podman.stop()

View File

@ -1,11 +1,9 @@
import unittest
import re
import os
import pytest
import podman.tests.integration.base as base
from podman import PodmanClient
from podman.tests.utils import PODMAN_VERSION
# @unittest.skipIf(os.geteuid() != 0, 'Skipping, not running as root')
@ -22,11 +20,11 @@ class ContainersIntegrationTest(base.IntegrationTest):
self.alpine_image = self.client.images.pull("quay.io/libpod/alpine", tag="latest")
self.containers = []
def tearDown(self):
def tearUp(self):
for container in self.containers:
container.remove(force=True)
def test_container_named_volume_mount(self):
def test_container_volume_mount(self):
with self.subTest("Check volume mount"):
volumes = {
'test_bind_1': {'bind': '/mnt/vol1', 'mode': 'rw'},
@ -54,33 +52,6 @@ class ContainersIntegrationTest(base.IntegrationTest):
for o in other_options:
self.assertIn(o, mount.get('Options'))
def test_container_directory_volume_mount(self):
"""Test that directories can be mounted with the ``volume`` parameter."""
with self.subTest("Check bind mount"):
volumes = {
"/etc/hosts": dict(bind="/test_ro", mode='ro'),
"/etc/hosts": dict(bind="/test_rw", mode='rw'), # noqa: F601
}
container = self.client.containers.create(
self.alpine_image, command=["cat", "/test_ro", "/test_rw"], volumes=volumes
)
container_mounts = container.attrs.get('Mounts', {})
self.assertEqual(len(container_mounts), len(volumes))
self.containers.append(container)
for directory, mount_spec in volumes.items():
self.assertIn(
f"{directory}:{mount_spec['bind']}:{mount_spec['mode']},rprivate,rbind",
container.attrs.get('HostConfig', {}).get('Binds', list()),
)
# check if container can be started and exits with EC == 0
container.start()
container.wait()
self.assertEqual(container.attrs.get('State', dict()).get('ExitCode', 256), 0)
def test_container_extra_hosts(self):
"""Test Container Extra hosts"""
extra_hosts = {"host1 host3": "127.0.0.2", "host2": "127.0.0.3"}
@ -104,44 +75,6 @@ class ContainersIntegrationTest(base.IntegrationTest):
for hosts_entry in formatted_hosts:
self.assertIn(hosts_entry, logs)
def test_container_environment_variables(self):
"""Test environment variables passed to the container."""
with self.subTest("Check environment variables as dictionary"):
env_dict = {"MY_VAR": "123", "ANOTHER_VAR": "456"}
container = self.client.containers.create(
self.alpine_image, command=["env"], environment=env_dict
)
self.containers.append(container)
container_env = container.attrs.get('Config', {}).get('Env', [])
for key, value in env_dict.items():
self.assertIn(f"{key}={value}", container_env)
container.start()
container.wait()
logs = b"\n".join(container.logs()).decode()
for key, value in env_dict.items():
self.assertIn(f"{key}={value}", logs)
with self.subTest("Check environment variables as list"):
env_list = ["MY_VAR=123", "ANOTHER_VAR=456"]
container = self.client.containers.create(
self.alpine_image, command=["env"], environment=env_list
)
self.containers.append(container)
container_env = container.attrs.get('Config', {}).get('Env', [])
for env in env_list:
self.assertIn(env, container_env)
container.start()
container.wait()
logs = b"\n".join(container.logs()).decode()
for env in env_list:
self.assertIn(env, logs)
def _test_memory_limit(self, parameter_name, host_config_name, set_mem_limit=False):
"""Base for tests which checks memory limits"""
memory_limit_tests = [
@ -209,16 +142,6 @@ class ContainersIntegrationTest(base.IntegrationTest):
'1223/tcp': [{'HostIp': '', 'HostPort': '1235'}],
},
},
{
'input': {
2244: 3344,
},
'expected_output': {
'2244/tcp': [
{'HostIp': '', 'HostPort': '3344'},
],
},
},
]
for port_test in port_tests:
@ -226,32 +149,10 @@ class ContainersIntegrationTest(base.IntegrationTest):
self.containers.append(container)
self.assertTrue(
all(
[
x in port_test['expected_output']
for x in container.attrs.get('HostConfig', {}).get('PortBindings')
]
)
)
def test_container_dns_option(self):
expected_dns_opt = ['edns0']
container = self.client.containers.create(
self.alpine_image, command=["cat", "/etc/resolv.conf"], dns_opt=expected_dns_opt
)
self.containers.append(container)
with self.subTest("Check HostConfig"):
self.assertEqual(
container.attrs.get('HostConfig', {}).get('DnsOptions'), expected_dns_opt
)
with self.subTest("Check content of /etc/resolv.conf"):
container.start()
container.wait()
self.assertTrue(
all([opt in b"\n".join(container.logs()).decode() for opt in expected_dns_opt])
all([
x in port_test['expected_output']
for x in container.attrs.get('HostConfig', {}).get('PortBindings')
])
)
def test_container_healthchecks(self):
@ -279,11 +180,6 @@ class ContainersIntegrationTest(base.IntegrationTest):
"""Test passing shared memory size"""
self._test_memory_limit('shm_size', 'ShmSize')
@pytest.mark.skipif(os.geteuid() != 0, reason='Skipping, not running as root')
@pytest.mark.skipif(
PODMAN_VERSION >= (5, 6, 0),
reason="Test against this feature in Podman 5.6.0 or greater https://github.com/containers/podman/pull/25942",
)
def test_container_mounts(self):
"""Test passing mounts"""
with self.subTest("Check bind mount"):
@ -333,70 +229,6 @@ class ContainersIntegrationTest(base.IntegrationTest):
)
)
with self.subTest("Check uppercase mount option attributes"):
mount = {
"TypE": "bind",
"SouRce": "/etc/hosts",
"TarGet": "/test",
"Read_Only": True,
"ReLabel": "Z",
}
container = self.client.containers.create(
self.alpine_image, command=["cat", "/test"], mounts=[mount]
)
self.containers.append(container)
self.assertIn(
f"{mount['SouRce']}:{mount['TarGet']}:ro,Z,rprivate,rbind",
container.attrs.get('HostConfig', {}).get('Binds', list()),
)
# check if container can be started and exits with EC == 0
container.start()
container.wait()
self.assertEqual(container.attrs.get('State', dict()).get('ExitCode', 256), 0)
@pytest.mark.skipif(os.geteuid() != 0, reason='Skipping, not running as root')
@pytest.mark.skipif(
PODMAN_VERSION < (5, 6, 0),
reason="Test against this feature before Podman 5.6.0 https://github.com/containers/podman/pull/25942",
)
def test_container_mounts_without_rw_as_default(self):
"""Test passing mounts"""
with self.subTest("Check bind mount"):
mount = {
"type": "bind",
"source": "/etc/hosts",
"target": "/test",
"read_only": True,
"relabel": "Z",
}
container = self.client.containers.create(
self.alpine_image, command=["cat", "/test"], mounts=[mount]
)
self.containers.append(container)
self.assertIn(
f"{mount['source']}:{mount['target']}:ro,Z,rprivate,rbind",
container.attrs.get('HostConfig', {}).get('Binds', list()),
)
# check if container can be started and exits with EC == 0
container.start()
container.wait()
self.assertEqual(container.attrs.get('State', dict()).get('ExitCode', 256), 0)
with self.subTest("Check tmpfs mount"):
mount = {"type": "tmpfs", "source": "tmpfs", "target": "/test", "size": "456k"}
container = self.client.containers.create(
self.alpine_image, command=["df", "-h"], mounts=[mount]
)
self.containers.append(container)
self.assertEqual(
container.attrs.get('HostConfig', {}).get('Tmpfs', {}).get(mount['target']),
f"size={mount['size']},rprivate,nosuid,nodev,tmpcopyup",
)
def test_container_devices(self):
devices = ["/dev/null:/dev/foo", "/dev/zero:/dev/bar"]
container = self.client.containers.create(
@ -409,13 +241,11 @@ class ContainersIntegrationTest(base.IntegrationTest):
for device in devices:
path_on_host, path_in_container = device.split(':', 1)
self.assertTrue(
any(
[
c.get('PathOnHost') == path_on_host
and c.get('PathInContainer') == path_in_container
for c in container_devices
]
)
any([
c.get('PathOnHost') == path_on_host
and c.get('PathInContainer') == path_in_container
for c in container_devices
])
)
with self.subTest("Check devices in running container object"):

View File

@ -1,122 +0,0 @@
import podman.tests.integration.base as base
from podman import PodmanClient
# @unittest.skipIf(os.geteuid() != 0, 'Skipping, not running as root')
class ContainersExecIntegrationTests(base.IntegrationTest):
"""Containers integration tests for exec"""
def setUp(self):
super().setUp()
self.client = PodmanClient(base_url=self.socket_uri)
self.addCleanup(self.client.close)
self.alpine_image = self.client.images.pull("quay.io/libpod/alpine", tag="latest")
self.containers = []
def tearDown(self):
for container in self.containers:
container.remove(force=True)
def test_container_exec_run(self):
"""Test any command that will return code 0 and no output"""
container = self.client.containers.create(self.alpine_image, command=["top"], detach=True)
container.start()
error_code, stdout = container.exec_run("echo hello")
self.assertEqual(error_code, 0)
self.assertEqual(stdout, b'\x01\x00\x00\x00\x00\x00\x00\x06hello\n')
def test_container_exec_run_errorcode(self):
"""Test a failing command with stdout and stderr in a single bytestring"""
container = self.client.containers.create(self.alpine_image, command=["top"], detach=True)
container.start()
error_code, output = container.exec_run("ls nonexistent")
self.assertEqual(error_code, 1)
self.assertEqual(
output, b"\x02\x00\x00\x00\x00\x00\x00+ls: nonexistent: No such file or directory\n"
)
def test_container_exec_run_demux(self):
"""Test a failing command with stdout and stderr in a bytestring tuple"""
container = self.client.containers.create(self.alpine_image, command=["top"], detach=True)
container.start()
error_code, output = container.exec_run("ls nonexistent", demux=True)
self.assertEqual(error_code, 1)
self.assertEqual(output[0], None)
self.assertEqual(output[1], b"ls: nonexistent: No such file or directory\n")
def test_container_exec_run_stream(self):
"""Test streaming the output from a long running command."""
container = self.client.containers.create(self.alpine_image, command=["top"], detach=True)
container.start()
command = [
'/bin/sh',
'-c',
'echo 0 ; sleep .1 ; echo 1 ; sleep .1 ; echo 2 ; sleep .1 ;',
]
error_code, output = container.exec_run(command, stream=True)
self.assertEqual(error_code, None)
self.assertEqual(
list(output),
[
b'0\n',
b'1\n',
b'2\n',
],
)
def test_container_exec_run_stream_demux(self):
"""Test streaming the output from a long running command with demux enabled."""
container = self.client.containers.create(self.alpine_image, command=["top"], detach=True)
container.start()
command = [
'/bin/sh',
'-c',
'echo 0 ; >&2 echo 1 ; sleep .1 ; '
+ 'echo 2 ; >&2 echo 3 ; sleep .1 ; '
+ 'echo 4 ; >&2 echo 5 ; sleep .1 ;',
]
error_code, output = container.exec_run(command, stream=True, demux=True)
self.assertEqual(error_code, None)
self.assertEqual(
list(output),
[
(b'0\n', None),
(None, b'1\n'),
(b'2\n', None),
(None, b'3\n'),
(b'4\n', None),
(None, b'5\n'),
],
)
def test_container_exec_run_stream_detach(self):
"""Test streaming the output from a long running command with detach enabled."""
container = self.client.containers.create(self.alpine_image, command=["top"], detach=True)
container.start()
command = [
'/bin/sh',
'-c',
'echo 0 ; sleep .1 ; echo 1 ; sleep .1 ; echo 2 ; sleep .1 ;',
]
error_code, output = container.exec_run(command, stream=True, detach=True)
# Detach should make the ``exec_run`` ignore the ``stream`` flag so we will
# assert against the standard, non-streaming behavior.
self.assertEqual(error_code, 0)
# The endpoint should return immediately, before we are able to actually
# get any of the output.
self.assertEqual(
output,
b'\n',
)

View File

@ -1,15 +1,14 @@
import io
import random
import tarfile
import tempfile
import unittest
try:
# Python >= 3.10
from collections.abc import Iterator
except ImportError:
except:
# Python < 3.10
from collections.abc import Iterator
from collections import Iterator
import podman.tests.integration.base as base
from podman import PodmanClient
@ -17,6 +16,7 @@ from podman.domain.containers import Container
from podman.domain.images import Image
from podman.errors import NotFound
# @unittest.skipIf(os.geteuid() != 0, 'Skipping, not running as root')
@ -42,9 +42,7 @@ class ContainersIntegrationTest(base.IntegrationTest):
with self.subTest("Create from Alpine Image"):
container = self.client.containers.create(
self.alpine_image,
command=["echo", random_string],
ports={'2222/tcp': 3333, 2244: 3344},
self.alpine_image, command=["echo", random_string], ports={'2222/tcp': 3333}
)
self.assertIsInstance(container, Container)
self.assertGreater(len(container.attrs), 0)
@ -64,10 +62,6 @@ class ContainersIntegrationTest(base.IntegrationTest):
self.assertEqual(
"3333", container.attrs["NetworkSettings"]["Ports"]["2222/tcp"][0]["HostPort"]
)
self.assertIn("2244/tcp", container.attrs["NetworkSettings"]["Ports"])
self.assertEqual(
"3344", container.attrs["NetworkSettings"]["Ports"]["2244/tcp"][0]["HostPort"]
)
file_contents = b"This is an integration test for archive."
file_buffer = io.BytesIO(file_contents)
@ -142,24 +136,6 @@ class ContainersIntegrationTest(base.IntegrationTest):
top_ctnr.reload()
self.assertIn(top_ctnr.status, ("exited", "stopped"))
with self.subTest("Create-Init-Start Container"):
top_ctnr = self.client.containers.create(
self.alpine_image, ["/usr/bin/top"], name="TestInitPs", detach=True
)
self.assertEqual(top_ctnr.status, "created")
top_ctnr.init()
top_ctnr.reload()
self.assertEqual(top_ctnr.status, "initialized")
top_ctnr.start()
top_ctnr.reload()
self.assertEqual(top_ctnr.status, "running")
top_ctnr.stop()
top_ctnr.reload()
self.assertIn(top_ctnr.status, ("exited", "stopped"))
with self.subTest("Prune Containers"):
report = self.client.containers.prune()
self.assertIn(top_ctnr.id, report["ContainersDeleted"])
@ -182,93 +158,6 @@ class ContainersIntegrationTest(base.IntegrationTest):
self.assertIn("localhost/busybox.local:unittest", image.attrs["RepoTags"])
busybox.remove(force=True)
def test_container_rm_anonymous_volume(self):
with self.subTest("Check anonymous volume is removed"):
container_file = """
FROM alpine
VOLUME myvol
ENV foo=bar
"""
tmp_file = tempfile.mktemp()
file = open(tmp_file, 'w')
file.write(container_file)
file.close()
self.client.images.build(dockerfile=tmp_file, tag="test-img", path=".")
# get existing number of containers and volumes
existing_containers = self.client.containers.list(all=True)
existing_volumes = self.client.volumes.list()
container = self.client.containers.create("test-img")
container_list = self.client.containers.list(all=True)
self.assertEqual(len(container_list), len(existing_containers) + 1)
volume_list = self.client.volumes.list()
self.assertEqual(len(volume_list), len(existing_volumes) + 1)
# remove the container with v=True
container.remove(v=True)
container_list = self.client.containers.list(all=True)
self.assertEqual(len(container_list), len(existing_containers))
volume_list = self.client.volumes.list()
self.assertEqual(len(volume_list), len(existing_volumes))
def test_container_labels(self):
labels = {'label1': 'value1', 'label2': 'value2'}
labeled_container = self.client.containers.create(self.alpine_image, labels=labels)
unlabeled_container = self.client.containers.create(
self.alpine_image,
)
# inspect and list have 2 different schemas so we need to verify that we can
# successfully retrieve the labels on both
try:
# inspect schema
self.assertEqual(labeled_container.labels, labels)
self.assertEqual(unlabeled_container.labels, {})
# list schema
for container in self.client.containers.list(all=True):
if container.id == labeled_container.id:
self.assertEqual(container.labels, labels)
elif container.id == unlabeled_container.id:
self.assertEqual(container.labels, {})
finally:
labeled_container.remove(v=True)
unlabeled_container.remove(v=True)
def test_container_update(self):
"""Update container"""
to_update_container = self.client.containers.run(
self.alpine_image, name="to_update_container", detach=True
)
with self.subTest("Test container update changing the healthcheck"):
to_update_container.update(health_cmd="ls")
self.assertEqual(
to_update_container.inspect()['Config']['Healthcheck']['Test'], ['CMD-SHELL', 'ls']
)
with self.subTest("Test container update disabling the healthcheck"):
to_update_container.update(no_healthcheck=True)
self.assertEqual(
to_update_container.inspect()['Config']['Healthcheck']['Test'], ['NONE']
)
with self.subTest("Test container update passing payload and data"):
to_update_container.update(
restart_policy="always", health_cmd="echo", health_timeout="10s"
)
self.assertEqual(
to_update_container.inspect()['Config']['Healthcheck']['Test'],
['CMD-SHELL', 'echo'],
)
self.assertEqual(
to_update_container.inspect()['Config']['Healthcheck']['Timeout'], 10000000000
)
self.assertEqual(
to_update_container.inspect()['HostConfig']['RestartPolicy']['Name'], 'always'
)
to_update_container.remove(v=True)
if __name__ == '__main__':
unittest.main()

View File

@ -13,17 +13,19 @@
# under the License.
#
"""Images integration tests."""
import io
import platform
import queue
import tarfile
import threading
import types
import unittest
from contextlib import suppress
from datetime import datetime, timedelta
import podman.tests.integration.base as base
from podman import PodmanClient
from podman.domain.images import Image
from podman.errors import APIError, ImageNotFound, PodmanError
from podman.errors import APIError, ImageNotFound
# @unittest.skipIf(os.geteuid() != 0, 'Skipping, not running as root')
@ -42,7 +44,7 @@ class ImagesIntegrationTest(base.IntegrationTest):
"""Test Image CRUD.
Notes:
Written to maximize reuse of pulled image.
Written to maximize re-use of pulled image.
"""
with self.subTest("Pull Alpine Image"):
@ -107,89 +109,31 @@ class ImagesIntegrationTest(base.IntegrationTest):
self.assertIn(image.id, deleted)
self.assertGreater(actual["SpaceReclaimed"], 0)
with self.subTest("Export Image to tarball (in memory) with named mode"):
alpine_image = self.client.images.pull("quay.io/libpod/alpine", tag="latest")
image_buffer = io.BytesIO()
for chunk in alpine_image.save(named=True):
image_buffer.write(chunk)
image_buffer.seek(0, 0)
with tarfile.open(fileobj=image_buffer, mode="r") as tar:
items_in_tar = tar.getnames()
# Check if repositories file is available in the tarball
self.assertIn("repositories", items_in_tar)
# Extract the 'repositories' file
repositories_file = tar.extractfile("repositories")
if repositories_file is not None:
# Check the content of the "repositories" file.
repositories_content = repositories_file.read().decode("utf-8")
# Check if "repositories" file contains the name of the Image (named).
self.assertTrue("alpine" in str(repositories_content))
def test_search(self):
# N/B: This is an infrequently used feature, that tends to flake a lot.
# Just check that it doesn't throw an exception and move on.
self.client.images.search("alpine")
actual = self.client.images.search("alpine", filters={"is-official": True})
self.assertEqual(len(actual), 1)
self.assertEqual(actual[0]["Official"], "[OK]")
actual = self.client.images.search("alpine", listTags=True)
self.assertIsNotNone(actual[0]["Tag"])
@unittest.skip("Needs Podman 3.1.0")
def test_corrupt_load(self):
with self.assertRaises(APIError) as e:
next(self.client.images.load(b"This is a corrupt tarball"))
next(self.client.images.load("This is a corrupt tarball".encode("utf-8")))
self.assertIn("payload does not match", e.exception.explanation)
def test_build(self):
buffer = io.StringIO("""FROM quay.io/libpod/alpine_labels:latest""")
buffer = io.StringIO(f"""FROM quay.io/libpod/alpine_labels:latest""")
image, stream = self.client.images.build(fileobj=buffer)
self.assertIsNotNone(image)
self.assertIsNotNone(image.id)
def test_build_with_context(self):
context = io.BytesIO()
with tarfile.open(fileobj=context, mode="w") as tar:
def add_file(name: str, content: str):
binary_content = content.encode("utf-8")
fileobj = io.BytesIO(binary_content)
tarinfo = tarfile.TarInfo(name=name)
tarinfo.size = len(binary_content)
tar.addfile(tarinfo, fileobj)
# Use a non-standard Dockerfile name to test the 'dockerfile' argument
add_file(
"MyDockerfile", ("FROM quay.io/libpod/alpine_labels:latest\nCOPY example.txt .\n")
)
add_file("example.txt", "This is an example file.\n")
# Rewind to the start of the generated file so we can read it
context.seek(0)
with self.assertRaises(PodmanError):
# If requesting a custom context, must provide the context as `fileobj`
self.client.images.build(custom_context=True, path='invalid')
with self.assertRaises(PodmanError):
# If requesting a custom context, currently must specify the dockerfile name
self.client.images.build(custom_context=True, fileobj=context)
image, stream = self.client.images.build(
fileobj=context,
dockerfile="MyDockerfile",
custom_context=True,
)
self.assertIsNotNone(image)
self.assertIsNotNone(image.id)
@unittest.skipIf(platform.architecture()[0] == "32bit", "no 32-bit image available")
def test_pull_stream(self):
generator = self.client.images.pull("ubi8", tag="latest", stream=True)
self.assertIsInstance(generator, types.GeneratorType)
@unittest.skipIf(platform.architecture()[0] == "32bit", "no 32-bit image available")
def test_pull_stream_decode(self):
generator = self.client.images.pull("ubi8", tag="latest", stream=True, decode=True)
self.assertIsInstance(generator, types.GeneratorType)
def test_scp(self):
with self.assertRaises(APIError) as e:
next(

View File

@ -13,7 +13,7 @@
# under the License.
#
"""Network integration tests."""
import os
import random
import unittest
from contextlib import suppress

View File

@ -62,5 +62,7 @@ class SystemIntegrationTest(base.IntegrationTest):
)
def test_from_env(self):
"""integration: from_env() no error"""
PodmanClient.from_env()
"""integration: from_env() error message"""
with self.assertRaises(ValueError) as e:
next(self.client.from_env())
self.assertIn("CONTAINER_HOST or DOCKER_HOST", repr(e.exception))

View File

@ -13,14 +13,13 @@
# under the License.
#
"""Integration Test Utils"""
import logging
import os
import shutil
import subprocess
import threading
from contextlib import suppress
from typing import Optional
from typing import List, Optional
import time
@ -50,10 +49,10 @@ class PodmanLauncher:
self.socket_file: str = socket_uri.replace('unix://', '')
self.log_level = log_level
self.proc: Optional[subprocess.Popen[bytes]] = None
self.proc = None
self.reference_id = hash(time.monotonic())
self.cmd: list[str] = []
self.cmd: List[str] = []
if privileged:
self.cmd.append('sudo')
@ -67,14 +66,12 @@ class PodmanLauncher:
if os.environ.get("container") == "oci":
self.cmd.append("--storage-driver=vfs")
self.cmd.extend(
[
"system",
"service",
f"--time={timeout}",
socket_uri,
]
)
self.cmd.extend([
"system",
"service",
f"--time={timeout}",
socket_uri,
])
process = subprocess.run(
[podman_exe, "--version"], check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
@ -98,7 +95,9 @@ class PodmanLauncher:
def consume(line: str):
logger.debug(line.strip("\n") + f" refid={self.reference_id}")
self.proc = subprocess.Popen(self.cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) # pylint: disable=consider-using-with
self.proc = subprocess.Popen(
self.cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
) # pylint: disable=consider-using-with
threading.Thread(target=consume_lines, args=[self.proc.stdout, consume]).start()
if not check_socket:

View File

@ -3,7 +3,7 @@ import pathlib
import unittest
from typing import Any, Optional
from unittest import mock
from unittest.mock import mock_open, patch
from unittest.mock import Mock, mock_open, patch
from dataclasses import dataclass
@ -11,7 +11,7 @@ from podman import api
class TestUtilsCase(unittest.TestCase):
def test_format_filters(self) -> None:
def test_format_filters(self):
@dataclass
class TestCase:
name: str
@ -22,10 +22,10 @@ class TestUtilsCase(unittest.TestCase):
TestCase(name="empty str", input="", expected=None),
TestCase(name="str", input="reference=fedora", expected='{"reference": ["fedora"]}'),
TestCase(
name="list[str]", input=["reference=fedora"], expected='{"reference": ["fedora"]}'
name="List[str]", input=["reference=fedora"], expected='{"reference": ["fedora"]}'
),
TestCase(
name="dict[str,str]",
name="Dict[str,str]",
input={"reference": "fedora"},
expected='{"reference": ["fedora"]}',
),
@ -42,12 +42,12 @@ class TestUtilsCase(unittest.TestCase):
if actual is not None:
self.assertIsInstance(actual, str)
def test_containerignore_404(self) -> None:
def test_containerignore_404(self):
actual = api.prepare_containerignore("/does/not/exists")
self.assertListEqual([], actual)
@patch.object(pathlib.Path, "exists", return_value=True)
def test_containerignore_read(self, patch_exists) -> None:
def test_containerignore_read(self, patch_exists):
data = r"""# unittest
#Ignore the logs directory
@ -74,7 +74,7 @@ class TestUtilsCase(unittest.TestCase):
patch_exists.assert_called_once_with()
@patch.object(pathlib.Path, "exists", return_value=True)
def test_containerignore_empty(self, patch_exists) -> None:
def test_containerignore_empty(self, patch_exists):
data = r"""# unittest
"""
@ -86,21 +86,21 @@ class TestUtilsCase(unittest.TestCase):
patch_exists.assert_called_once_with()
@mock.patch("pathlib.Path.parent", autospec=True)
def test_containerfile_1(self, mock_parent) -> None:
def test_containerfile_1(self, mock_parent):
mock_parent.samefile.return_value = True
actual = api.prepare_containerfile("/work", "/work/Dockerfile")
self.assertEqual(actual, "Dockerfile")
mock_parent.samefile.assert_called()
@mock.patch("pathlib.Path.parent", autospec=True)
def test_containerfile_2(self, mock_parent) -> None:
def test_containerfile_2(self, mock_parent):
mock_parent.samefile.return_value = True
actual = api.prepare_containerfile(".", "Dockerfile")
self.assertEqual(actual, "Dockerfile")
mock_parent.samefile.assert_called()
@mock.patch("shutil.copy2")
def test_containerfile_copy(self, mock_copy) -> None:
def test_containerfile_copy(self, mock_copy):
mock_copy.return_value = None
with mock.patch.object(pathlib.Path, "parent") as mock_parent:
@ -109,7 +109,7 @@ class TestUtilsCase(unittest.TestCase):
actual = api.prepare_containerfile("/work", "/home/Dockerfile")
self.assertRegex(actual, r"\.containerfile\..*")
def test_prepare_body_all_types(self) -> None:
def test_prepare_body_all_types(self):
payload = {
"String": "string",
"Integer": 42,
@ -121,7 +121,7 @@ class TestUtilsCase(unittest.TestCase):
actual = api.prepare_body(payload)
self.assertEqual(actual, json.dumps(payload, sort_keys=True))
def test_prepare_body_none(self) -> None:
def test_prepare_body_none(self):
payload = {
"String": "",
"Integer": None,
@ -133,8 +133,8 @@ class TestUtilsCase(unittest.TestCase):
actual = api.prepare_body(payload)
self.assertEqual(actual, '{"Boolean": false}')
def test_prepare_body_embedded(self) -> None:
payload: dict[str, Any] = {
def test_prepare_body_embedded(self):
payload = {
"String": "",
"Integer": None,
"Boolean": False,
@ -154,7 +154,7 @@ class TestUtilsCase(unittest.TestCase):
self.assertDictEqual(actual_dict["Dictionary"], payload["Dictionary"])
self.assertEqual(set(actual_dict["Set1"]), {"item1", "item2"})
def test_prepare_body_dict_empty_string(self) -> None:
def test_prepare_body_dict_empty_string(self):
payload = {"Dictionary": {"key1": "", "key2": {"key3": ""}, "key4": [], "key5": {}}}
actual = api.prepare_body(payload)
@ -164,15 +164,6 @@ class TestUtilsCase(unittest.TestCase):
self.assertDictEqual(payload, actual_dict)
def test_encode_auth_header(self):
auth_config = {
"username": "user",
"password": "pass",
}
expected = b"eyJ1c2VybmFtZSI6ICJ1c2VyIiwgInBhc3N3b3JkIjogInBhc3MifQ=="
actual = api.encode_auth_header(auth_config)
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()

View File

@ -5,9 +5,9 @@ import unittest
try:
# Python >= 3.10
from collections.abc import Iterable
except ImportError:
except:
# Python < 3.10
from collections.abc import Iterable
from collections import Iterable
from unittest.mock import patch
import requests_mock
@ -61,7 +61,8 @@ class TestBuildCase(unittest.TestCase):
with requests_mock.Mocker() as mock:
mock.post(
tests.LIBPOD_URL + "/build"
tests.LIBPOD_URL
+ "/build"
"?t=latest"
"&buildargs=%7B%22BUILD_DATE%22%3A+%22January+1%2C+1970%22%7D"
"&cpuperiod=10"

View File

@ -1,40 +1,14 @@
import unittest
import urllib.parse
import json
import os
import tempfile
from pathlib import Path
from unittest import mock
from unittest.mock import MagicMock
from podman.domain.config import PodmanConfig
class PodmanConfigTestCaseDefault(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
# Data to be written to the JSON file
self.data_json = """
{
"Connection": {
"Default": "testing_json",
"Connections": {
"testing_json": {
"URI": "ssh://qe@localhost:2222/run/podman/podman.sock",
"Identity": "/home/qe/.ssh/id_rsa"
},
"production": {
"URI": "ssh://root@localhost:22/run/podman/podman.sock",
"Identity": "/home/root/.ssh/id_rsajson"
}
}
},
"Farm": {}
}
"""
# Data to be written to the TOML file
self.data_toml = """
class PodmanConfigTestCase(unittest.TestCase):
opener = mock.mock_open(read_data="""
[containers]
log_size_max = -1
pids_limit = 2048
@ -53,61 +27,13 @@ class PodmanConfigTestCaseDefault(unittest.TestCase):
identity = "/home/qe/.ssh/id_rsa"
[network]
"""
# Define the file path
self.path_json = os.path.join(self.temp_dir, 'podman-connections.json')
self.path_toml = os.path.join(self.temp_dir, 'containers.conf')
# Write data to the JSON file
j_data = json.loads(self.data_json)
with open(self.path_json, 'w+') as file_json:
json.dump(j_data, file_json)
# Write data to the TOML file
with open(self.path_toml, 'w+') as file_toml:
# toml.dump(self.data_toml, file_toml)
file_toml.write(self.data_toml)
def test_connections(self):
config = PodmanConfig("@@is_test@@" + self.temp_dir)
self.assertEqual(config.active_service.id, "testing_json")
expected = urllib.parse.urlparse("ssh://qe@localhost:2222/run/podman/podman.sock")
self.assertEqual(config.active_service.url, expected)
self.assertEqual(config.services["production"].identity, Path("/home/root/.ssh/id_rsajson"))
class PodmanConfigTestCaseTOML(unittest.TestCase):
opener = mock.mock_open(
read_data="""
[containers]
log_size_max = -1
pids_limit = 2048
userns_size = 65536
[engine]
num_locks = 2048
active_service = "testing"
stop_timeout = 10
[engine.service_destinations]
[engine.service_destinations.production]
uri = "ssh://root@localhost:22/run/podman/podman.sock"
identity = "/home/root/.ssh/id_rsa"
[engine.service_destinations.testing]
uri = "ssh://qe@localhost:2222/run/podman/podman.sock"
identity = "/home/qe/.ssh/id_rsa"
[network]
"""
)
""")
def setUp(self) -> None:
super().setUp()
def mocked_open(self, *args, **kwargs):
return PodmanConfigTestCaseTOML.opener(self, *args, **kwargs)
return PodmanConfigTestCase.opener(self, *args, **kwargs)
self.mocked_open = mocked_open
@ -121,50 +47,10 @@ class PodmanConfigTestCaseTOML(unittest.TestCase):
self.assertEqual(config.active_service.url, expected)
self.assertEqual(config.services["production"].identity, Path("/home/root/.ssh/id_rsa"))
PodmanConfigTestCaseTOML.opener.assert_called_with(
PodmanConfigTestCase.opener.assert_called_with(
Path("/home/developer/containers.conf"), encoding='utf-8'
)
class PodmanConfigTestCaseJSON(unittest.TestCase):
def setUp(self) -> None:
super().setUp()
self.temp_dir = tempfile.mkdtemp()
self.data = """
{
"Connection": {
"Default": "testing",
"Connections": {
"testing": {
"URI": "ssh://qe@localhost:2222/run/podman/podman.sock",
"Identity": "/home/qe/.ssh/id_rsa"
},
"production": {
"URI": "ssh://root@localhost:22/run/podman/podman.sock",
"Identity": "/home/root/.ssh/id_rsa"
}
}
},
"Farm": {}
}
"""
self.path = os.path.join(self.temp_dir, 'podman-connections.json')
# Write data to the JSON file
data = json.loads(self.data)
with open(self.path, 'w+') as file:
json.dump(data, file)
def test_connections(self):
config = PodmanConfig(self.path)
self.assertEqual(config.active_service.id, "testing")
expected = urllib.parse.urlparse("ssh://qe@localhost:2222/run/podman/podman.sock")
self.assertEqual(config.active_service.url, expected)
self.assertEqual(config.services["production"].identity, Path("/home/root/.ssh/id_rsa"))
if __name__ == '__main__':
unittest.main()

View File

@ -6,9 +6,9 @@ import unittest
try:
# Python >= 3.10
from collections.abc import Iterable
except ImportError:
except:
# Python < 3.10
from collections.abc import Iterable
from collections import Iterable
import requests_mock
@ -38,7 +38,8 @@ class ContainersTestCase(unittest.TestCase):
@requests_mock.Mocker()
def test_remove(self, mock):
adapter = mock.delete(
tests.LIBPOD_URL + "/containers/"
tests.LIBPOD_URL
+ "/containers/"
"87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd?v=True&force=True",
status_code=204,
)
@ -70,7 +71,8 @@ class ContainersTestCase(unittest.TestCase):
@requests_mock.Mocker()
def test_restart(self, mock):
adapter = mock.post(
tests.LIBPOD_URL + "/containers/"
tests.LIBPOD_URL
+ "/containers/"
"87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd/restart?timeout=10",
status_code=204,
)
@ -81,7 +83,8 @@ class ContainersTestCase(unittest.TestCase):
@requests_mock.Mocker()
def test_start_dkeys(self, mock):
adapter = mock.post(
tests.LIBPOD_URL + "/containers/"
tests.LIBPOD_URL
+ "/containers/"
"87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd/start"
"?detachKeys=%5Ef%5Eu",
status_code=204,
@ -101,40 +104,24 @@ class ContainersTestCase(unittest.TestCase):
container.start()
self.assertTrue(adapter.called_once)
@requests_mock.Mocker()
def test_init(self, mock):
adapter = mock.post(
tests.LIBPOD_URL
+ "/containers/87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd/init",
status_code=204,
)
container = Container(attrs=FIRST_CONTAINER, client=self.client.api)
container.init()
self.assertTrue(adapter.called_once)
@requests_mock.Mocker()
def test_stats(self, mock):
stream = [
{
"Error": None,
"Stats": [
{
"ContainerId": (
"87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd"
),
"Name": "evil_ptolemy",
"CPU": 1000.0,
}
],
}
]
stream = [{
"Error": None,
"Stats": [{
"ContainerId": "87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd",
"Name": "evil_ptolemy",
"CPU": 1000.0,
}],
}]
buffer = io.StringIO()
for entry in stream:
buffer.write(json.JSONEncoder().encode(entry))
buffer.write("\n")
adapter = mock.get(
tests.LIBPOD_URL + "/containers/stats"
tests.LIBPOD_URL
+ "/containers/stats"
"?containers=87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd"
"&stream=True",
text=buffer.getvalue(),
@ -156,7 +143,8 @@ class ContainersTestCase(unittest.TestCase):
@requests_mock.Mocker()
def test_stop(self, mock):
adapter = mock.post(
tests.LIBPOD_URL + "/containers/"
tests.LIBPOD_URL
+ "/containers/"
"87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd/stop"
"?all=True&timeout=10.0",
status_code=204,
@ -185,7 +173,8 @@ class ContainersTestCase(unittest.TestCase):
@requests_mock.Mocker()
def test_unpause(self, mock):
adapter = mock.post(
tests.LIBPOD_URL + "/containers/"
tests.LIBPOD_URL
+ "/containers/"
"87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd/unpause",
status_code=204,
)
@ -236,7 +225,8 @@ class ContainersTestCase(unittest.TestCase):
{"Path": "deleted", "Kind": 2},
]
adapter = mock.get(
tests.LIBPOD_URL + "/containers/"
tests.LIBPOD_URL
+ "/containers/"
"87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd/changes",
json=payload,
)
@ -248,7 +238,8 @@ class ContainersTestCase(unittest.TestCase):
@requests_mock.Mocker()
def test_diff_404(self, mock):
adapter = mock.get(
tests.LIBPOD_URL + "/containers/"
tests.LIBPOD_URL
+ "/containers/"
"87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd/changes",
json={
"cause": "Container not found.",
@ -293,7 +284,8 @@ class ContainersTestCase(unittest.TestCase):
encoded_value = base64.urlsafe_b64encode(json.dumps(header_value).encode("utf8"))
adapter = mock.get(
tests.LIBPOD_URL + "/containers/"
tests.LIBPOD_URL
+ "/containers/"
"87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd/archive"
"?path=/etc/motd",
body=body,
@ -314,7 +306,8 @@ class ContainersTestCase(unittest.TestCase):
@requests_mock.Mocker()
def test_commit(self, mock):
post_adapter = mock.post(
tests.LIBPOD_URL + "/commit"
tests.LIBPOD_URL
+ "/commit"
"?author=redhat&changes=ADD+%2fetc%2fmod&comment=This+is+a+unittest"
"&container=87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd&format=docker"
"&pause=True&repo=quay.local&tag=unittest",
@ -347,7 +340,8 @@ class ContainersTestCase(unittest.TestCase):
@requests_mock.Mocker()
def test_put_archive(self, mock):
adapter = mock.put(
tests.LIBPOD_URL + "/containers/"
tests.LIBPOD_URL
+ "/containers/"
"87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd/archive"
"?path=%2fetc%2fmotd",
status_code=200,
@ -363,7 +357,8 @@ class ContainersTestCase(unittest.TestCase):
@requests_mock.Mocker()
def test_put_archive_404(self, mock):
adapter = mock.put(
tests.LIBPOD_URL + "/containers/"
tests.LIBPOD_URL
+ "/containers/"
"87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd/archive"
"?path=deadbeef",
status_code=404,
@ -412,26 +407,23 @@ class ContainersTestCase(unittest.TestCase):
@requests_mock.Mocker()
def test_top_with_streaming(self, mock):
stream = [
{
"Processes": [
[
'jhonce',
'2417',
'2274',
'0',
'Mar01',
'?',
'00:00:01',
'/usr/bin/ssh-agent /bin/sh -c exec -l /bin/bash'
+ '-c "/usr/bin/gnome-session"',
],
['jhonce', '5544', '3522', '0', 'Mar01', 'pts/1', '00:00:02', '-bash'],
['jhonce', '6140', '3522', '0', 'Mar01', 'pts/2', '00:00:00', '-bash'],
stream = [{
"Processes": [
[
'jhonce',
'2417',
'2274',
'0',
'Mar01',
'?',
'00:00:01',
'/usr/bin/ssh-agent /bin/sh -c exec -l /bin/bash -c "/usr/bin/gnome-session"',
],
"Titles": ["UID", "PID", "PPID", "C", "STIME", "TTY", "TIME CMD"],
}
]
['jhonce', '5544', '3522', '0', 'Mar01', 'pts/1', '00:00:02', '-bash'],
['jhonce', '6140', '3522', '0', 'Mar01', 'pts/2', '00:00:00', '-bash'],
],
"Titles": ["UID", "PID", "PPID", "C", "STIME", "TTY", "TIME CMD"],
}]
buffer = io.StringIO()
for entry in stream:

View File

@ -1,20 +1,18 @@
import json
import unittest
try:
# Python >= 3.10
from collections.abc import Iterator
except ImportError:
except:
# Python < 3.10
from collections.abc import Iterator
from collections import Iterator
from unittest.mock import DEFAULT, MagicMock, patch
from unittest.mock import DEFAULT, patch
import requests_mock
from podman import PodmanClient, tests
from podman.domain.containers import Container
from podman.domain.containers_create import CreateMixin
from podman.domain.containers_manager import ContainersManager
from podman.errors import ImageNotFound, NotFound
@ -65,8 +63,7 @@ class ContainersManagerTestCase(unittest.TestCase):
"87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd"
)
self.assertEqual(
actual.id,
"87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd",
actual.id, "87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd"
)
@requests_mock.Mocker()
@ -106,18 +103,17 @@ class ContainersManagerTestCase(unittest.TestCase):
self.assertIsInstance(actual, list)
self.assertEqual(
actual[0].id,
"87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd",
actual[0].id, "87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd"
)
self.assertEqual(
actual[1].id,
"6dc84cc0a46747da94e4c1571efcc01a756b4017261440b4b8985d37203c3c03",
actual[1].id, "6dc84cc0a46747da94e4c1571efcc01a756b4017261440b4b8985d37203c3c03"
)
@requests_mock.Mocker()
def test_list_filtered(self, mock):
mock.get(
tests.LIBPOD_URL + "/containers/json?"
tests.LIBPOD_URL
+ "/containers/json?"
"all=True"
"&filters=%7B"
"%22before%22%3A"
@ -136,12 +132,10 @@ class ContainersManagerTestCase(unittest.TestCase):
self.assertIsInstance(actual, list)
self.assertEqual(
actual[0].id,
"87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd",
actual[0].id, "87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd"
)
self.assertEqual(
actual[1].id,
"6dc84cc0a46747da94e4c1571efcc01a756b4017261440b4b8985d37203c3c03",
actual[1].id, "6dc84cc0a46747da94e4c1571efcc01a756b4017261440b4b8985d37203c3c03"
)
@requests_mock.Mocker()
@ -153,24 +147,6 @@ class ContainersManagerTestCase(unittest.TestCase):
actual = self.client.containers.list()
self.assertIsInstance(actual, list)
self.assertEqual(
actual[0].id,
"87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd",
)
self.assertEqual(
actual[1].id,
"6dc84cc0a46747da94e4c1571efcc01a756b4017261440b4b8985d37203c3c03",
)
@requests_mock.Mocker()
def test_list_sparse_libpod_default(self, mock):
mock.get(
tests.LIBPOD_URL + "/containers/json",
json=[FIRST_CONTAINER, SECOND_CONTAINER],
)
actual = self.client.containers.list()
self.assertIsInstance(actual, list)
self.assertEqual(
actual[0].id, "87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd"
)
@ -178,118 +154,6 @@ class ContainersManagerTestCase(unittest.TestCase):
actual[1].id, "6dc84cc0a46747da94e4c1571efcc01a756b4017261440b4b8985d37203c3c03"
)
# Verify that no individual reload() calls were made for sparse=True (default)
# Should be only 1 request for the list endpoint
self.assertEqual(len(mock.request_history), 1)
# lower() needs to be enforced since the mocked url is transformed as lowercase and
# this avoids %2f != %2F errors. Same applies for other instances of assertEqual
self.assertEqual(mock.request_history[0].url, tests.LIBPOD_URL.lower() + "/containers/json")
@requests_mock.Mocker()
def test_list_sparse_libpod_false(self, mock):
mock.get(
tests.LIBPOD_URL + "/containers/json",
json=[FIRST_CONTAINER, SECOND_CONTAINER],
)
# Mock individual container detail endpoints for reload() calls
# that are done for sparse=False
mock.get(
tests.LIBPOD_URL + f"/containers/{FIRST_CONTAINER['Id']}/json",
json=FIRST_CONTAINER,
)
mock.get(
tests.LIBPOD_URL + f"/containers/{SECOND_CONTAINER['Id']}/json",
json=SECOND_CONTAINER,
)
actual = self.client.containers.list(sparse=False)
self.assertIsInstance(actual, list)
self.assertEqual(
actual[0].id, "87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd"
)
self.assertEqual(
actual[1].id, "6dc84cc0a46747da94e4c1571efcc01a756b4017261440b4b8985d37203c3c03"
)
# Verify that individual reload() calls were made for sparse=False
# Should be 3 requests total: 1 for list + 2 for individual container details
self.assertEqual(len(mock.request_history), 3)
# Verify the list endpoint was called first
self.assertEqual(mock.request_history[0].url, tests.LIBPOD_URL.lower() + "/containers/json")
# Verify the individual container detail endpoints were called
individual_urls = {req.url for req in mock.request_history[1:]}
expected_urls = {
tests.LIBPOD_URL.lower() + f"/containers/{FIRST_CONTAINER['Id']}/json",
tests.LIBPOD_URL.lower() + f"/containers/{SECOND_CONTAINER['Id']}/json",
}
self.assertEqual(individual_urls, expected_urls)
@requests_mock.Mocker()
def test_list_sparse_compat_default(self, mock):
mock.get(
tests.COMPATIBLE_URL + "/containers/json",
json=[FIRST_CONTAINER, SECOND_CONTAINER],
)
# Mock individual container detail endpoints for reload() calls
# that are done for sparse=False
mock.get(
tests.COMPATIBLE_URL + f"/containers/{FIRST_CONTAINER['Id']}/json",
json=FIRST_CONTAINER,
)
mock.get(
tests.COMPATIBLE_URL + f"/containers/{SECOND_CONTAINER['Id']}/json",
json=SECOND_CONTAINER,
)
actual = self.client.containers.list(compatible=True)
self.assertIsInstance(actual, list)
self.assertEqual(
actual[0].id, "87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd"
)
self.assertEqual(
actual[1].id, "6dc84cc0a46747da94e4c1571efcc01a756b4017261440b4b8985d37203c3c03"
)
# Verify that individual reload() calls were made for compat default (sparse=True)
# Should be 3 requests total: 1 for list + 2 for individual container details
self.assertEqual(len(mock.request_history), 3)
self.assertEqual(
mock.request_history[0].url, tests.COMPATIBLE_URL.lower() + "/containers/json"
)
# Verify the individual container detail endpoints were called
individual_urls = {req.url for req in mock.request_history[1:]}
expected_urls = {
tests.COMPATIBLE_URL.lower() + f"/containers/{FIRST_CONTAINER['Id']}/json",
tests.COMPATIBLE_URL.lower() + f"/containers/{SECOND_CONTAINER['Id']}/json",
}
self.assertEqual(individual_urls, expected_urls)
@requests_mock.Mocker()
def test_list_sparse_compat_true(self, mock):
mock.get(
tests.COMPATIBLE_URL + "/containers/json",
json=[FIRST_CONTAINER, SECOND_CONTAINER],
)
actual = self.client.containers.list(sparse=True, compatible=True)
self.assertIsInstance(actual, list)
self.assertEqual(
actual[0].id, "87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd"
)
self.assertEqual(
actual[1].id, "6dc84cc0a46747da94e4c1571efcc01a756b4017261440b4b8985d37203c3c03"
)
# Verify that no individual reload() calls were made for sparse=True
# Should be only 1 request for the list endpoint
self.assertEqual(len(mock.request_history), 1)
self.assertEqual(
mock.request_history[0].url, tests.COMPATIBLE_URL.lower() + "/containers/json"
)
@requests_mock.Mocker()
def test_prune(self, mock):
mock.post(
@ -350,226 +214,14 @@ class ContainersManagerTestCase(unittest.TestCase):
with self.assertRaises(ImageNotFound):
self.client.containers.create("fedora", "/usr/bin/ls", cpu_count=9999)
@requests_mock.Mocker()
def test_create_parse_host_port(self, mock):
mock_response = MagicMock()
mock_response.json = lambda: {
"Id": "87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd",
"Size": 1024,
}
self.client.containers.client.post = MagicMock(return_value=mock_response)
mock.get(
tests.LIBPOD_URL
+ "/containers/87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd/json",
json=FIRST_CONTAINER,
)
port_str = {"2233": 3333}
port_str_protocol = {"2244/tcp": 3344}
port_int = {2255: 3355}
ports = {**port_str, **port_str_protocol, **port_int}
self.client.containers.create("fedora", "/usr/bin/ls", ports=ports)
self.client.containers.client.post.assert_called()
expected_ports = [
{
"container_port": 2233,
"host_port": 3333,
"protocol": "tcp",
},
{
"container_port": 2244,
"host_port": 3344,
"protocol": "tcp",
},
{
"container_port": 2255,
"host_port": 3355,
"protocol": "tcp",
},
]
actual_ports = json.loads(self.client.containers.client.post.call_args[1]["data"])[
"portmappings"
]
self.assertEqual(expected_ports, actual_ports)
@requests_mock.Mocker()
def test_create_userns_mode_simple(self, mock):
mock_response = MagicMock()
mock_response.json = lambda: {
"Id": "87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd",
"Size": 1024,
}
self.client.containers.client.post = MagicMock(return_value=mock_response)
mock.get(
tests.LIBPOD_URL
+ "/containers/87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd/json",
json=FIRST_CONTAINER,
)
userns = "keep-id"
self.client.containers.create("fedora", "/usr/bin/ls", userns_mode=userns)
self.client.containers.client.post.assert_called()
expected_userns = {"nsmode": userns}
actual_userns = json.loads(self.client.containers.client.post.call_args[1]["data"])[
"userns"
]
self.assertEqual(expected_userns, actual_userns)
@requests_mock.Mocker()
def test_create_userns_mode_dict(self, mock):
mock_response = MagicMock()
mock_response.json = lambda: {
"Id": "87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd",
"Size": 1024,
}
self.client.containers.client.post = MagicMock(return_value=mock_response)
mock.get(
tests.LIBPOD_URL
+ "/containers/87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd/json",
json=FIRST_CONTAINER,
)
userns = {"nsmode": "keep-id", "value": "uid=900"}
self.client.containers.create("fedora", "/usr/bin/ls", userns_mode=userns)
self.client.containers.client.post.assert_called()
expected_userns = dict(**userns)
actual_userns = json.loads(self.client.containers.client.post.call_args[1]["data"])[
"userns"
]
self.assertEqual(expected_userns, actual_userns)
def test_create_unsupported_key(self):
with self.assertRaises(TypeError):
with self.assertRaises(TypeError) as e:
self.client.containers.create("fedora", "/usr/bin/ls", blkio_weight=100.0)
def test_create_unknown_key(self):
with self.assertRaises(TypeError):
with self.assertRaises(TypeError) as e:
self.client.containers.create("fedora", "/usr/bin/ls", unknown_key=100.0)
@requests_mock.Mocker()
def test_create_convert_env_list_to_dict(self, mock):
env_list1 = ["FOO=foo", "BAR=bar"]
# Test valid list
converted_dict1 = {"FOO": "foo", "BAR": "bar"}
self.assertEqual(CreateMixin._convert_env_list_to_dict(env_list1), converted_dict1)
# Test empty string
env_list2 = ["FOO=foo", ""]
self.assertRaises(ValueError, CreateMixin._convert_env_list_to_dict, env_list2)
# Test non iterable
env_list3 = ["FOO=foo", None]
self.assertRaises(TypeError, CreateMixin._convert_env_list_to_dict, env_list3)
# Test iterable with non string element
env_list4 = ["FOO=foo", []]
self.assertRaises(TypeError, CreateMixin._convert_env_list_to_dict, env_list4)
# Test empty list
env_list5 = []
converted_dict5 = {}
self.assertEqual(CreateMixin._convert_env_list_to_dict(env_list5), converted_dict5)
# Test single valid environment variable
env_list6 = ["SINGLE=value"]
converted_dict6 = {"SINGLE": "value"}
self.assertEqual(CreateMixin._convert_env_list_to_dict(env_list6), converted_dict6)
# Test environment variable with empty value
env_list7 = ["EMPTY="]
converted_dict7 = {"EMPTY": ""}
self.assertEqual(CreateMixin._convert_env_list_to_dict(env_list7), converted_dict7)
# Test environment variable with multiple equals signs
env_list8 = ["URL=https://example.com/path?param=value"]
converted_dict8 = {"URL": "https://example.com/path?param=value"}
self.assertEqual(CreateMixin._convert_env_list_to_dict(env_list8), converted_dict8)
# Test environment variable with spaces in value
env_list9 = ["MESSAGE=Hello World", "PATH=/usr/local/bin:/usr/bin"]
converted_dict9 = {"MESSAGE": "Hello World", "PATH": "/usr/local/bin:/usr/bin"}
self.assertEqual(CreateMixin._convert_env_list_to_dict(env_list9), converted_dict9)
# Test environment variable with special characters
env_list10 = ["SPECIAL=!@#$%^&*()_+-=[]{}|;':\",./<>?"]
converted_dict10 = {"SPECIAL": "!@#$%^&*()_+-=[]{}|;':\",./<>?"}
self.assertEqual(CreateMixin._convert_env_list_to_dict(env_list10), converted_dict10)
# Test environment variable with numeric values
env_list11 = ["PORT=8080", "TIMEOUT=30"]
converted_dict11 = {"PORT": "8080", "TIMEOUT": "30"}
self.assertEqual(CreateMixin._convert_env_list_to_dict(env_list11), converted_dict11)
# Test environment variable with boolean-like values
env_list12 = ["DEBUG=true", "VERBOSE=false", "ENABLED=1", "DISABLED=0"]
converted_dict12 = {
"DEBUG": "true",
"VERBOSE": "false",
"ENABLED": "1",
"DISABLED": "0",
}
self.assertEqual(CreateMixin._convert_env_list_to_dict(env_list12), converted_dict12)
# Test environment variable with whitespace in key (should preserve)
env_list13 = [" SPACED_KEY =value", "KEY= spaced_value "]
converted_dict13 = {" SPACED_KEY ": "value", "KEY": " spaced_value "}
self.assertEqual(CreateMixin._convert_env_list_to_dict(env_list13), converted_dict13)
# Test missing equals sign
env_list14 = ["FOO=foo", "INVALID"]
self.assertRaises(ValueError, CreateMixin._convert_env_list_to_dict, env_list14)
# Test environment variable with only equals sign (empty key)
env_list15 = ["FOO=foo", "=value"]
self.assertRaises(ValueError, CreateMixin._convert_env_list_to_dict, env_list15)
# Test environment variable with only whitespace key
env_list16 = ["FOO=foo", " =value"]
self.assertRaises(ValueError, CreateMixin._convert_env_list_to_dict, env_list16)
# Test whitespace-only string
env_list17 = ["FOO=foo", " "]
self.assertRaises(ValueError, CreateMixin._convert_env_list_to_dict, env_list17)
# Test various non-string types in list
env_list18 = ["FOO=foo", 123]
self.assertRaises(TypeError, CreateMixin._convert_env_list_to_dict, env_list18)
env_list19 = ["FOO=foo", {"key": "value"}]
self.assertRaises(TypeError, CreateMixin._convert_env_list_to_dict, env_list19)
env_list20 = ["FOO=foo", True]
self.assertRaises(TypeError, CreateMixin._convert_env_list_to_dict, env_list20)
# Test duplicate keys (last one should win)
env_list21 = ["KEY=first", "KEY=second", "OTHER=value"]
converted_dict21 = {"KEY": "second", "OTHER": "value"}
self.assertEqual(CreateMixin._convert_env_list_to_dict(env_list21), converted_dict21)
# Test very long environment variable
long_value = "x" * 1000
env_list22 = [f"LONG_VAR={long_value}"]
converted_dict22 = {"LONG_VAR": long_value}
self.assertEqual(CreateMixin._convert_env_list_to_dict(env_list22), converted_dict22)
# Test environment variable with newlines and tabs
env_list23 = ["MULTILINE=line1\nline2\ttabbed"]
converted_dict23 = {"MULTILINE": "line1\nline2\ttabbed"}
self.assertEqual(CreateMixin._convert_env_list_to_dict(env_list23), converted_dict23)
# Test environment variable with unicode characters
env_list24 = ["UNICODE=こんにちは", "EMOJI=🚀🌟"]
converted_dict24 = {"UNICODE": "こんにちは", "EMOJI": "🚀🌟"}
self.assertEqual(CreateMixin._convert_env_list_to_dict(env_list24), converted_dict24)
# Test case sensitivity
env_list25 = ["path=/usr/bin", "PATH=/usr/local/bin"]
converted_dict25 = {"path": "/usr/bin", "PATH": "/usr/local/bin"}
self.assertEqual(CreateMixin._convert_env_list_to_dict(env_list25), converted_dict25)
@requests_mock.Mocker()
def test_run_detached(self, mock):
mock.post(
@ -632,7 +284,7 @@ class ContainersManagerTestCase(unittest.TestCase):
actual = self.client.containers.run("fedora", "/usr/bin/ls")
self.assertIsInstance(actual, bytes)
self.assertEqual(actual, b"This is a unittest - line 1This is a unittest - line 2")
self.assertEqual(actual, b'This is a unittest - line 1This is a unittest - line 2')
# iter() cannot be reset so subtests used to create new instance
with self.subTest("Stream results"):
@ -645,5 +297,5 @@ class ContainersManagerTestCase(unittest.TestCase):
self.assertEqual(next(actual), b"This is a unittest - line 2")
if __name__ == "__main__":
if __name__ == '__main__':
unittest.main()

View File

@ -1,84 +0,0 @@
import unittest
import requests_mock
from podman import PodmanClient, tests
CONTAINER = {
"Id": "87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd",
"Name": "quay.io/fedora:latest",
"Image": "eloquent_pare",
"State": {"Status": "running"},
}
class PodmanResourceTestCase(unittest.TestCase):
"""Test PodmanResource area of concern."""
def setUp(self) -> None:
super().setUp()
self.client = PodmanClient(base_url=tests.BASE_SOCK)
def tearDown(self) -> None:
super().tearDown()
self.client.close()
@requests_mock.Mocker()
def test_reload_with_compatible_options(self, mock):
"""Test that reload uses the correct endpoint."""
# Mock the get() call
mock.get(
f"{tests.LIBPOD_URL}/"
f"containers/87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd/json",
json=CONTAINER,
)
# Mock the reload() call
mock.get(
f"{tests.LIBPOD_URL}/"
f"containers/87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd/json",
json=CONTAINER,
)
# Mock the reload(compatible=False) call
mock.get(
f"{tests.LIBPOD_URL}/"
f"containers/87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd/json",
json=CONTAINER,
)
# Mock the reload(compatible=True) call
mock.get(
f"{tests.COMPATIBLE_URL}/"
f"containers/87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd/json",
json=CONTAINER,
)
container = self.client.containers.get(
"87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd"
)
container.reload()
container.reload(compatible=False)
container.reload(compatible=True)
self.assertEqual(len(mock.request_history), 4)
for i in range(3):
self.assertEqual(
mock.request_history[i].url,
tests.LIBPOD_URL.lower()
+ "/containers/"
+ "87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd/json",
)
self.assertEqual(
mock.request_history[3].url,
tests.COMPATIBLE_URL.lower()
+ "/containers/87e1325c82424e49a00abdd4de08009eb76c7de8d228426a9b8af9318ced5ecd/json",
)
if __name__ == '__main__':
unittest.main()

View File

@ -22,29 +22,27 @@ class EventsManagerTestCase(unittest.TestCase):
@requests_mock.Mocker()
def test_list(self, mock):
stream = [
{
"Type": "pod",
"Action": "create",
"Actor": {
"ID": "",
"Attributes": {
"image": "",
"name": "",
"containerExitCode": 0,
},
stream = [{
"Type": "pod",
"Action": "create",
"Actor": {
"ID": "",
"Attributes": {
"image": "",
"name": "",
"containerExitCode": 0,
},
"Scope": "local",
"Time": 1615845480,
"TimeNano": 1615845480,
}
]
},
"Scope": "local",
"Time": 1615845480,
"TimeNano": 1615845480,
}]
buffer = io.StringIO()
for item in stream:
buffer.write(json.JSONEncoder().encode(item))
buffer.write("\n")
adapter = mock.get(tests.LIBPOD_URL + "/events", text=buffer.getvalue()) # noqa: F841
adapter = mock.get(tests.LIBPOD_URL + "/events", text=buffer.getvalue())
manager = EventsManager(client=self.client.api)
actual = manager.list(decode=True)

View File

@ -51,16 +51,14 @@ class ImageTestCase(unittest.TestCase):
adapter = mock.get(
tests.LIBPOD_URL
+ "/images/326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab/history",
json=[
{
"Id": "326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab",
"Comment": "",
"Created": 1614208404,
"CreatedBy": "2021-02-24T23:13:24+00:00",
"Tags": ["latest"],
"Size": 1024,
}
],
json=[{
"Id": "326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab",
"Comment": "",
"Created": 1614208404,
"CreatedBy": "2021-02-24T23:13:24+00:00",
"Tags": ["latest"],
"Size": 1024,
}],
)
image = Image(attrs=FIRST_IMAGE, client=self.client.api)

View File

@ -1,20 +1,19 @@
import types
import unittest
from unittest.mock import patch
try:
# Python >= 3.10
from collections.abc import Iterable
except ImportError:
except:
# Python < 3.10
from collections.abc import Iterable
from collections import Iterable
import requests_mock
from podman import PodmanClient, tests
from podman.domain.images import Image
from podman.domain.images_manager import ImagesManager
from podman.errors import APIError, ImageNotFound, PodmanError
from podman.errors import APIError, ImageNotFound
FIRST_IMAGE = {
"Id": "sha256:326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab",
@ -157,13 +156,11 @@ class ImagesManagerTestCase(unittest.TestCase):
"""Unit test Images prune()."""
mock.post(
tests.LIBPOD_URL + "/images/prune",
json=[
{
"Id": "326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab",
"Err": None,
"Size": 1024,
}
],
json=[{
"Id": "326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab",
"Err": None,
"Size": 1024,
}],
)
results = self.client.images.prune()
@ -208,91 +205,20 @@ class ImagesManagerTestCase(unittest.TestCase):
self.assertEqual(len(untagged), 2)
self.assertEqual(len("".join(untagged)), 0)
@requests_mock.Mocker()
def test_prune_filters_label(self, mock):
"""Unit test filters param label for Images prune()."""
mock.post(
tests.LIBPOD_URL
+ "/images/prune?filters=%7B%22label%22%3A+%5B%22%7B%27license%27%3A+"
+ "%27Apache-2.0%27%7D%22%5D%7D",
json=[
{
"Id": "326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab",
"Size": 1024,
},
],
)
report = self.client.images.prune(filters={"label": {"license": "Apache-2.0"}})
self.assertIn("ImagesDeleted", report)
self.assertIn("SpaceReclaimed", report)
self.assertEqual(report["SpaceReclaimed"], 1024)
deleted = [r["Deleted"] for r in report["ImagesDeleted"] if "Deleted" in r]
self.assertEqual(len(deleted), 1)
self.assertIn("326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab", deleted)
self.assertGreater(len("".join(deleted)), 0)
untagged = [r["Untagged"] for r in report["ImagesDeleted"] if "Untagged" in r]
self.assertEqual(len(untagged), 1)
self.assertEqual(len("".join(untagged)), 0)
@requests_mock.Mocker()
def test_prune_filters_not_label(self, mock):
"""Unit test filters param NOT-label for Images prune()."""
mock.post(
tests.LIBPOD_URL
+ "/images/prune?filters=%7B%22label%21%22%3A+%5B%22%7B%27license%27%3A+"
+ "%27Apache-2.0%27%7D%22%5D%7D",
json=[
{
"Id": "c4b16966ecd94ffa910eab4e630e24f259bf34a87e924cd4b1434f267b0e354e",
"Size": 1024,
},
],
)
report = self.client.images.prune(filters={"label!": {"license": "Apache-2.0"}})
self.assertIn("ImagesDeleted", report)
self.assertIn("SpaceReclaimed", report)
self.assertEqual(report["SpaceReclaimed"], 1024)
deleted = [r["Deleted"] for r in report["ImagesDeleted"] if "Deleted" in r]
self.assertEqual(len(deleted), 1)
self.assertIn("c4b16966ecd94ffa910eab4e630e24f259bf34a87e924cd4b1434f267b0e354e", deleted)
self.assertGreater(len("".join(deleted)), 0)
untagged = [r["Untagged"] for r in report["ImagesDeleted"] if "Untagged" in r]
self.assertEqual(len(untagged), 1)
self.assertEqual(len("".join(untagged)), 0)
@requests_mock.Mocker()
def test_prune_failure(self, mock):
"""Unit test to report error carried in response body."""
mock.post(
tests.LIBPOD_URL + "/images/prune",
json=[
{
"Err": "Test prune failure in response body.",
}
],
json=[{
"Err": "Test prune failure in response body.",
}],
)
with self.assertRaises(APIError) as e:
self.client.images.prune()
self.assertEqual(e.exception.explanation, "Test prune failure in response body.")
@requests_mock.Mocker()
def test_prune_empty(self, mock):
"""Unit test if prune API responses null (None)."""
mock.post(tests.LIBPOD_URL + "/images/prune", text="null")
report = self.client.images.prune()
self.assertEqual(report["ImagesDeleted"], [])
self.assertEqual(report["SpaceReclaimed"], 0)
@requests_mock.Mocker()
def test_get(self, mock):
mock.get(
@ -381,37 +307,6 @@ class ImagesManagerTestCase(unittest.TestCase):
@requests_mock.Mocker()
def test_load(self, mock):
with self.assertRaises(PodmanError):
self.client.images.load()
with self.assertRaises(PodmanError):
self.client.images.load(b'data', b'file_path')
with self.assertRaises(PodmanError):
self.client.images.load(data=b'data', file_path=b'file_path')
# Patch Path.read_bytes to mock the file reading behavior
with patch("pathlib.Path.read_bytes", return_value=b"mock tarball data"):
mock.post(
tests.LIBPOD_URL + "/images/load",
json={"Names": ["quay.io/fedora:latest"]},
)
mock.get(
tests.LIBPOD_URL + "/images/quay.io%2ffedora%3Alatest/json",
json=FIRST_IMAGE,
)
# 3a. Test the case where only 'file_path' is provided
gntr = self.client.images.load(file_path="mock_file.tar")
self.assertIsInstance(gntr, types.GeneratorType)
report = list(gntr)
self.assertEqual(len(report), 1)
self.assertEqual(
report[0].id,
"sha256:326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab",
)
mock.post(
tests.LIBPOD_URL + "/images/load",
json={"Names": ["quay.io/fedora:latest"]},
@ -517,7 +412,7 @@ class ImagesManagerTestCase(unittest.TestCase):
self.assertEqual(report[0]["name"], "quay.io/libpod/fedora")
@requests_mock.Mocker()
def test_search_list_tags(self, mock):
def test_search_listTags(self, mock):
mock.get(
tests.LIBPOD_URL + "/images/search?term=fedora&noTrunc=true&listTags=true",
json=[
@ -562,7 +457,8 @@ class ImagesManagerTestCase(unittest.TestCase):
},
)
mock.get(
tests.LIBPOD_URL + "/images"
tests.LIBPOD_URL
+ "/images"
"/sha256%3A326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab/json",
json=FIRST_IMAGE,
)
@ -583,7 +479,8 @@ class ImagesManagerTestCase(unittest.TestCase):
},
)
mock.get(
tests.LIBPOD_URL + "/images"
tests.LIBPOD_URL
+ "/images"
"/sha256%3A326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab/json",
json=FIRST_IMAGE,
)
@ -604,7 +501,8 @@ class ImagesManagerTestCase(unittest.TestCase):
},
)
mock.get(
tests.LIBPOD_URL + "/images"
tests.LIBPOD_URL
+ "/images"
"/sha256%3A326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab/json",
json=FIRST_IMAGE,
)
@ -629,7 +527,8 @@ class ImagesManagerTestCase(unittest.TestCase):
},
)
mock.get(
tests.LIBPOD_URL + "/images"
tests.LIBPOD_URL
+ "/images"
"/sha256%3A326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab/json",
json=FIRST_IMAGE,
)
@ -649,109 +548,6 @@ class ImagesManagerTestCase(unittest.TestCase):
images[1].id, "c4b16966ecd94ffa910eab4e630e24f259bf34a87e924cd4b1434f267b0e354e"
)
@requests_mock.Mocker()
def test_pull_policy(self, mock):
image_id = "sha256:326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab"
mock.post(
tests.LIBPOD_URL + "/images/pull?reference=quay.io%2ffedora%3Alatest&policy=missing",
json={
"error": "",
"id": image_id,
"images": [image_id],
"stream": "",
},
)
mock.get(
tests.LIBPOD_URL + "/images"
"/sha256%3A326dd9d7add24646a325e8eaa82125294027db2332e49c5828d96312c5d773ab/json",
json=FIRST_IMAGE,
)
image = self.client.images.pull("quay.io/fedora:latest", policy="missing")
self.assertEqual(image.id, image_id)
@requests_mock.Mocker()
def test_list_with_name_parameter(self, mock):
"""Test that name parameter is correctly converted to a reference filter"""
mock.get(
tests.LIBPOD_URL + "/images/json?filters=%7B%22reference%22%3A+%5B%22fedora%22%5D%7D",
json=[FIRST_IMAGE],
)
images = self.client.images.list(name="fedora")
self.assertEqual(len(images), 1)
self.assertIsInstance(images[0], Image)
self.assertEqual(images[0].tags, ["fedora:latest", "fedora:33"])
@requests_mock.Mocker()
def test_list_with_name_and_existing_filters(self, mock):
"""Test that name parameter works alongside other filters"""
mock.get(
tests.LIBPOD_URL
+ (
"/images/json?filters=%7B%22dangling%22%3A+%5B%22True%22%5D%2C+"
"%22reference%22%3A+%5B%22fedora%22%5D%7D"
),
json=[FIRST_IMAGE],
)
images = self.client.images.list(name="fedora", filters={"dangling": True})
self.assertEqual(len(images), 1)
self.assertIsInstance(images[0], Image)
@requests_mock.Mocker()
def test_list_with_name_overrides_reference_filter(self, mock):
"""Test that name parameter takes precedence over existing reference filter"""
mock.get(
tests.LIBPOD_URL + "/images/json?filters=%7B%22reference%22%3A+%5B%22fedora%22%5D%7D",
json=[FIRST_IMAGE],
)
# The name parameter should override the reference filter
images = self.client.images.list(
name="fedora",
filters={"reference": "ubuntu"}, # This should be overridden
)
self.assertEqual(len(images), 1)
self.assertIsInstance(images[0], Image)
@requests_mock.Mocker()
def test_list_with_all_and_name(self, mock):
"""Test that all parameter works alongside name filter"""
mock.get(
tests.LIBPOD_URL
+ "/images/json?all=true&filters=%7B%22reference%22%3A+%5B%22fedora%22%5D%7D",
json=[FIRST_IMAGE],
)
images = self.client.images.list(all=True, name="fedora")
self.assertEqual(len(images), 1)
self.assertIsInstance(images[0], Image)
@requests_mock.Mocker()
def test_list_with_empty_name(self, mock):
"""Test that empty name parameter doesn't add a reference filter"""
mock.get(tests.LIBPOD_URL + "/images/json", json=[FIRST_IMAGE])
images = self.client.images.list(name="")
self.assertEqual(len(images), 1)
self.assertIsInstance(images[0], Image)
@requests_mock.Mocker()
def test_list_with_none_name(self, mock):
"""Test that None name parameter doesn't add a reference filter"""
mock.get(tests.LIBPOD_URL + "/images/json", json=[FIRST_IMAGE])
images = self.client.images.list(name=None)
self.assertEqual(len(images), 1)
self.assertIsInstance(images[0], Image)
if __name__ == '__main__':
unittest.main()

View File

@ -1,15 +1,8 @@
import unittest
import requests_mock
from podman import PodmanClient, tests
from podman.domain.manifests import Manifest, ManifestsManager
FIRST_MANIFEST = {
"Id": "326dd9d7add24646a389e8eaa82125294027db2332e49c5828d96312c5d773ab",
"names": "quay.io/fedora:latest",
}
class ManifestTestCase(unittest.TestCase):
def setUp(self) -> None:
@ -30,34 +23,6 @@ class ManifestTestCase(unittest.TestCase):
manifest = Manifest()
self.assertIsNone(manifest.name)
@requests_mock.Mocker()
def test_push(self, mock):
adapter = mock.post(
tests.LIBPOD_URL + "/manifests/quay.io%2Ffedora%3Alatest/registry/quay.io%2Ffedora%3Av1"
)
manifest = Manifest(attrs=FIRST_MANIFEST, client=self.client.api)
manifest.push(destination="quay.io/fedora:v1")
self.assertTrue(adapter.called_once)
@requests_mock.Mocker()
def test_push_with_auth(self, mock):
adapter = mock.post(
tests.LIBPOD_URL
+ "/manifests/quay.io%2Ffedora%3Alatest/registry/quay.io%2Ffedora%3Av1",
request_headers={
"X-Registry-Auth": b"eyJ1c2VybmFtZSI6ICJ1c2VyIiwgInBhc3N3b3JkIjogInBhc3MifQ=="
},
)
manifest = Manifest(attrs=FIRST_MANIFEST, client=self.client.api)
manifest.push(
destination="quay.io/fedora:v1", auth_config={"username": "user", "password": "pass"}
)
self.assertTrue(adapter.called_once)
if __name__ == '__main__':
unittest.main()

View File

@ -34,16 +34,14 @@ FIRST_NETWORK_LIBPOD = {
"driver": "bridge",
"network_interface": "libpod_veth0",
"created": "2022-01-28T09:18:37.491308364-07:00",
"subnets": [
{
"subnet": "10.11.12.0/24",
"gateway": "10.11.12.1",
"lease_range": {
"start_ip": "10.11.12.1",
"end_ip": "10.11.12.63",
},
}
],
"subnets": [{
"subnet": "10.11.12.0/24",
"gateway": "10.11.12.1",
"lease_range": {
"start_ip": "10.11.12.1",
"end_ip": "10.11.12.63",
},
}],
"ipv6_enabled": False,
"internal": False,
"dns_enabled": False,

View File

@ -56,16 +56,14 @@ FIRST_NETWORK_LIBPOD = {
"driver": "bridge",
"network_interface": "libpod_veth0",
"created": "2022-01-28T09:18:37.491308364-07:00",
"subnets": [
{
"subnet": "10.11.12.0/24",
"gateway": "10.11.12.1",
"lease_range": {
"start_ip": "10.11.12.1",
"end_ip": "10.11.12.63",
},
}
],
"subnets": [{
"subnet": "10.11.12.0/24",
"gateway": "10.11.12.1",
"lease_range": {
"start_ip": "10.11.12.1",
"end_ip": "10.11.12.63",
},
}],
"ipv6_enabled": False,
"internal": False,
"dns_enabled": False,
@ -80,16 +78,14 @@ SECOND_NETWORK_LIBPOD = {
"created": "2021-03-01T09:18:37.491308364-07:00",
"driver": "bridge",
"network_interface": "libpod_veth1",
"subnets": [
{
"subnet": "10.11.12.0/24",
"gateway": "10.11.12.1",
"lease_range": {
"start_ip": "10.11.12.1",
"end_ip": "10.11.12.63",
},
}
],
"subnets": [{
"subnet": "10.11.12.0/24",
"gateway": "10.11.12.1",
"lease_range": {
"start_ip": "10.11.12.1",
"end_ip": "10.11.12.63",
},
}],
"ipv6_enabled": False,
"internal": False,
"dns_enabled": False,
@ -171,8 +167,6 @@ class NetworksManagerTestCase(unittest.TestCase):
adapter = mock.post(tests.LIBPOD_URL + "/networks/create", json=FIRST_NETWORK_LIBPOD)
network = self.client.networks.create("podman")
self.assertIsInstance(network, Network)
self.assertEqual(adapter.call_count, 1)
self.assertDictEqual(
adapter.last_request.json(),

View File

@ -3,8 +3,7 @@ import ipaddress
import json
import unittest
from dataclasses import dataclass
from typing import Any, Optional
from collections.abc import Iterable
from typing import Any, Iterable, Optional, Tuple
from unittest import mock
from requests import Response
@ -13,12 +12,12 @@ from podman import api
class ParseUtilsTestCase(unittest.TestCase):
def test_parse_repository(self) -> None:
def test_parse_repository(self):
@dataclass
class TestCase:
name: str
input: Any
expected: tuple[str, Optional[str]]
expected: Tuple[str, Optional[str]]
cases = [
TestCase(name="empty str", input="", expected=("", None)),
@ -29,39 +28,14 @@ class ParseUtilsTestCase(unittest.TestCase):
),
TestCase(
name="@digest",
input="quay.io/libpod/testimage@sha256:71f1b47263fc",
expected=("quay.io/libpod/testimage@sha256", "71f1b47263fc"),
input="quay.io/libpod/testimage@71f1b47263fc",
expected=("quay.io/libpod/testimage", "71f1b47263fc"),
),
TestCase(
name=":tag",
input="quay.io/libpod/testimage:latest",
expected=("quay.io/libpod/testimage", "latest"),
),
TestCase(
name=":tag@digest",
input="quay.io/libpod/testimage:latest@sha256:71f1b47263fc",
expected=("quay.io/libpod/testimage:latest@sha256", "71f1b47263fc"),
),
TestCase(
name=":port",
input="quay.io:5000/libpod/testimage",
expected=("quay.io:5000/libpod/testimage", None),
),
TestCase(
name=":port@digest",
input="quay.io:5000/libpod/testimage@sha256:71f1b47263fc",
expected=("quay.io:5000/libpod/testimage@sha256", "71f1b47263fc"),
),
TestCase(
name=":port:tag",
input="quay.io:5000/libpod/testimage:latest",
expected=("quay.io:5000/libpod/testimage", "latest"),
),
TestCase(
name=":port:tag:digest",
input="quay.io:5000/libpod/testimage:latest@sha256:71f1b47263fc",
expected=("quay.io:5000/libpod/testimage:latest@sha256", "71f1b47263fc"),
),
]
for case in cases:
@ -72,13 +46,13 @@ class ParseUtilsTestCase(unittest.TestCase):
f"failed test {case.name} expected {case.expected}, actual {actual}",
)
def test_decode_header(self) -> None:
def test_decode_header(self):
actual = api.decode_header("eyJIZWFkZXIiOiJ1bml0dGVzdCJ9")
self.assertDictEqual(actual, {"Header": "unittest"})
self.assertDictEqual(api.decode_header(None), {})
def test_prepare_timestamp(self) -> None:
def test_prepare_timestamp(self):
time = datetime.datetime(2022, 1, 24, 12, 0, 0)
self.assertEqual(api.prepare_timestamp(time), 1643025600)
self.assertEqual(api.prepare_timestamp(2), 2)
@ -87,11 +61,11 @@ class ParseUtilsTestCase(unittest.TestCase):
with self.assertRaises(ValueError):
api.prepare_timestamp("bad input") # type: ignore
def test_prepare_cidr(self) -> None:
def test_prepare_cidr(self):
net = ipaddress.IPv4Network("127.0.0.0/24")
self.assertEqual(api.prepare_cidr(net), ("127.0.0.0", "////AA=="))
def test_stream_helper(self) -> None:
def test_stream_helper(self):
streamed_results = [b'{"test":"val1"}', b'{"test":"val2"}']
mock_response = mock.Mock(spec=Response)
mock_response.iter_lines.return_value = iter(streamed_results)
@ -103,7 +77,7 @@ class ParseUtilsTestCase(unittest.TestCase):
self.assertIsInstance(actual, bytes)
self.assertEqual(expected, actual)
def test_stream_helper_with_decode(self) -> None:
def test_stream_helper_with_decode(self):
streamed_results = [b'{"test":"val1"}', b'{"test":"val2"}']
mock_response = mock.Mock(spec=Response)
mock_response.iter_lines.return_value = iter(streamed_results)
@ -113,7 +87,7 @@ class ParseUtilsTestCase(unittest.TestCase):
self.assertIsInstance(streamable, Iterable)
for expected, actual in zip(streamed_results, streamable):
self.assertIsInstance(actual, dict)
self.assertDictEqual(json.loads(expected), actual) # type: ignore[arg-type]
self.assertDictEqual(json.loads(expected), actual)
if __name__ == '__main__':

View File

@ -1,37 +0,0 @@
import os
import unittest
import tempfile
from unittest import mock
from podman import api
class PathUtilsTestCase(unittest.TestCase):
def setUp(self):
self.xdg_runtime_dir = os.getenv('XDG_RUNTIME_DIR')
@mock.patch.dict(os.environ, clear=True)
def test_get_runtime_dir_env_var_set(self):
with tempfile.TemporaryDirectory() as tmpdir:
os.environ['XDG_RUNTIME_DIR'] = str(tmpdir)
self.assertEqual(str(tmpdir), api.path_utils.get_runtime_dir())
@mock.patch.dict(os.environ, clear=True)
def test_get_runtime_dir_env_var_not_set(self):
if not self.xdg_runtime_dir:
self.skipTest('XDG_RUNTIME_DIR must be set for this test.')
if self.xdg_runtime_dir.startswith('/run/user/'):
self.skipTest("XDG_RUNTIME_DIR in /run/user/, can't check")
self.assertNotEqual(self.xdg_runtime_dir, api.path_utils.get_runtime_dir())
@mock.patch('os.path.isdir', lambda d: False)
@mock.patch.dict(os.environ, clear=True)
def test_get_runtime_dir_env_var_not_set_and_no_run(self):
"""Fake that XDG_RUNTIME_DIR is not set and /run/user/ does not exist."""
if not self.xdg_runtime_dir:
self.skipTest('XDG_RUNTIME_DIR must be set to fetch a working dir.')
self.assertNotEqual(self.xdg_runtime_dir, api.path_utils.get_runtime_dir())
if __name__ == '__main__':
unittest.main()

View File

@ -149,7 +149,7 @@ class PodTestCase(unittest.TestCase):
def test_stop(self, mock):
adapter = mock.post(
tests.LIBPOD_URL
+ "/pods/c8b9f5b17dc1406194010c752fc6dcb330192032e27648db9b14060447ecf3b8/stop?t=70",
+ "/pods/c8b9f5b17dc1406194010c752fc6dcb330192032e27648db9b14060447ecf3b8/stop?t=70.0",
json={
"Errs": [],
"Id": "c8b9f5b17dc1406194010c752fc6dcb330192032e27648db9b14060447ecf3b8",
@ -157,7 +157,7 @@ class PodTestCase(unittest.TestCase):
)
pod = Pod(attrs=FIRST_POD, client=self.client.api)
pod.stop(timeout=70)
pod.stop(timeout=70.0)
self.assertTrue(adapter.called_once)
@requests_mock.Mocker()
@ -180,7 +180,8 @@ class PodTestCase(unittest.TestCase):
"Titles": ["UID", "PID", "PPID", "C", "STIME", "TTY", "TIME CMD"],
}
adapter = mock.get(
tests.LIBPOD_URL + "/pods"
tests.LIBPOD_URL
+ "/pods"
"/c8b9f5b17dc1406194010c752fc6dcb330192032e27648db9b14060447ecf3b8/top"
"?ps_args=aux&stream=False",
json=body,

View File

@ -5,16 +5,15 @@ from unittest import mock
from unittest.mock import MagicMock
import requests_mock
import xdg
from podman import PodmanClient, tests
from podman.api.path_utils import get_runtime_dir, get_xdg_config_home
class PodmanClientTestCase(unittest.TestCase):
"""Test the PodmanClient() object."""
opener = mock.mock_open(
read_data="""
opener = mock.mock_open(read_data="""
[containers]
log_size_max = -1
pids_limit = 2048
@ -33,8 +32,7 @@ class PodmanClientTestCase(unittest.TestCase):
identity = "/home/qe/.ssh/id_rsa"
[network]
"""
)
""")
def setUp(self) -> None:
super().setUp()
@ -59,7 +57,7 @@ class PodmanClientTestCase(unittest.TestCase):
"os": "linux",
}
}
adapter = mock.get(tests.LIBPOD_URL + "/info", json=body) # noqa: F841
adapter = mock.get(tests.LIBPOD_URL + "/info", json=body)
with PodmanClient(base_url=tests.BASE_SOCK) as client:
actual = client.info()
@ -88,7 +86,7 @@ class PodmanClientTestCase(unittest.TestCase):
)
# Build path to support tests running as root or a user
expected = Path(get_xdg_config_home()) / "containers" / "containers.conf"
expected = Path(xdg.BaseDirectory.xdg_config_home) / "containers" / "containers.conf"
PodmanClientTestCase.opener.assert_called_with(expected, encoding="utf-8")
def test_connect_404(self):
@ -100,12 +98,16 @@ class PodmanClientTestCase(unittest.TestCase):
with mock.patch.multiple(Path, open=self.mocked_open, exists=MagicMock(return_value=True)):
with PodmanClient() as client:
expected = "http+unix://" + urllib.parse.quote_plus(
str(Path(get_runtime_dir()) / "podman" / "podman.sock")
str(
Path(xdg.BaseDirectory.get_runtime_dir(strict=False))
/ "podman"
/ "podman.sock"
)
)
self.assertEqual(client.api.base_url.geturl(), expected)
# Build path to support tests running as root or a user
expected = Path(get_xdg_config_home()) / "containers" / "containers.conf"
expected = Path(xdg.BaseDirectory.xdg_config_home) / "containers" / "containers.conf"
PodmanClientTestCase.opener.assert_called_with(expected, encoding="utf-8")

View File

@ -1,7 +1,7 @@
import io
import json
import unittest
from collections.abc import Iterable
from typing import Iterable
import requests_mock
@ -149,7 +149,8 @@ class PodsManagerTestCase(unittest.TestCase):
"Titles": ["UID", "PID", "PPID", "C", "STIME", "TTY", "TIME CMD"],
}
mock.get(
tests.LIBPOD_URL + "/pods/stats"
tests.LIBPOD_URL
+ "/pods/stats"
"?namesOrIDs=c8b9f5b17dc1406194010c752fc6dcb330192032e27648db9b14060447ecf3b8",
json=body,
)
@ -179,7 +180,8 @@ class PodsManagerTestCase(unittest.TestCase):
"Titles": ["UID", "PID", "PPID", "C", "STIME", "TTY", "TIME CMD"],
}
mock.get(
tests.LIBPOD_URL + "/pods/stats"
tests.LIBPOD_URL
+ "/pods/stats"
"?namesOrIDs=c8b9f5b17dc1406194010c752fc6dcb330192032e27648db9b14060447ecf3b8",
json=body,
)
@ -192,34 +194,30 @@ class PodsManagerTestCase(unittest.TestCase):
@requests_mock.Mocker()
def test_top_with_streaming(self, mock):
stream = [
[
{
'CPU': '2.53%',
'MemUsage': '49.15kB / 16.71GB',
'MemUsageBytes': '48KiB / 15.57GiB',
'Mem': '0.00%',
'NetIO': '7.638kB / 430B',
'BlockIO': '-- / --',
'PIDS': '1',
'Pod': '1c948ab42339',
'CID': 'd999c49a7b6c',
'Name': '1c948ab42339-infra',
}
],
[
{
'CPU': '1.46%',
'MemUsage': '57.23B / 16.71GB',
'MemUsageBytes': '48KiB / 15.57GiB',
'Mem': '0.00%',
'NetIO': '7.638kB / 430B',
'BlockIO': '-- / --',
'PIDS': '1',
'Pod': '1c948ab42339',
'CID': 'd999c49a7b6c',
'Name': '1c948ab42339-infra',
}
],
[{
'CPU': '2.53%',
'MemUsage': '49.15kB / 16.71GB',
'MemUsageBytes': '48KiB / 15.57GiB',
'Mem': '0.00%',
'NetIO': '7.638kB / 430B',
'BlockIO': '-- / --',
'PIDS': '1',
'Pod': '1c948ab42339',
'CID': 'd999c49a7b6c',
'Name': '1c948ab42339-infra',
}],
[{
'CPU': '1.46%',
'MemUsage': '57.23B / 16.71GB',
'MemUsageBytes': '48KiB / 15.57GiB',
'Mem': '0.00%',
'NetIO': '7.638kB / 430B',
'BlockIO': '-- / --',
'PIDS': '1',
'Pod': '1c948ab42339',
'CID': 'd999c49a7b6c',
'Name': '1c948ab42339-infra',
}],
]
buffer = io.StringIO()

View File

@ -1,44 +0,0 @@
import unittest
from unittest.mock import patch, MagicMock
from podman.tests import utils
class TestPodmanVersion(unittest.TestCase):
@patch('podman.tests.utils.subprocess.Popen')
def test_podman_version(self, mock_popen):
mock_proc = MagicMock()
mock_proc.stdout.read.return_value = b'5.6.0'
mock_popen.return_value.__enter__.return_value = mock_proc
self.assertEqual(utils.podman_version(), (5, 6, 0))
@patch('podman.tests.utils.subprocess.Popen')
def test_podman_version_dev(self, mock_popen):
mock_proc = MagicMock()
mock_proc.stdout.read.return_value = b'5.6.0-dev'
mock_popen.return_value.__enter__.return_value = mock_proc
self.assertEqual(utils.podman_version(), (5, 6, 0))
@patch('podman.tests.utils.subprocess.Popen')
def test_podman_version_four_digits(self, mock_popen):
mock_proc = MagicMock()
mock_proc.stdout.read.return_value = b'5.6.0.1'
mock_popen.return_value.__enter__.return_value = mock_proc
self.assertEqual(utils.podman_version(), (5, 6, 0))
@patch('podman.tests.utils.subprocess.Popen')
def test_podman_version_release_candidate(self, mock_popen):
mock_proc = MagicMock()
mock_proc.stdout.read.return_value = b'5.6.0-rc1'
mock_popen.return_value.__enter__.return_value = mock_proc
self.assertEqual(utils.podman_version(), (5, 6, 0))
@patch('podman.tests.utils.subprocess.Popen')
def test_podman_version_none(self, mock_popen):
mock_proc = MagicMock()
mock_proc.stdout.read.return_value = b''
mock_popen.return_value.__enter__.return_value = mock_proc
with self.assertRaises(RuntimeError) as context:
utils.podman_version()
self.assertEqual(str(context.exception), "Unable to detect podman version. Got \"\"")

View File

@ -39,13 +39,6 @@ class VolumeTestCase(unittest.TestCase):
volume.remove(force=True)
self.assertTrue(adapter.called_once)
@requests_mock.Mocker()
def test_inspect(self, mock):
mock.get(tests.LIBPOD_URL + "/volumes/dbase/json?tlsVerify=False", json=FIRST_VOLUME)
vol_manager = VolumesManager(self.client.api)
actual = vol_manager.prepare_model(attrs=FIRST_VOLUME)
self.assertEqual(actual.inspect(tls_verify=False)["Mountpoint"], "/var/database")
if __name__ == '__main__':
unittest.main()

View File

@ -1,32 +0,0 @@
import pathlib
import csv
import re
import subprocess
try:
from platform import freedesktop_os_release
except ImportError:
def freedesktop_os_release() -> dict[str, str]:
"""This is a fallback for platforms that don't have the freedesktop_os_release function.
Python < 3.10
"""
path = pathlib.Path("/etc/os-release")
with open(path) as f:
reader = csv.reader(f, delimiter="=")
return dict(reader)
def podman_version() -> tuple[int, ...]:
cmd = ["podman", "info", "--format", "{{.Version.Version}}"]
with subprocess.Popen(cmd, stdout=subprocess.PIPE) as proc:
version = proc.stdout.read().decode("utf-8").strip()
match = re.match(r"(\d+\.\d+\.\d+)", version)
if not match:
raise RuntimeError(f"Unable to detect podman version. Got \"{version}\"")
version = match.group(1)
return tuple(int(x) for x in version.split("."))
OS_RELEASE = freedesktop_os_release()
PODMAN_VERSION = podman_version()

View File

@ -1,4 +1,4 @@
"""Version of PodmanPy."""
__version__ = "5.6.0"
__version__ = "4.8.1"
__compatible_version__ = "1.40"

View File

@ -1,164 +1,33 @@
[tool.black]
line-length = 100
skip-string-normalization = true
preview = true
target-version = ["py36"]
include = '\.pyi?$'
exclude = '''
/(
\.git
| \.tox
| \.venv
| \.history
| build
| dist
| docs
| hack
)/
'''
[tool.isort]
profile = "black"
line_length = 100
[build-system]
requires = ["setuptools>=46.4"]
# Any changes should be copied into requirements.txt, setup.cfg, and/or test-requirements.txt
requires = [
"setuptools>=46.4",
]
build-backend = "setuptools.build_meta"
[project]
name = "podman"
# TODO: remove the line version = ... on podman-py > 5.4.0 releases
# dynamic = ["version"]
version = "5.6.0"
description = "Bindings for Podman RESTful API"
readme = "README.md"
license = {file = "LICENSE"}
requires-python = ">=3.9"
authors = [
{ name = "Brent Baude" },
{ name = "Jhon Honce", email = "jhonce@redhat.com" },
{ name = "Urvashi Mohnani" },
{ name = "Nicola Sella", email = "nsella@redhat.com" },
]
keywords = [
"libpod",
"podman",
]
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
"Topic :: Software Development :: Libraries :: Python Modules",
]
# compatible releases
# ~= with version numbers
dependencies = [
"requests >=2.24",
"tomli>=1.2.3; python_version<'3.11'",
"urllib3",
]
[project.optional-dependencies]
progress_bar = [
"rich >= 12.5.1",
]
docs = [
"sphinx"
]
test = [
"coverage",
"fixtures",
"pytest",
"requests-mock",
"tox",
]
[project.urls]
"Bug Tracker" = "https://github.com/containers/podman-py/issues"
Homepage = "https://github.com/containers/podman-py"
"Libpod API" = "https://docs.podman.io/en/latest/_static/api.html"
[tool.pytest.ini_options]
log_cli = true
log_cli_level = "DEBUG"
log_cli_format = "%(asctime)s [%(levelname)8s] %(message)s (%(filename)s:%(lineno)s)"
log_cli_date_format = "%Y-%m-%d %H:%M:%S"
testpaths = [
"podman/tests",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["podman*"]
# TODO: remove the line version = ... on podman-py > 5.4.0 releases
# [tool.setuptools.dynamic]
# version = {attr = "podman.version.__version__"}
[tool.ruff]
line-length = 100
src = ["podman"]
# This is the section where Black is mostly replaced with Ruff
[tool.ruff.format]
exclude = [
".git",
".history",
".tox",
".venv",
"build",
"dist",
"docs",
"hack",
]
quote-style = "preserve"
[tool.ruff.lint]
select = [
# More stuff here https://docs.astral.sh/ruff/rules/
"F", # Pyflakes
"E", # Pycodestyle Error
"W", # Pycodestyle Warning
"N", # PEP8 Naming
"UP", # Pyupgrade
# TODO "ANN",
# TODO "S", # Bandit
"B", # Bugbear
"A", # flake-8-builtins
"YTT", # flake-8-2020
"PLC", # Pylint Convention
"PLE", # Pylint Error
"PLW", # Pylint Warning
]
# Some checks should be enabled for code sanity disabled now
# to avoid changing too many lines
ignore = [
"N818", # TODO Error Suffix in exception name
]
[tool.ruff.lint.flake8-builtins]
builtins-ignorelist = ["copyright", "all"]
[tool.ruff.lint.per-file-ignores]
"podman/tests/*.py" = ["S"]
[tool.mypy]
install_types = true
non_interactive = true
allow_redefinition = true
no_strict_optional = true
ignore_missing_imports = true
[[tool.mypy.overrides]]
module = [
"podman.api.adapter_utils",
"podman.api.client",
"podman.api.ssh",
"podman.api.tar_utils",
"podman.api.uds",
"podman.domain.config",
"podman.domain.containers",
"podman.domain.containers_create",
"podman.domain.containers_run",
"podman.domain.events",
"podman.domain.images_build",
"podman.domain.images_manager",
"podman.domain.manager",
"podman.domain.manifests",
"podman.domain.networks",
"podman.domain.networks_manager",
"podman.domain.pods",
"podman.domain.pods_manager",
"podman.domain.registry_data",
"podman.domain.secrets",
"podman.domain.volumes",
"podman.errors.exceptions"
]
ignore_errors = true
[tool.coverage.report]
exclude_also = [
"unittest.main()",
]

9
requirements.txt Normal file
View File

@ -0,0 +1,9 @@
# Any changes should be copied into pyproject.toml
pyxdg>=0.26
requests>=2.24
setuptools
sphinx
tomli>=1.2.3; python_version<'3.11'
urllib3
wheel
rich >= 12.5.1

View File

@ -81,8 +81,6 @@ export PBR_VERSION="0.0.0"
%pyproject_save_files %{pypi_name}
%endif
%check
%if %{defined rhel8_py}
%files -n python%{python3_pkgversion}-%{pypi_name}
%dir %{python3_sitelib}/%{pypi_name}-*-py%{python3_version}.egg-info
@ -90,11 +88,15 @@ export PBR_VERSION="0.0.0"
%dir %{python3_sitelib}/%{pypi_name}
%{python3_sitelib}/%{pypi_name}/*
%else
%pyproject_extras_subpkg -n python%{python3_pkgversion}-%{pypi_name} progress_bar
%files -n python%{python3_pkgversion}-%{pypi_name} -f %{pyproject_files}
%endif
%license LICENSE
%doc README.md
%changelog
%if %{defined autochangelog}
%autochangelog
%else
* Mon May 01 2023 RH Container Bot <rhcontainerbot@fedoraproject.org>
- Placeholder changelog for envs that are not autochangelog-ready
%endif

View File

@ -1,7 +1,7 @@
[metadata]
name = podman
version = 5.6.0
author = Brent Baude, Jhon Honce, Urvashi Mohnani, Nicola Sella
version = 4.8.1
author = Brent Baude, Jhon Honce
author_email = jhonce@redhat.com
description = Bindings for Podman RESTful API
long_description = file: README.md
@ -19,26 +19,25 @@ classifiers =
License :: OSI Approved :: Apache Software License
Operating System :: OS Independent
Programming Language :: Python :: 3 :: Only
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
Programming Language :: Python :: 3.10
Programming Language :: Python :: 3.11
Programming Language :: Python :: 3.12
Programming Language :: Python :: 3.13
Topic :: Software Development :: Libraries :: Python Modules
keywords = podman, libpod
[options]
include_package_data = True
python_requires = >=3.9
python_requires = >=3.6
test_suite =
# Any changes should be copied into pyproject.toml
install_requires =
pyxdg >=0.26
requests >=2.24
tomli>=1.2.3; python_version<'3.11'
urllib3
[options.extras_require]
progress_bar =
rich >= 12.5.1
# typing_extensions are included for RHEL 8.5

View File

@ -9,7 +9,7 @@ excluded = [
]
class build_py(build_py_orig): # noqa: N801
class build_py(build_py_orig):
def find_package_modules(self, package, package_dir):
modules = super().find_package_modules(package, package_dir)
return [

Some files were not shown because too many files have changed in this diff Show More