diff --git a/.codespellignore b/.codespellignore new file mode 100644 index 0000000..bf52b4c --- /dev/null +++ b/.codespellignore @@ -0,0 +1 @@ +assertIn diff --git a/.codespellrc b/.codespellrc new file mode 100644 index 0000000..1fd4110 --- /dev/null +++ b/.codespellrc @@ -0,0 +1,4 @@ +[codespell] +skip = .git,*.pdf,*.svg,requirements.txt,test-requirements.txt +# poped - loved variable name +ignore-words-list = poped diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 0000000..ba79221 --- /dev/null +++ b/.coveragerc @@ -0,0 +1,2 @@ +[run] +parallel=True diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000..79736ca --- /dev/null +++ b/.editorconfig @@ -0,0 +1,19 @@ +root = true + +[*] +indent_style = space +indent_size = tab +tab_width = 4 +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 100 + +[*.{yml,yaml}] +indent_style = space +indent_size = 2 + +[*.py] +indent_style = space + diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index f5e937b..43991ee 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -13,8 +13,6 @@ A clear and concise description of what the bug is. Please make sure it's not a bug in podman (in that case report it to podman) or your understanding of docker-compose or how rootless containers work (for example, it's normal for rootless container not to be able to listen for port less than 1024 like 80) -please try to reproduce the bug in latest devel branch - **To Reproduce** Steps to reproduce the behavior: 1. what is the content of the current working directory (ex. `docker-compose.yml`, `.env`, `Dockerfile`, ...etc.) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..cb5bf18 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,10 @@ + +## Contributor Checklist: + +If this PR adds a new feature that improves compatibility with docker-compose, please add a link +to the exact part of compose spec that the PR touches. + +For any user-visible change please add a release note to newsfragments directory, e.g. +newsfragments/my_feature.feature. See newsfragments/README.md for more details. + +All changes require additional unit tests. 
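A side note on the `.coveragerc` added above: `parallel = True` makes every measured process write its own `.coverage.*` data file, which is why the CI and contributing docs later in this patch run `coverage combine` before `coverage report`. Below is a minimal sketch of that flow using the coverage.py API; it is illustration only, not project code, and the measured statement is just a stand-in.

```python
# Minimal sketch (not project code): why "parallel = True" in .coveragerc
# requires a combine step before a report can be produced.
import coverage

cov = coverage.Coverage()  # picks up [run] parallel=True from .coveragerc in the cwd
cov.start()
total = sum(range(10))     # stand-in for the code under test
cov.stop()
cov.save()                 # writes .coverage.<host>.<pid>.<random>, not a single .coverage file

cov.combine()              # merge all .coverage.* fragments into one data file
cov.report()               # now a single coverage report can be produced
```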
diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml new file mode 100644 index 0000000..13c0846 --- /dev/null +++ b/.github/workflows/codespell.yml @@ -0,0 +1,22 @@ +--- +name: Codespell + +on: + push: + pull_request: + +permissions: + contents: read + +jobs: + codespell: + name: Check for spelling errors + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Codespell + uses: codespell-project/actions-codespell@v2 + with: + ignore_words_file: .codespellignore diff --git a/.github/workflows/pylint.yml b/.github/workflows/pylint.yml deleted file mode 100644 index 957bd98..0000000 --- a/.github/workflows/pylint.yml +++ /dev/null @@ -1,41 +0,0 @@ -name: Pylint - -on: -- push -- pull_request - -jobs: - lint-black: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: Install psf/black requirements - run: | - sudo apt-get update - sudo apt-get install -y python3 python3-venv - - uses: psf/black@stable - with: - options: "--check --verbose" - version: "~= 23.3" - - lint-pylint: - runs-on: ubuntu-latest - strategy: - matrix: - python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] - steps: - - uses: actions/checkout@v3 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - - name: Install dependencies - run: | - python -m pip install --upgrade pip - if [ -f requirements.txt ]; then pip install -r requirements.txt; fi - pip install pylint - - name: Analysing the code with pylint - run: | - python -m compileall podman_compose.py - pylint podman_compose.py - # pylint $(git ls-files '*.py') diff --git a/.github/workflows/pytest.yml b/.github/workflows/pytest.yml deleted file mode 100644 index d962053..0000000 --- a/.github/workflows/pytest.yml +++ /dev/null @@ -1,36 +0,0 @@ -# This workflow will install Python dependencies, run tests and lint with a single version of Python -# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions - -name: PyTest - -on: - push: - branches: [ devel ] - pull_request: - branches: [ devel ] - -jobs: - test: - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v3 - - name: Set up Python 3.10 - uses: actions/setup-python@v4 - with: - python-version: "3.10" - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install flake8 pytest - if [ -f requirements.txt ]; then pip install -r requirements.txt; fi - - name: Lint with flake8 - run: | - # stop the build if there are Python syntax errors or undefined names - flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics - # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide - flake8 . 
--count --exit-zero --max-complexity=10 --max-line-length=127 --statistics - - name: Test with pytest - run: | - python -m pytest ./pytests - diff --git a/.github/workflows/static-checks.yml b/.github/workflows/static-checks.yml new file mode 100644 index 0000000..b0c13d2 --- /dev/null +++ b/.github/workflows/static-checks.yml @@ -0,0 +1,25 @@ +name: Static checks + +on: +- push +- pull_request + +jobs: + static-checks: + runs-on: ubuntu-latest + container: + image: docker.io/library/python:3.11-bookworm + # cgroupns needed to address the following error: + # write /sys/fs/cgroup/cgroup.subtree_control: operation not supported + options: --privileged --cgroupns=host + steps: + - uses: actions/checkout@v4 + - name: Analysing the code with ruff + run: | + set -e + pip install -r test-requirements.txt + ruff format --check + ruff check + - name: Analysing the code with pylint + run: | + pylint podman_compose.py diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 0000000..a7ad716 --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,40 @@ +name: Tests + +on: + push: + pull_request: + +jobs: + test: + strategy: + fail-fast: false + matrix: + python-version: [ '3.8', '3.9', '3.10', '3.11', '3.12' ] + + runs-on: ubuntu-latest + container: + image: "docker.io/library/python:${{ matrix.python-version }}-bookworm" + # cgroupns needed to address the following error: + # write /sys/fs/cgroup/cgroup.subtree_control: operation not supported + options: --privileged --cgroupns=host + steps: + - uses: actions/checkout@v4 + - name: Install dependencies + run: | + set -e + apt update && apt install -y podman + python -m pip install --upgrade pip + pip install -r requirements.txt + pip install -r test-requirements.txt + - name: Run integration tests + run: | + python -m unittest discover -v tests/integration + env: + TESTS_DEBUG: 1 + - name: Run unit tests + run: | + coverage run --source podman_compose -m unittest discover tests/unit + - name: Report coverage + run: | + coverage combine + coverage report --format=markdown | tee -a $GITHUB_STEP_SUMMARY diff --git a/.gitignore b/.gitignore index 6621793..a0d9bb8 100644 --- a/.gitignore +++ b/.gitignore @@ -47,6 +47,8 @@ coverage.xml *.cover .hypothesis/ .pytest_cache/ +test-compose.yaml +test-compose-?.yaml # Translations *.mo @@ -103,3 +105,6 @@ venv.bak/ # mypy .mypy_cache/ + + +.vscode diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6787a11..5732e35 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -30,3 +30,7 @@ repos: "-sn", # Don't display the score "--rcfile=.pylintrc", # Link to your config file ] + - repo: https://github.com/codespell-project/codespell + rev: v2.2.5 + hooks: + - id: codespell diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5cd371b..a960f44 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,114 +1,135 @@ # Contributing to podman-compose -## Who can contribute? +## Who can contribute? -- Users that found a bug -- Users that wants to propose new functionalities or enhancements -- Users that want to help other users to troubleshoot their environments -- Developers that want to fix bugs -- Developers that want to implement new functionalities or enhancements - -## Branches - -Please request your PR to be merged into the `devel` branch. -Changes to the `stable` branch are managed by the repository maintainers. 
+- Users that found a bug, +- Users that want to propose new functionalities or enhancements, +- Users that want to help other users to troubleshoot their environments, +- Developers that want to fix bugs, +- Developers that want to implement new functionalities or enhancements. ## Development environment setup Note: Some steps are OPTIONAL but all are RECOMMENDED. -1. Fork the project repo and clone it -```shell -$ git clone https://github.com/USERNAME/podman-compose.git -$ cd podman-compose -``` -1. (OPTIONAL) Create a python virtual environment. Example using [virtualenv wrapper](https://virtualenvwrapper.readthedocs.io/en/latest/): -```shell -mkvirtualenv podman-compose -``` -2. Install the project runtime and development requirements -```shell -$ pip install '.[devel]' -``` -3. (OPTIONAL) Install `pre-commit` git hook scripts (https://pre-commit.com/#3-install-the-git-hook-scripts) -```shell -$ pre-commit install -``` -4. Create a new branch, develop and add tests when possible -5. Run linting & testing before commiting code. Ensure all the hooks are passing. -```shell -$ pre-commit run --all-files -``` -6. Commit your code to your fork's branch. - - Make sure you include a `Signed-off-by` message in your commits. Read [this guide](https://docs.github.com/en/authentication/managing-commit-signature-verification/signing-commits) to learn how to sign your commits - - In the commit message reference the Issue ID that your code fixes and a brief description of the changes. Example: `Fixes #516: allow empty network` -7. Open a PR to `containers/podman-compose:devel` and wait for a maintainer to review your work. +1. Fork the project repository and clone it: + + ```shell + $ git clone https://github.com/USERNAME/podman-compose.git + $ cd podman-compose + ``` + +2. (OPTIONAL) Create a Python virtual environment. Example using + [virtualenv wrapper](https://virtualenvwrapper.readthedocs.io/en/latest/): + + ```shell + $ mkvirtualenv podman-compose + ``` + +3. Install the project runtime and development requirements: + + ```shell + $ pip install '.[devel]' + ``` + +4. (OPTIONAL) Install `pre-commit` git hook scripts + (https://pre-commit.com/#3-install-the-git-hook-scripts): + + ```shell + $ pre-commit install + ``` + +5. Create a new branch, develop and add tests when possible. +6. Run linting and testing before committing code. Ensure all the hooks are passing. + + ```shell + $ pre-commit run --all-files + ``` + +7. Run code coverage: + + ```shell + $ coverage run --source podman_compose -m unittest discover tests/unit + $ python3 -m unittest discover tests/integration + $ coverage combine + $ coverage report + $ coverage html + ``` + +8. Commit your code to your fork's branch. + - Make sure you include a `Signed-off-by` message in your commits. + Read [this guide](https://github.com/containers/common/blob/main/CONTRIBUTING.md#sign-your-prs) + to learn how to sign your commits. + - In the commit message, reference the Issue ID that your code fixes and a brief description of + the changes. + Example: `Fixes #516: Allow empty network` +9. Open a pull request to `containers/podman-compose` and wait for a maintainer to review your work. ## Adding new commands -To add a command you need to add a function that is decorated -with `@cmd_run` passing the compose instance, command name and -description. 
the wrapped function should accept two arguments -the compose instance and the command-specific arguments (resulted -from python's `argparse` package) inside that command you can -run PodMan like this `compose.podman.run(['inspect', 'something'])` -and inside that function you can access `compose.pods` -and `compose.containers` ...etc. -Here is an example +To add a command, you need to add a function that is decorated with `@cmd_run`. -``` +The decorated function must be declared `async` and should accept two arguments: the compose +instance and the command-specific arguments (resulting from Python's `argparse` package). + +In this function, you can run Podman (e.g. `await compose.podman.run(['inspect', 'something'])`), +access `compose.pods`, `compose.containers`, etc. + +Here is an example: + +```python @cmd_run(podman_compose, 'build', 'build images defined in the stack') -def compose_build(compose, args): - compose.podman.run(['build', 'something']) +async def compose_build(compose, args): + await compose.podman.run(['build', 'something']) ``` ## Command arguments parsing -Add a function that accept `parser` which is an instance from `argparse`. -In side that function you can call `parser.add_argument()`. -The function decorated with `@cmd_parse` accepting the compose instance, -and command names (as a list or as a string). -You can do this multiple times. +To add arguments to be parsed by a command, you need to add a function that is decorated with +`@cmd_parse` which accepts the compose instance and the command's name (as a string list or as a +single string). -Here is an example +The decorated function should accept a single argument: an `argparse.ArgumentParser` instance. -``` +In this function, you can call `parser.add_argument()` to add a new argument to the command. + +Note you can add such a function multiple times. + +Here is an example: + +```python @cmd_parse(podman_compose, 'build') def compose_build_parse(parser): parser.add_argument("--pull", help="attempt to pull a newer version of the image", action='store_true') parser.add_argument("--pull-always", - help="attempt to pull a newer version of the image, Raise an error even if the image is present locally.", action='store_true') + help="Attempt to pull a newer version of the image, " + "raise an error even if the image is present locally.", + action='store_true') ``` -NOTE: `@cmd_parse` should be after `@cmd_run` +NOTE: `@cmd_parse` should be after `@cmd_run`.
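Putting the two decorators together, here is a hedged sketch of what a complete command could look like. The `greet` command, its `--prefix` flag and the use of `compose.services` are hypothetical and chosen for illustration; only the `@cmd_run`/`@cmd_parse` machinery and the module-level `podman_compose` instance come from the project.

```python
# Hypothetical "greet" command combining @cmd_run and @cmd_parse as described above.
from podman_compose import cmd_parse, cmd_run, podman_compose


@cmd_run(podman_compose, "greet", "print a greeting for every service in the stack")
async def compose_greet(compose, args):
    # compose.services is the parsed services dict of the loaded compose file
    for service_name in compose.services:
        print(f"{args.prefix}{service_name}")


@cmd_parse(podman_compose, "greet")
def compose_greet_parse(parser):
    parser.add_argument("--prefix", default="hello ", help="text printed before each service name")
```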
-## Calling a command from inside another +## Calling a command from another one -If you need to call `podman-compose down` from inside `podman-compose up` -do something like: +If you need to call `podman-compose down` from `podman-compose up`, do something like: -``` +```python @cmd_run(podman_compose, 'up', 'up desc') -def compose_up(compose, args): - compose.commands['down'](compose, args) +async def compose_up(compose, args): + await compose.commands['down'](compose, args) # or - compose.commands['down'](argparse.Namespace(foo=123)) + await compose.commands['down'](argparse.Namespace(foo=123)) ``` - ## Missing Commands (help needed) + ``` bundle Generate a Docker bundle from the Compose file - config Validate and view the Compose file create Create services events Receive real time events from containers images List images - logs View output from containers - port Print the public port for a port binding - ps List containers rm Remove stopped containers - run Run a one-off command scale Set number of containers for a service top Display the running processes ``` diff --git a/README.md b/README.md index f7e67a6..7d075ea 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,5 @@ # Podman Compose -## [![Pylint Test: ](https://github.com/containers/podman-compose/actions/workflows/pylint.yml/badge.svg)](https://github.com/containers/podman-compose/actions/workflows/pylint.yml) [![Unit tests PyTest](https://github.com/containers/podman-compose/actions/workflows/pytest.yml/badge.svg)](https://github.com/containers/podman-compose/actions/workflows/pytest.yml) - +## [![Tests](https://github.com/containers/podman-compose/actions/workflows/test.yml/badge.svg)](https://github.com/containers/podman-compose/actions/workflows/test.yml) An implementation of [Compose Spec](https://compose-spec.io/) with [Podman](https://podman.io/) backend. This project focuses on: @@ -11,7 +10,11 @@ This project focuses on: This project only depends on: * `podman` -* [podman dnsname plugin](https://github.com/containers/dnsname): It is usually found in the `podman-plugins` or `podman-dnsname` distro packages, those packages are not pulled by default and you need to install them. This allows containers to be able to resolve each other if they are on the same CNI network. +* [podman dnsname plugin](https://github.com/containers/dnsname): It is usually found in + the `podman-plugins` or `podman-dnsname` distro packages, those packages are not pulled + by default and you need to install them. This allows containers to be able to resolve + each other if they are on the same CNI network. This is not necessary when podman is using + netavark as a network backend. * Python3 * [PyYAML](https://pyyaml.org/) * [python-dotenv](https://pypi.org/project/python-dotenv/) @@ -49,9 +52,11 @@ like `hostnet`. If you desire that behavior, pass it the standard way like `netw ## Installation +### Pip + Install the latest stable version from PyPI: -``` +```bash pip3 install podman-compose ``` @@ -59,14 +64,33 @@ pass `--user` to install inside regular user home without being root. 
Or latest development version from GitHub: -``` -pip3 install https://github.com/containers/podman-compose/archive/devel.tar.gz +```bash +pip3 install https://github.com/containers/podman-compose/archive/main.tar.gz ``` +### Homebrew + +```bash +brew install podman-compose +``` + +### Manual + +```bash +curl -o /usr/local/bin/podman-compose https://raw.githubusercontent.com/containers/podman-compose/main/podman_compose.py +chmod +x /usr/local/bin/podman-compose +``` + +or inside your home + +```bash +curl -o ~/.local/bin/podman-compose https://raw.githubusercontent.com/containers/podman-compose/main/podman_compose.py +chmod +x ~/.local/bin/podman-compose +``` or install from Fedora (starting from f31) repositories: -``` +```bash sudo dnf install podman-compose ``` @@ -75,10 +99,9 @@ sudo dnf install podman-compose We have included fully functional sample stacks inside `examples/` directory. You can get more examples from [awesome-compose](https://github.com/docker/awesome-compose). - A quick example would be -``` +```bash cd examples/busybox podman-compose --help podman-compose up --help @@ -103,11 +126,11 @@ There is also AWX 17.1.0 Inside `tests/` directory we have many useless docker-compose stacks that are meant to test as many cases as we can to make sure we are compatible -### Unit tests with pytest -run a pytest with following command +### Unit tests with unittest +run a unittest with following command ```shell -python -m pytest pytests +python3 -m unittest discover tests/unit ``` # Contributing guide diff --git a/docs/Changelog-1.1.0.md b/docs/Changelog-1.1.0.md new file mode 100644 index 0000000..8791b10 --- /dev/null +++ b/docs/Changelog-1.1.0.md @@ -0,0 +1,33 @@ +Version v1.1.0 (2024-04-17) +=========================== + +Bug fixes +--------- + +- Fixed support for values with equals sign in `-e` argument of `run` and `exec` commands. +- Fixed duplicate arguments being emitted in `stop` and `restart` commands. +- Removed extraneous debug output. `--verbose` flag has been added to preserve verbose output. +- Links aliases are now added to service aliases. +- Fixed image build process to use defined environmental variables. +- Empty list is now allowed to be `COMMAND` and `ENTRYPOINT`. +- Environment files are now resolved relative to current working directory. +- Exit code of container build is now preserved as return code of `build` command. + +New features +------------ + +- Added support for `uidmap`, `gidmap`, `http_proxy` and `runtime` service configuration keys. +- Added support for `enable_ipv6` network configuration key. +- Added `--parallel` option to support parallel pulling and building of images. +- Implemented support for maps in `sysctls` container configuration key. +- Implemented `stats` command. +- Added `--no-normalize` flag to `config` command. +- Added support for `include` global configuration key. +- Added support for `build` command. +- Added support to start containers with multiple networks. +- Added support for `profile` argument. +- Added support for starting podman in existing network namespace. +- Added IPAM driver support. +- Added support for file secrets being passed to `podman build` via `--secret` argument. +- Added support for multiple networks with separately specified IP and MAC address. +- Added support for `service.build.ulimits` when building image. 
diff --git a/docs/Changelog-1.2.0.md b/docs/Changelog-1.2.0.md new file mode 100644 index 0000000..95d32c1 --- /dev/null +++ b/docs/Changelog-1.2.0.md @@ -0,0 +1,40 @@ +Version v1.2.0 (2024-06-26) +=========================== + +Bug fixes +--------- + +- Fixed handling of `--in-pod` argument. Previously it was hard to provide false value to it. +- podman-compose no longer creates pods when registering systemd unit. +- Fixed warning `RuntimeWarning: coroutine 'create_pods' was never awaited` +- Fixed error when setting up IPAM network with default driver. +- Fixed support for having list and dictionary `depends_on` sections in related compose files. +- Fixed logging of failed build message. +- Fixed support for multiple entries in `include` section. +- Fixed environment variable precedence order. + +Changes +------- + +- `x-podman` dictionary in container root has been migrated to `x-podman.*` fields in container root. + +New features +------------ + +- Added support for `--publish` in `podman-compose run`. +- Added support for Podman external root filesystem management (`--rootfs` option). +- Added support for `podman-compose images` command. +- Added support for `env_file` being configured via dictionaries. +- Added support for enabling GPU access. +- Added support for selinux in verbose mount specification. +- Added support for `additional_contexts` section. +- Added support for multi-line environment files. +- Added support for passing contents of `podman-compose.yml` via stdin. +- Added support for specifying the value for `--in-pod` setting in `podman-compose.yml` file. +- Added support for environmental secrets. + +Documentation +------------- + +- Added instructions on how to install podman-compose on Homebrew. +- Added explanation that netavark is an alternative to dnsname plugin diff --git a/docs/Extensions.md b/docs/Extensions.md new file mode 100644 index 0000000..d99467a --- /dev/null +++ b/docs/Extensions.md @@ -0,0 +1,128 @@ +# Podman specific extensions to the docker-compose format + +Podman-compose supports the following extension to the docker-compose format. These extensions +are generally specified under fields with "x-podman" prefix in the compose file. + +## Container management + +The following extension keys are available under container configuration: + +* `x-podman.uidmaps` - Run the container in a new user namespace using the supplied UID mapping. + +* `x-podman.gidmaps` - Run the container in a new user namespace using the supplied GID mapping. + +* `x-podman.rootfs` - Run the container without requiring any image management; the rootfs of the +container is assumed to be managed externally. + +For example, the following docker-compose.yml allows running a podman container with externally managed rootfs. +```yml +version: "3" +services: + my_service: + command: ["/bin/busybox"] + x-podman.rootfs: "/path/to/rootfs" +``` + +For explanations of these extensions, please refer to the [Podman Documentation](https://docs.podman.io/). + + +## Per-network MAC-addresses + +Generic docker-compose files support specification of the MAC address on the container level. If the +container has multiple network interfaces, the specified MAC address is applied to the first +specified network. + +Podman-compose in addition supports the specification of MAC addresses on a per-network basis. This +is done by adding a `x-podman.mac_address` key to the network configuration in the container. 
The +value of the `x-podman.mac_address` key is the MAC address to be used for the network interface. + +Specifying a MAC address for the container and for individual networks at the same time is not +supported. + +Example: + +```yaml +--- +version: "3" + +networks: + net0: + driver: "bridge" + ipam: + config: + - subnet: "192.168.0.0/24" + net1: + driver: "bridge" + ipam: + config: + - subnet: "192.168.1.0/24" + +services: + webserver: + image: "busybox" + command: ["/bin/busybox", "httpd", "-f", "-h", "/etc", "-p", "8001"] + networks: + net0: + ipv4_address: "192.168.0.10" + x-podman.mac_address: "02:aa:aa:aa:aa:aa" + net1: + ipv4_address: "192.168.1.10" + x-podman.mac_address: "02:bb:bb:bb:bb:bb" +``` + +## Podman-specific network modes + +Generic docker-compose supports the following values for `network_mode` for a container: + +- `bridge` +- `host` +- `none` +- `service` +- `container` + +In addition, podman-compose supports the following podman-specific values for `network_mode`: + +- `slirp4netns[:<options>,...]` +- `ns:<path>` +- `pasta[:<options>,...]` +- `private` + +The options to the network modes are passed to the `--network` option of the `podman create` command +as-is. + + +## Compatibility of default network names between docker-compose and podman-compose + +Current versions of podman-compose may produce different default external network names than +docker-compose under certain conditions. Specifically, docker-compose removes dashes (`-` character) +from the project name. + +To enable compatibility between docker-compose and podman-compose, specify +`default_net_name_compat: true` under the global `x-podman` key: + +``` +x-podman: + default_net_name_compat: true +``` + +By default `default_net_name_compat` is `false`. This will change to `true` at some point and the +setting will be removed. + +## Custom pods management + +Podman-compose can place containers in pods. This is controlled by the `in_pod` key under the global `x-podman` key. +It allows providing a custom value for `--in-pod` and is especially relevant when `--userns` has to be set. + +For example, the following docker-compose.yml allows using `userns_mode` by overriding the default +value of `--in-pod` (unless it was specifically provided by `--in-pod=True` on the command line). +```yml +version: "3" +services: + cont: + image: nopush/podman-compose-test + userns_mode: keep-id:uid=1000 + command: ["dumb-init", "/bin/busybox", "httpd", "-f", "-p", "8080"] + +x-podman: + in_pod: false +``` diff --git a/examples/hello-python/app/web.py b/examples/hello-python/app/web.py index 3008e69..6173ab7 100644 --- a/examples/hello-python/app/web.py +++ b/examples/hello-python/app/web.py @@ -1,7 +1,7 @@ # pylint: disable=import-error # pylint: disable=unused-import -import os import asyncio # noqa: F401 +import os import aioredis from aiohttp import web diff --git a/examples/nvidia-smi/docker-compose.yaml b/examples/nvidia-smi/docker-compose.yaml new file mode 100644 index 0000000..26c411f --- /dev/null +++ b/examples/nvidia-smi/docker-compose.yaml @@ -0,0 +1,11 @@ +services: + test: + image: nvidia/cuda:12.3.1-base-ubuntu20.04 + command: nvidia-smi + deploy: + resources: + reservations: + devices: + - driver: nvidia + count: 1 + capabilities: [gpu] diff --git a/newsfragments/978.bugfix b/newsfragments/978.bugfix new file mode 100644 index 0000000..6b155a3 --- /dev/null +++ b/newsfragments/978.bugfix @@ -0,0 +1 @@ +Fixed support for de-facto alternative `Dockerfile` names (e.g.
`Containerfile`) diff --git a/newsfragments/README.txt b/newsfragments/README.txt new file mode 100644 index 0000000..69f1e34 --- /dev/null +++ b/newsfragments/README.txt @@ -0,0 +1,13 @@ +This is the directory for news fragments used by towncrier: https://github.com/hawkowl/towncrier + +You create a news fragment in this directory when you make a change, and the file gets removed from +this directory when the news is published. + +towncrier has a few standard types of news fragments, signified by the file extension. These are: + +.feature: Signifying a new feature. +.bugfix: Signifying a bug fix. +.doc: Signifying a documentation improvement. +.removal: Signifying a deprecation or removal of public API. +.change: Signifying a change of behavior +.misc: Miscellaneous change diff --git a/newsfragments/build-labels.feature b/newsfragments/build-labels.feature new file mode 100644 index 0000000..a0c3afe --- /dev/null +++ b/newsfragments/build-labels.feature @@ -0,0 +1 @@ +Added support for build labels. diff --git a/newsfragments/check-if-pod-exists.bugfix b/newsfragments/check-if-pod-exists.bugfix new file mode 100644 index 0000000..8c4a472 --- /dev/null +++ b/newsfragments/check-if-pod-exists.bugfix @@ -0,0 +1 @@ +Fixed a bug that caused attempts to create already existing pods multiple times. diff --git a/newsfragments/default_net_name_compat.feature b/newsfragments/default_net_name_compat.feature new file mode 100644 index 0000000..d64d527 --- /dev/null +++ b/newsfragments/default_net_name_compat.feature @@ -0,0 +1 @@ +Added a way to get compatibility of default network names with docker compose. This is selected by setting `default_net_name_compat: true` on `x-podman` global dictionary. diff --git a/newsfragments/dont-resolve-links-to-compose-file.bugfix b/newsfragments/dont-resolve-links-to-compose-file.bugfix new file mode 100644 index 0000000..6f36b83 --- /dev/null +++ b/newsfragments/dont-resolve-links-to-compose-file.bugfix @@ -0,0 +1 @@ +Fix compatibility with docker-compose in how symlinks to docker-compose.yml are handled. diff --git a/newsfragments/network-mode-none.bugfix b/newsfragments/network-mode-none.bugfix new file mode 100644 index 0000000..e1a7266 --- /dev/null +++ b/newsfragments/network-mode-none.bugfix @@ -0,0 +1 @@ +Fix support for `network_mode: none`. 
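The `default_net_name_compat` fragment above concerns default network naming: docker-compose strips dashes from the project name when building the default network name, while podman-compose historically kept them. A tiny illustrative sketch of the two naming rules, mirroring the `default_network_name_for_project()` helper introduced in the podman_compose.py changes that follow; the project and network names here are made up.

```python
# Illustration only: how the default (non-external) network name differs.
project_name = "my-app"   # hypothetical project name
net = "backend"           # hypothetical network key from the compose file

podman_compose_default = f"{project_name}_{net}"                  # "my-app_backend"
docker_compose_style = f"{project_name.replace('-', '')}_{net}"   # "myapp_backend"

# With "default_net_name_compat: true" under the global x-podman key,
# podman-compose produces the docker-compose style name.
print(podman_compose_default, docker_compose_style)
```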
diff --git a/podman_compose.py b/podman_compose.py index 423e2a2..df337b8 100755 --- a/podman_compose.py +++ b/podman_compose.py @@ -1,31 +1,28 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- - +# SPDX-License-Identifier: GPL-2.0 # https://docs.docker.com/compose/compose-file/#service-configuration-reference # https://docs.docker.com/samples/ # https://docs.docker.com/compose/gettingstarted/ # https://docs.docker.com/compose/django/ # https://docs.docker.com/compose/wordpress/ - # TODO: podman pod logs --color -n -f pod_testlogs - - -import sys -import os -import getpass import argparse -import itertools -import subprocess -import time -import re -import hashlib -import random -import json +import asyncio.subprocess +import getpass import glob - -from threading import Thread - +import hashlib +import itertools +import json +import logging +import os +import random +import re import shlex +import signal +import subprocess +import sys +from asyncio import Task try: from shlex import quote as cmd_quote @@ -38,7 +35,7 @@ except ImportError: import yaml from dotenv import dotenv_values -__version__ = "1.0.7" +__version__ = "1.2.0" script = os.path.realpath(sys.argv[0]) @@ -54,11 +51,7 @@ def is_dict(dict_object): def is_list(list_object): - return ( - not is_str(list_object) - and not is_dict(list_object) - and hasattr(list_object, "__iter__") - ) + return not is_str(list_object) and not is_dict(list_object) and hasattr(list_object, "__iter__") # identity filter @@ -86,10 +79,7 @@ def try_float(i, fallback=None): return fallback -def log(*msgs, sep=" ", end="\n"): - line = (sep.join([str(msg) for msg in msgs])) + end - sys.stderr.write(line) - sys.stderr.flush() +log = logging.getLogger(__name__) dir_re = re.compile(r"^[~/\.]") @@ -127,7 +117,8 @@ def str_to_seconds(txt): mins = int(mins) if mins else 0 sec = float(sec) if sec else 0 # "podman stop" takes only int - # Error: invalid argument "3.0" for "-t, --time" flag: strconv.ParseUint: parsing "3.0": invalid syntax + # Error: invalid argument "3.0" for "-t, --time" flag: strconv.ParseUint: parsing "3.0": + # invalid syntax return int(mins * 60.0 + sec) @@ -168,9 +159,8 @@ def parse_short_mount(mount_str, basedir): # User-relative path # - ~/configs:/etc/configs/:ro mount_type = "bind" - mount_src = os.path.abspath( - os.path.join(basedir, os.path.expanduser(mount_src)) - ) + if os.name != 'nt' or (os.name == 'nt' and ".sock" not in mount_src): + mount_src = os.path.abspath(os.path.join(basedir, os.path.expanduser(mount_src))) else: # Named volume # - datavolume:/var/lib/mysql @@ -223,13 +213,11 @@ def fix_mount_dict(compose, mount_dict, proj_name, srv_name): # handle anonymous or implied volume if not source: # missing source - vol["name"] = "_".join( - [ - proj_name, - srv_name, - hashlib.sha256(mount_dict["target"].encode("utf-8")).hexdigest(), - ] - ) + vol["name"] = "_".join([ + proj_name, + srv_name, + hashlib.sha256(mount_dict["target"].encode("utf-8")).hexdigest(), + ]) elif not name: external = vol.get("external", None) if isinstance(external, dict): @@ -348,6 +336,16 @@ def norm_ulimit(inner_value): return inner_value +def default_network_name_for_project(compose, proj_name, net, is_ext): + if is_ext: + return net + + default_net_name_compat = compose.x_podman.get("default_net_name_compat", False) + if default_net_name_compat is True: + return f"{proj_name.replace('-', '')}_{net}" + return f"{proj_name}_{net}" + + # def tr_identity(project_name, given_containers): # pod_name = f'pod_{project_name}' # pod = dict(name=pod_name) @@ 
-358,7 +356,7 @@ def norm_ulimit(inner_value): def transform(args, project_name, given_containers): - if not args.in_pod: + if not args.in_pod_bool: pod_name = None pods = [] else: @@ -371,7 +369,7 @@ def transform(args, project_name, given_containers): return pods, containers -def assert_volume(compose, mount_dict): +async def assert_volume(compose, mount_dict): """ inspect volume to get directory create volume if needed @@ -380,9 +378,7 @@ def assert_volume(compose, mount_dict): if mount_dict["type"] == "bind": basedir = os.path.realpath(compose.dirname) mount_src = mount_dict["source"] - mount_src = os.path.realpath( - os.path.join(basedir, os.path.expanduser(mount_src)) - ) + mount_src = os.path.realpath(os.path.join(basedir, os.path.expanduser(mount_src))) if not os.path.exists(mount_src): try: os.makedirs(mount_src, exist_ok=True) @@ -394,11 +390,12 @@ def assert_volume(compose, mount_dict): proj_name = compose.project_name vol_name = vol["name"] is_ext = vol.get("external", None) - log(f"podman volume inspect {vol_name} || podman volume create {vol_name}") + log.debug("podman volume inspect %s || podman volume create %s", vol_name, vol_name) # TODO: might move to using "volume list" - # podman volume list --format '{{.Name}}\t{{.MountPoint}}' -f 'label=io.podman.compose.project=HERE' + # podman volume list --format '{{.Name}}\t{{.MountPoint}}' \ + # -f 'label=io.podman.compose.project=HERE' try: - _ = compose.podman.output([], "volume", ["inspect", vol_name]).decode("utf-8") + _ = (await compose.podman.output([], "volume", ["inspect", vol_name])).decode("utf-8") except subprocess.CalledProcessError as e: if is_ext: raise RuntimeError(f"External volume [{vol_name}] does not exists") from e @@ -419,13 +416,11 @@ def assert_volume(compose, mount_dict): for opt, value in driver_opts.items(): args.extend(["--opt", f"{opt}={value}"]) args.append(vol_name) - compose.podman.output([], "volume", args) - _ = compose.podman.output([], "volume", ["inspect", vol_name]).decode("utf-8") + await compose.podman.output([], "volume", args) + _ = (await compose.podman.output([], "volume", ["inspect", vol_name])).decode("utf-8") -def mount_desc_to_mount_args( - compose, mount_desc, srv_name, cnt_name -): # pylint: disable=unused-argument +def mount_desc_to_mount_args(compose, mount_desc, srv_name, cnt_name): # pylint: disable=unused-argument mount_type = mount_desc.get("type", None) vol = mount_desc.get("_vol", None) if mount_type == "volume" else None source = vol["name"] if vol else mount_desc.get("source", None) @@ -446,6 +441,11 @@ def mount_desc_to_mount_args( tmpfs_mode = tmpfs_opts.get("mode", None) if tmpfs_mode: opts.append(f"tmpfs-mode={tmpfs_mode}") + if mount_type == "bind": + bind_opts = mount_desc.get("bind", {}) + selinux = bind_opts.get("selinux", None) + if selinux is not None: + opts.append(selinux) opts = ",".join(opts) if mount_type == "bind": return f"type=bind,source={source},destination={target},{opts}".rstrip(",") @@ -456,8 +456,7 @@ def mount_desc_to_mount_args( raise ValueError("unknown mount type:" + mount_type) -def container_to_ulimit_args(cnt, podman_args): - ulimit = cnt.get("ulimits", []) +def ulimit_to_ulimit_args(ulimit, podman_args): if ulimit is not None: # ulimit can be a single value, i.e. 
ulimit: host if is_str(ulimit): @@ -473,9 +472,18 @@ def container_to_ulimit_args(cnt, podman_args): podman_args.extend(["--ulimit", i]) -def mount_desc_to_volume_args( - compose, mount_desc, srv_name, cnt_name -): # pylint: disable=unused-argument +def container_to_ulimit_args(cnt, podman_args): + ulimit_to_ulimit_args(cnt.get("ulimits", []), podman_args) + + +def container_to_ulimit_build_args(cnt, podman_args): + build = cnt.get("build", None) + + if build is not None: + ulimit_to_ulimit_args(build.get("ulimits", []), podman_args) + + +def mount_desc_to_volume_args(compose, mount_desc, srv_name, cnt_name): # pylint: disable=unused-argument mount_type = mount_desc["type"] if mount_type not in ("bind", "volume"): raise ValueError("unknown mount type:" + mount_type) @@ -486,13 +494,9 @@ def mount_desc_to_volume_args( target = mount_desc["target"] opts = [] - propagations = set( - filteri(mount_desc.get(mount_type, {}).get("propagation", "").split(",")) - ) + propagations = set(filteri(mount_desc.get(mount_type, {}).get("propagation", "").split(","))) if mount_type != "bind": - propagations.update( - filteri(mount_desc.get("bind", {}).get("propagation", "").split(",")) - ) + propagations.update(filteri(mount_desc.get("bind", {}).get("propagation", "").split(","))) opts.extend(propagations) # --volume, -v[=[[SOURCE-VOLUME|HOST-DIR:]CONTAINER-DIR[:OPTIONS]]] # [rw|ro] @@ -507,6 +511,12 @@ def mount_desc_to_volume_args( read_only = mount_desc.get("read_only", None) if read_only is not None: opts.append("ro" if read_only else "rw") + if mount_type == "bind": + bind_opts = mount_desc.get("bind", {}) + selinux = bind_opts.get("selinux", None) + if selinux is not None: + opts.append(selinux) + args = f"{source}:{target}" if opts: args += ":" + ",".join(opts) @@ -522,12 +532,12 @@ def get_mnt_dict(compose, cnt, volume): return fix_mount_dict(compose, volume, proj_name, srv_name) -def get_mount_args(compose, cnt, volume): +async def get_mount_args(compose, cnt, volume): volume = get_mnt_dict(compose, cnt, volume) # proj_name = compose.project_name srv_name = cnt["_service"] mount_type = volume["type"] - assert_volume(compose, volume) + await assert_volume(compose, volume) if compose.prefer_volume_over_mount: if mount_type == "tmpfs": # TODO: --tmpfs /tmp:rw,size=787448k,mode=1777 @@ -549,41 +559,61 @@ def get_mount_args(compose, cnt, volume): return ["--mount", args] -def get_secret_args(compose, cnt, secret): +def get_secret_args(compose, cnt, secret, podman_is_building=False): + """ + podman_is_building: True if we are preparing arguments for an invocation of "podman build" + False if we are preparing for something else like "podman run" + """ secret_name = secret if is_str(secret) else secret.get("source", None) if not secret_name or secret_name not in compose.declared_secrets.keys(): - raise ValueError( - f'ERROR: undeclared secret: "{secret}", service: {cnt["_service"]}' - ) + raise ValueError(f'ERROR: undeclared secret: "{secret}", service: {cnt["_service"]}') declared_secret = compose.declared_secrets[secret_name] source_file = declared_secret.get("file", None) dest_file = "" secret_opts = "" - target = None if is_str(secret) else secret.get("target", None) - uid = None if is_str(secret) else secret.get("uid", None) - gid = None if is_str(secret) else secret.get("gid", None) - mode = None if is_str(secret) else secret.get("mode", None) + secret_target = None if is_str(secret) else secret.get("target", None) + secret_uid = None if is_str(secret) else secret.get("uid", None) + secret_gid = None 
if is_str(secret) else secret.get("gid", None) + secret_mode = None if is_str(secret) else secret.get("mode", None) + secret_type = None if is_str(secret) else secret.get("type", None) if source_file: - if not target: - dest_file = f"/run/secrets/{secret_name}" - elif not target.startswith("/"): - sec = target if target else secret_name - dest_file = f"/run/secrets/{sec}" - else: - dest_file = target + # assemble path for source file first, because we need it for all cases basedir = compose.dirname - source_file = os.path.realpath( - os.path.join(basedir, os.path.expanduser(source_file)) - ) - volume_ref = ["--volume", f"{source_file}:{dest_file}:ro,rprivate,rbind"] - if uid or gid or mode: - sec = target if target else secret_name - log( - f'WARNING: Service {cnt["_service"]} uses secret "{sec}" with uid, gid, or mode.' - + " These fields are not supported by this implementation of the Compose file" + source_file = os.path.realpath(os.path.join(basedir, os.path.expanduser(source_file))) + + if podman_is_building: + # pass file secrets to "podman build" with param --secret + if not secret_target: + secret_id = secret_name + elif "/" in secret_target: + raise ValueError( + f'ERROR: Build secret "{secret_name}" has invalid target "{secret_target}". ' + + "(Expected plain filename without directory as target.)" + ) + else: + secret_id = secret_target + volume_ref = ["--secret", f"id={secret_id},src={source_file}"] + else: + # pass file secrets to "podman run" as volumes + if not secret_target: + dest_file = "/run/secrets/{}".format(secret_name) + elif not secret_target.startswith("/"): + sec = secret_target if secret_target else secret_name + dest_file = f"/run/secrets/{sec}" + else: + dest_file = secret_target + volume_ref = ["--volume", f"{source_file}:{dest_file}:ro,rprivate,rbind"] + + if secret_uid or secret_gid or secret_mode: + sec = secret_target if secret_target else secret_name + log.warning( + "WARNING: Service %s uses secret %s with uid, gid, or mode." + + " These fields are not supported by this implementation of the Compose file", + cnt["_service"], + sec, ) return volume_ref # v3.5 and up added external flag, earlier the spec @@ -594,37 +624,97 @@ def get_secret_args(compose, cnt, secret): # podman-create commands, albeit we can only support a 1:1 mapping # at the moment if declared_secret.get("external", False) or declared_secret.get("name", None): - secret_opts += f",uid={uid}" if uid else "" - secret_opts += f",gid={gid}" if gid else "" - secret_opts += f",mode={mode}" if mode else "" + secret_opts += f",uid={secret_uid}" if secret_uid else "" + secret_opts += f",gid={secret_gid}" if secret_gid else "" + secret_opts += f",mode={secret_mode}" if secret_mode else "" + secret_opts += f",type={secret_type}" if secret_type else "" + secret_opts += f",target={secret_target}" if secret_target and secret_type == "env" else "" # The target option is only valid for type=env, # which in an ideal world would work # for type=mount as well. 
# having a custom name for the external secret # has the same problem as well ext_name = declared_secret.get("name", None) - err_str = 'ERROR: Custom name/target reference "{}" for mounted external secret "{}" is not supported' + err_str = ( + 'ERROR: Custom name/target reference "{}" ' + 'for mounted external secret "{}" is not supported' + ) if ext_name and ext_name != secret_name: raise ValueError(err_str.format(secret_name, ext_name)) - if target and target != secret_name: - raise ValueError(err_str.format(target, secret_name)) - if target: - log( - 'WARNING: Service "{}" uses target: "{}" for secret: "{}".'.format( - cnt["_service"], target, secret_name - ) - + " That is un-supported and a no-op and is ignored." + if secret_target and secret_target != secret_name and secret_type != 'env': + raise ValueError(err_str.format(secret_target, secret_name)) + if secret_target and secret_type != 'env': + log.warning( + 'WARNING: Service "%s" uses target: "%s" for secret: "%s".' + + " That is un-supported and a no-op and is ignored.", + cnt["_service"], + secret_target, + secret_name, ) return ["--secret", "{}{}".format(secret_name, secret_opts)] raise ValueError( - 'ERROR: unparsable secret: "{}", service: "{}"'.format( - secret_name, cnt["_service"] - ) + 'ERROR: unparsable secret: "{}", service: "{}"'.format(secret_name, cnt["_service"]) ) def container_to_res_args(cnt, podman_args): + container_to_cpu_res_args(cnt, podman_args) + container_to_gpu_res_args(cnt, podman_args) + + +def container_to_gpu_res_args(cnt, podman_args): + # https://docs.docker.com/compose/gpu-support/ + # https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/cdi-support.html + + deploy = cnt.get("deploy", None) or {} + res = deploy.get("resources", None) or {} + reservations = res.get("reservations", None) or {} + devices = reservations.get("devices", []) + gpu_on = False + for device in devices: + driver = device.get("driver", None) + if driver is None: + continue + + capabilities = device.get("capabilities", None) + if capabilities is None: + continue + + if driver != "nvidia" or "gpu" not in capabilities: + continue + + count = device.get("count", "all") + device_ids = device.get("device_ids", "all") + if device_ids != "all" and len(device_ids) > 0: + for device_id in device_ids: + podman_args.extend(( + "--device", + f"nvidia.com/gpu={device_id}", + )) + gpu_on = True + continue + + if count != "all": + for device_id in range(count): + podman_args.extend(( + "--device", + f"nvidia.com/gpu={device_id}", + )) + gpu_on = True + continue + + podman_args.extend(( + "--device", + "nvidia.com/gpu=all", + )) + gpu_on = True + + if gpu_on: + podman_args.append("--security-opt=label=disable") + + +def container_to_cpu_res_args(cnt, podman_args): # v2: https://docs.docker.com/compose/compose-file/compose-file-v2/#cpu-and-other-resources # cpus, cpu_shares, mem_limit, mem_reservation cpus_limit_v2 = try_float(cnt.get("cpus", None), None) @@ -645,35 +735,27 @@ def container_to_res_args(cnt, podman_args): # add args cpus = cpus_limit_v3 or cpus_limit_v2 if cpus: - podman_args.extend( - ( - "--cpus", - str(cpus), - ) - ) + podman_args.extend(( + "--cpus", + str(cpus), + )) if cpu_shares_v2: - podman_args.extend( - ( - "--cpu-shares", - str(cpu_shares_v2), - ) - ) + podman_args.extend(( + "--cpu-shares", + str(cpu_shares_v2), + )) mem = mem_limit_v3 or mem_limit_v2 if mem: - podman_args.extend( - ( - "-m", - str(mem).lower(), - ) - ) + podman_args.extend(( + "-m", + str(mem).lower(), + )) mem_res = mem_res_v3 
or mem_res_v2 if mem_res: - podman_args.extend( - ( - "--memory-reservation", - str(mem_res).lower(), - ) - ) + podman_args.extend(( + "--memory-reservation", + str(mem_res).lower(), + )) def port_dict_to_str(port_desc): @@ -710,7 +792,52 @@ def norm_ports(ports_in): return ports_out -def assert_cnt_nets(compose, cnt): +def get_network_create_args(net_desc, proj_name, net_name): + args = [ + "create", + "--label", + f"io.podman.compose.project={proj_name}", + "--label", + f"com.docker.compose.project={proj_name}", + ] + # TODO: add more options here, like dns, ipv6, etc. + labels = net_desc.get("labels", None) or [] + for item in norm_as_list(labels): + args.extend(["--label", item]) + if net_desc.get("internal", None): + args.append("--internal") + driver = net_desc.get("driver", None) + if driver: + args.extend(("--driver", driver)) + driver_opts = net_desc.get("driver_opts", None) or {} + for key, value in driver_opts.items(): + args.extend(("--opt", f"{key}={value}")) + ipam = net_desc.get("ipam", None) or {} + ipam_driver = ipam.get("driver", None) + if ipam_driver and ipam_driver != "default": + args.extend(("--ipam-driver", ipam_driver)) + ipam_config_ls = ipam.get("config", None) or [] + if net_desc.get("enable_ipv6", None): + args.append("--ipv6") + + if is_dict(ipam_config_ls): + ipam_config_ls = [ipam_config_ls] + for ipam_config in ipam_config_ls: + subnet = ipam_config.get("subnet", None) + ip_range = ipam_config.get("ip_range", None) + gateway = ipam_config.get("gateway", None) + if subnet: + args.extend(("--subnet", subnet)) + if ip_range: + args.extend(("--ip-range", ip_range)) + if gateway: + args.extend(("--gateway", gateway)) + args.append(net_name) + + return args + + +async def assert_cnt_nets(compose, cnt): """ create missing networks """ @@ -728,86 +855,49 @@ def assert_cnt_nets(compose, cnt): net_desc = nets[net] or {} is_ext = net_desc.get("external", None) ext_desc = is_ext if is_dict(is_ext) else {} - default_net_name = net if is_ext else f"{proj_name}_{net}" - net_name = ( - ext_desc.get("name", None) or net_desc.get("name", None) or default_net_name - ) + default_net_name = default_network_name_for_project(compose, proj_name, net, is_ext) + net_name = ext_desc.get("name", None) or net_desc.get("name", None) or default_net_name try: - compose.podman.output([], "network", ["exists", net_name]) + await compose.podman.output([], "network", ["exists", net_name]) except subprocess.CalledProcessError as e: if is_ext: - raise RuntimeError( - f"External network [{net_name}] does not exists" - ) from e - args = [ - "create", - "--label", - f"io.podman.compose.project={proj_name}", - "--label", - f"com.docker.compose.project={proj_name}", - ] - # TODO: add more options here, like dns, ipv6, etc. 
- labels = net_desc.get("labels", None) or [] - for item in norm_as_list(labels): - args.extend(["--label", item]) - if net_desc.get("internal", None): - args.append("--internal") - driver = net_desc.get("driver", None) - if driver: - args.extend(("--driver", driver)) - driver_opts = net_desc.get("driver_opts", None) or {} - for key, value in driver_opts.items(): - args.extend(("--opt", f"{key}={value}")) - ipam = net_desc.get("ipam", None) or {} - ipam_driver = ipam.get("driver", None) - if ipam_driver: - args.extend(("--ipam-driver", ipam_driver)) - ipam_config_ls = ipam.get("config", None) or [] - if is_dict(ipam_config_ls): - ipam_config_ls = [ipam_config_ls] - for ipam_config in ipam_config_ls: - subnet = ipam_config.get("subnet", None) - ip_range = ipam_config.get("ip_range", None) - gateway = ipam_config.get("gateway", None) - if subnet: - args.extend(("--subnet", subnet)) - if ip_range: - args.extend(("--ip-range", ip_range)) - if gateway: - args.extend(("--gateway", gateway)) - args.append(net_name) - compose.podman.output([], "network", args) - compose.podman.output([], "network", ["exists", net_name]) + raise RuntimeError(f"External network [{net_name}] does not exists") from e + args = get_network_create_args(net_desc, proj_name, net_name) + await compose.podman.output([], "network", args) + await compose.podman.output([], "network", ["exists", net_name]) def get_net_args(compose, cnt): service_name = cnt["service_name"] net_args = [] - mac_address = cnt.get("mac_address", None) - if mac_address: - net_args.extend(["--mac-address", mac_address]) is_bridge = False + mac_address = cnt.get("mac_address", None) net = cnt.get("network_mode", None) if net: if net == "none": is_bridge = False + net_args.append("--network=none") elif net == "host": - net_args.extend(["--network", net]) - elif net.startswith("slirp4netns:"): - net_args.extend(["--network", net]) - elif net.startswith("ns:"): - net_args.extend(["--network", net]) + net_args.append(f"--network={net}") + elif net.startswith("slirp4netns"): # Note: podman-specific network mode + net_args.append(f"--network={net}") + elif net == "private": # Note: podman-specific network mode + net_args.append("--network=private") + elif net.startswith("pasta"): # Note: podman-specific network mode + net_args.append(f"--network={net}") + elif net.startswith("ns:"): # Note: podman-specific network mode + net_args.append(f"--network={net}") elif net.startswith("service:"): other_srv = net.split(":", 1)[1].strip() other_cnt = compose.container_names_by_service[other_srv][0] - net_args.extend(["--network", f"container:{other_cnt}"]) + net_args.append(f"--network=container:{other_cnt}") elif net.startswith("container:"): other_cnt = net.split(":", 1)[1].strip() - net_args.extend(["--network", f"container:{other_cnt}"]) + net_args.append(f"--network=container:{other_cnt}") elif net.startswith("bridge"): is_bridge = True else: - print(f"unknown network_mode [{net}]") + log.fatal("unknown network_mode [%s]", net) sys.exit(1) else: is_bridge = True @@ -815,6 +905,7 @@ def get_net_args(compose, cnt): default_net = compose.default_net nets = compose.networks cnt_nets = cnt.get("networks", None) + aliases = [service_name] # NOTE: from podman manpage: # NOTE: A container will only have access to aliases on the first network @@ -841,12 +932,10 @@ def get_net_args(compose, cnt): if not ip6: ip6 = net_value.get("ipv6_address", None) net_priority = net_value.get("priority", 0) - prioritized_cnt_nets.append( - ( - net_priority, - net_key, - ) - ) + 
prioritized_cnt_nets.append(( + net_priority, + net_key, + )) # sort dict by priority prioritized_cnt_nets.sort(reverse=True) cnt_nets = [net_key for _, net_key in prioritized_cnt_nets] @@ -856,49 +945,91 @@ def get_net_args(compose, cnt): net_desc = nets[net] or {} is_ext = net_desc.get("external", None) ext_desc = is_ext if is_dict(is_ext) else {} - default_net_name = net if is_ext else f"{proj_name}_{net}" - net_name = ( - ext_desc.get("name", None) or net_desc.get("name", None) or default_net_name - ) + default_net_name = default_network_name_for_project(compose, proj_name, net, is_ext) + net_name = ext_desc.get("name", None) or net_desc.get("name", None) or default_net_name net_names.append(net_name) net_names_str = ",".join(net_names) - if ip_assignments > 1: - multiple_nets = cnt.get("networks", None) - multiple_net_names = multiple_nets.keys() + # TODO: add support for per-interface aliases + # See https://docs.docker.com/compose/compose-file/compose-file-v3/#aliases + # Even though podman accepts network-specific aliases (e.g., --network=bridge:alias=foo, + # podman currently ignores this if a per-container network-alias is set; as pdoman-compose + # always sets a network-alias to the container name, is currently doesn't make sense to + # implement this. + multiple_nets = cnt.get("networks", None) + if multiple_nets and len(multiple_nets) > 1: + # networks can be specified as a dict with config per network or as a plain list without + # config. Support both cases by converting the plain list to a dict with empty config. + if is_list(multiple_nets): + multiple_nets = {net: {} for net in multiple_nets} + else: + multiple_nets = {net: net_config or {} for net, net_config in multiple_nets.items()} - for net_ in multiple_net_names: + # if a mac_address was specified on the container level, we need to check that it is not + # specified on the network level as well + if mac_address is not None: + for net_config_ in multiple_nets.values(): + network_mac = net_config_.get("x-podman.mac_address", None) + if network_mac is not None: + raise RuntimeError( + f"conflicting mac addresses {mac_address} and {network_mac}:" + "specifying mac_address on both container and network level " + "is not supported" + ) + + for net_, net_config_ in multiple_nets.items(): net_desc = nets[net_] or {} is_ext = net_desc.get("external", None) ext_desc = is_ext if is_dict(is_ext) else {} - default_net_name = net_ if is_ext else f"{proj_name}_{net_}" - net_name = ( - ext_desc.get("name", None) - or net_desc.get("name", None) - or default_net_name - ) + default_net_name = default_network_name_for_project(compose, proj_name, net_, is_ext) + net_name = ext_desc.get("name", None) or net_desc.get("name", None) or default_net_name - ipv4 = multiple_nets[net_].get("ipv4_address", None) - ipv6 = multiple_nets[net_].get("ipv6_address", None) - if ipv4 is not None and ipv6 is not None: - net_args.extend(["--network", f"{net_name}:ip={ipv4},ip={ipv6}"]) - elif ipv4 is None and ipv6 is not None: - net_args.extend(["--network", f"{net_name}:ip={ipv6}"]) - elif ipv6 is None and ipv4 is not None: - net_args.extend(["--network", f"{net_name}:ip={ipv4}"]) + ipv4 = net_config_.get("ipv4_address", None) + ipv6 = net_config_.get("ipv6_address", None) + # custom extension; not supported by docker-compose v3 + mac = net_config_.get("x-podman.mac_address", None) + + # if a mac_address was specified on the container level, apply it to the first network + # This works for Python > 3.6, because dict insert ordering is preserved, so we are 
+ # sure that the first network we encounter here is also the first one specified by + # the user + if mac is None and mac_address is not None: + mac = mac_address + mac_address = None + + net_options = [] + if ipv4: + net_options.append(f"ip={ipv4}") + if ipv6: + net_options.append(f"ip={ipv6}") + if mac: + net_options.append(f"mac={mac}") + + if net_options: + net_args.append(f"--network={net_name}:" + ",".join(net_options)) + else: + net_args.append(f"--network={net_name}") else: if is_bridge: - net_args.extend( - ["--net", net_names_str, "--network-alias", ",".join(aliases)] - ) + if net_names_str: + net_args.append(f"--network={net_names_str}") + else: + net_args.append("--network=bridge") if ip: net_args.append(f"--ip={ip}") if ip6: net_args.append(f"--ip6={ip6}") + if mac_address: + net_args.append(f"--mac-address={mac_address}") + + if is_bridge: + for alias in aliases: + net_args.extend([f"--network-alias={alias}"]) + return net_args -def container_to_args(compose, cnt, detached=True): +async def container_to_args(compose, cnt, detached=True): # TODO: double check -e , --add-host, -v, --read-only dirname = compose.dirname pod = cnt.get("pod", None) or "" @@ -924,6 +1055,8 @@ def container_to_args(compose, cnt, detached=True): podman_args.extend(["--annotation", a]) if cnt.get("read_only", None): podman_args.append("--read-only") + if cnt.get("http_proxy", None) is False: + podman_args.append("--http-proxy=false") for i in cnt.get("labels", []): podman_args.extend(["--label", i]) for c in cnt.get("cap_add", []): @@ -941,11 +1074,23 @@ def container_to_args(compose, cnt, detached=True): for item in norm_as_list(cnt.get("dns_search", None)): podman_args.extend(["--dns-search", item]) env_file = cnt.get("env_file", []) - if is_str(env_file): + if is_str(env_file) or is_dict(env_file): env_file = [env_file] for i in env_file: - i = os.path.realpath(os.path.join(dirname, i)) - podman_args.extend(["--env-file", i]) + if is_str(i): + i = {"path": i} + path = i["path"] + required = i.get("required", True) + i = os.path.realpath(os.path.join(dirname, path)) + if not os.path.exists(i): + if not required: + continue + raise ValueError("Env file at {} does not exist".format(i)) + dotenv_dict = {} + dotenv_dict = dotenv_to_dict(i) + env = norm_as_list(dotenv_dict) + for e in env: + podman_args.extend(["-e", e]) env = norm_as_list(cnt.get("environment", {})) for e in env: podman_args.extend(["-e", e]) @@ -955,15 +1100,15 @@ def container_to_args(compose, cnt, detached=True): for i in tmpfs_ls: podman_args.extend(["--tmpfs", i]) for volume in cnt.get("volumes", []): - podman_args.extend(get_mount_args(compose, cnt, volume)) + podman_args.extend(await get_mount_args(compose, cnt, volume)) - assert_cnt_nets(compose, cnt) + await assert_cnt_nets(compose, cnt) podman_args.extend(get_net_args(compose, cnt)) - logging = cnt.get("logging", None) - if logging is not None: - podman_args.append(f'--log-driver={logging.get("driver", "k8s-file")}') - log_opts = logging.get("options") or {} + log_config = cnt.get("logging", None) + if log_config is not None: + podman_args.append(f'--log-driver={log_config.get("driver", "k8s-file")}') + log_opts = log_config.get("options") or {} podman_args += [f"--log-opt={name}={value}" for name, value in log_opts.items()] for secret in cnt.get("secrets", []): podman_args.extend(get_secret_args(compose, cnt, secret)) @@ -1000,12 +1145,24 @@ def container_to_args(compose, cnt, detached=True): podman_args.append("-i") if cnt.get("stop_signal", None): 
podman_args.extend(["--stop-signal", cnt["stop_signal"]]) - for i in cnt.get("sysctls", []): - podman_args.extend(["--sysctl", i]) + + sysctls = cnt.get("sysctls") + if sysctls is not None: + if isinstance(sysctls, dict): + for sysctl, value in sysctls.items(): + podman_args.extend(["--sysctl", "{}={}".format(sysctl, value)]) + elif isinstance(sysctls, list): + for i in sysctls: + podman_args.extend(["--sysctl", i]) + else: + raise TypeError("sysctls should be either dict or list") + if cnt.get("tty", None): podman_args.append("--tty") if cnt.get("privileged", None): podman_args.append("--privileged") + if cnt.get("pid", None): + podman_args.extend(["--pid", cnt["pid"]]) pull_policy = cnt.get("pull_policy", None) if pull_policy is not None and pull_policy != "build": podman_args.extend(["--pull", pull_policy]) @@ -1026,6 +1183,8 @@ def container_to_args(compose, cnt, detached=True): platform = cnt.get("platform", None) if platform is not None: podman_args.extend(["--platform", platform]) + if cnt.get("runtime", None): + podman_args.extend(["--runtime", cnt["runtime"]]) # WIP: healthchecks are still work in progress healthcheck = cnt.get("healthcheck", None) or {} @@ -1039,9 +1198,10 @@ def container_to_args(compose, cnt, detached=True): # If it's a string, it's equivalent to specifying CMD-SHELL if is_str(healthcheck_test): # podman does not add shell to handle command with whitespace - podman_args.extend( - ["--healthcheck-command", "/bin/sh -c " + cmd_quote(healthcheck_test)] - ) + podman_args.extend([ + "--healthcheck-command", + "/bin/sh -c " + cmd_quote(healthcheck_test), + ]) elif is_list(healthcheck_test): healthcheck_test = healthcheck_test.copy() # If it's a list, first item is either NONE, CMD or CMD-SHELL. @@ -1077,14 +1237,25 @@ def container_to_args(compose, cnt, detached=True): podman_args.extend(["--healthcheck-retries", str(healthcheck["retries"])]) # handle podman extension - x_podman = cnt.get("x-podman", None) - if x_podman is not None: - for uidmap in x_podman.get("uidmaps", []): - podman_args.extend(["--uidmap", uidmap]) - for gidmap in x_podman.get("gidmaps", []): - podman_args.extend(["--gidmap", gidmap]) + if 'x-podman' in cnt: + raise ValueError( + 'Configuration under x-podman has been migrated to x-podman.uidmaps and ' + 'x-podman.gidmaps fields' + ) - podman_args.append(cnt["image"]) # command, ..etc. + rootfs_mode = False + for uidmap in cnt.get('x-podman.uidmaps', []): + podman_args.extend(["--uidmap", uidmap]) + for gidmap in cnt.get('x-podman.gidmaps', []): + podman_args.extend(["--gidmap", gidmap]) + rootfs = cnt.get('x-podman.rootfs', None) + if rootfs is not None: + rootfs_mode = True + podman_args.extend(["--rootfs", rootfs]) + log.warning("WARNING: x-podman.rootfs and image both specified, image field ignored") + + if not rootfs_mode: + podman_args.append(cnt["image"]) # command, ..etc. command = cnt.get("command", None) if command is not None: if is_str(command): @@ -1143,30 +1314,63 @@ def flat_deps(services, with_extends=False): for c in links_ls: if ":" in c: dep_name, dep_alias = c.split(":") - if not "_aliases" in services[dep_name]: + if "_aliases" not in services[dep_name]: services[dep_name]["_aliases"] = set() services[dep_name]["_aliases"].add(dep_alias) for name, srv in services.items(): rec_deps(services, name) +async def wait_with_timeout(coro, timeout): + """ + Asynchronously waits for the given coroutine to complete with a timeout. + + Args: + coro: The coroutine to wait for. 
+ timeout (int or float): The maximum number of seconds to wait for. + + Raises: + TimeoutError: If the coroutine does not complete within the specified timeout. + """ + try: + return await asyncio.wait_for(coro, timeout) + except asyncio.TimeoutError as exc: + raise TimeoutError from exc + + ################### # podman and compose classes ################### class Podman: - def __init__(self, compose, podman_path="podman", dry_run=False): + def __init__( + self, + compose, + podman_path="podman", + dry_run=False, + semaphore: asyncio.Semaphore = asyncio.Semaphore(sys.maxsize), + ): self.compose = compose self.podman_path = podman_path self.dry_run = dry_run + self.semaphore = semaphore - def output(self, podman_args, cmd="", cmd_args=None): - cmd_args = cmd_args or [] - xargs = self.compose.get_podman_args(cmd) if cmd else [] - cmd_ls = [self.podman_path, *podman_args, cmd] + xargs + cmd_args - log(cmd_ls) - return subprocess.check_output(cmd_ls) + async def output(self, podman_args, cmd="", cmd_args=None): + async with self.semaphore: + cmd_args = cmd_args or [] + xargs = self.compose.get_podman_args(cmd) if cmd else [] + cmd_ls = [self.podman_path, *podman_args, cmd] + xargs + cmd_args + log.info(str(cmd_ls)) + p = await asyncio.create_subprocess_exec( + *cmd_ls, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE + ) + + stdout_data, stderr_data = await p.communicate() + if p.returncode == 0: + return stdout_data + + raise subprocess.CalledProcessError(p.returncode, " ".join(cmd_ls), stderr_data) def exec( self, @@ -1177,68 +1381,85 @@ class Podman: cmd_args = list(map(str, cmd_args or [])) xargs = self.compose.get_podman_args(cmd) if cmd else [] cmd_ls = [self.podman_path, *podman_args, cmd] + xargs + cmd_args - log(" ".join([str(i) for i in cmd_ls])) + log.info(" ".join([str(i) for i in cmd_ls])) os.execlp(self.podman_path, *cmd_ls) - def run( + async def run( # pylint: disable=dangerous-default-value self, podman_args, cmd="", cmd_args=None, - wait=True, - sleep=1, - obj=None, log_formatter=None, - ): - if obj is not None: - obj.exit_code = None - cmd_args = list(map(str, cmd_args or [])) - xargs = self.compose.get_podman_args(cmd) if cmd else [] - cmd_ls = [self.podman_path, *podman_args, cmd] + xargs + cmd_args - log(" ".join([str(i) for i in cmd_ls])) - if self.dry_run: - return None - # subprocess.Popen( - # args, bufsize = 0, executable = None, stdin = None, stdout = None, stderr = None, preexec_fn = None, - # close_fds = False, shell = False, cwd = None, env = None, universal_newlines = False, startupinfo = None, - # creationflags = 0 - # ) - if log_formatter is not None: - # Pipe podman process output through log_formatter (which can add colored prefix) - p = subprocess.Popen( - cmd_ls, stdout=subprocess.PIPE - ) # pylint: disable=consider-using-with - _ = subprocess.Popen( - log_formatter, stdin=p.stdout - ) # pylint: disable=consider-using-with - p.stdout.close() # Allow p_process to receive a SIGPIPE if logging process exits. 
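A minimal, self-contained sketch of the asyncio pattern the new run() above uses in place of piping output through an external log_formatter process: read the child's stdout line by line, print each line behind a prefix, and hold a strong reference to the reader task so it is not garbage-collected. The names stream_with_prefix and pump are illustrative only, and the example assumes an `echo` binary is available on PATH.

import asyncio

async def stream_with_prefix(prefix, *argv):
    proc = await asyncio.create_subprocess_exec(*argv, stdout=asyncio.subprocess.PIPE)

    async def pump(stream):
        # mirrors format_out(): decode each line and print it behind a service prefix
        while True:
            line = await stream.readline()
            if line:
                print(prefix, line.decode("utf-8"), end="")
            if stream.at_eof():
                break

    tasks = set()
    t = asyncio.create_task(pump(proc.stdout))
    tasks.add(t)                        # keep a strong reference, see python/cpython#91887
    t.add_done_callback(tasks.discard)
    return await proc.wait()

asyncio.run(stream_with_prefix("[web] |", "echo", "hello"))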
- else: - p = subprocess.Popen(cmd_ls) # pylint: disable=consider-using-with + *, + # Intentionally mutable default argument to hold references to tasks + task_reference=set(), + ) -> int: + async with self.semaphore: + cmd_args = list(map(str, cmd_args or [])) + xargs = self.compose.get_podman_args(cmd) if cmd else [] + cmd_ls = [self.podman_path, *podman_args, cmd] + xargs + cmd_args + log.info(" ".join([str(i) for i in cmd_ls])) + if self.dry_run: + return None - if wait: - exit_code = p.wait() - log("exit code:", exit_code) - if obj is not None: - obj.exit_code = exit_code + if log_formatter is not None: - if sleep: - time.sleep(sleep) - return p + async def format_out(stdout): + while True: + line = await stdout.readline() + if line: + print(log_formatter, line.decode('utf-8'), end='') + if stdout.at_eof(): + break - def volume_ls(self, proj=None): + p = await asyncio.create_subprocess_exec( + *cmd_ls, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE + ) # pylint: disable=consider-using-with + + # This is hacky to make the tasks not get garbage collected + # https://github.com/python/cpython/issues/91887 + out_t = asyncio.create_task(format_out(p.stdout)) + task_reference.add(out_t) + out_t.add_done_callback(task_reference.discard) + + err_t = asyncio.create_task(format_out(p.stderr)) + task_reference.add(err_t) + err_t.add_done_callback(task_reference.discard) + + else: + p = await asyncio.create_subprocess_exec(*cmd_ls) # pylint: disable=consider-using-with + + try: + exit_code = await p.wait() + except asyncio.CancelledError: + log.info("Sending termination signal") + p.terminate() + try: + exit_code = await wait_with_timeout(p.wait(), 10) + except TimeoutError: + log.warning("container did not shut down after 10 seconds, killing") + p.kill() + exit_code = await p.wait() + + log.info("exit code: %s", exit_code) + return exit_code + + async def volume_ls(self, proj=None): if not proj: proj = self.compose.project_name - output = self.output( - [], - "volume", - [ - "ls", - "--noheading", - "--filter", - f"label=io.podman.compose.project={proj}", - "--format", - "{{.Name}}", - ], + output = ( + await self.output( + [], + "volume", + [ + "ls", + "--noheading", + "--filter", + f"label=io.podman.compose.project={proj}", + "--format", + "{{.Name}}", + ], + ) ).decode("utf-8") volumes = output.splitlines() return volumes @@ -1261,6 +1482,12 @@ def normalize_service(service, sub_dir=""): if not context: context = "." 
service["build"]["context"] = context + if "build" in service and "additional_contexts" in service["build"]: + if is_dict(build["additional_contexts"]): + new_additional_contexts = [] + for k, v in build["additional_contexts"].items(): + new_additional_contexts.append(f"{k}={v}") + build["additional_contexts"] = new_additional_contexts for key in ("command", "entrypoint"): if key in service: if is_str(service[key]): @@ -1284,6 +1511,15 @@ def normalize_service(service, sub_dir=""): if is_str(extends): extends = {"service": extends} service["extends"] = extends + if "depends_on" in service: + deps = service["depends_on"] + if is_str(deps): + deps = [deps] + if is_list(deps): + deps_dict = {} + for d in deps: + deps_dict[d] = {'condition': 'service_started'} + service["depends_on"] = deps_dict return service @@ -1303,14 +1539,8 @@ def normalize_service_final(service: dict, project_dir: str) -> dict: context = build if is_str(build) else build.get("context", ".") if not re.match(r"://", context) and not re.match(r"[^:]+:.+", context): context = os.path.normpath(os.path.join(project_dir, context)) - dockerfile = ( - "Dockerfile" - if is_str(build) - else service["build"].get("dockerfile", "Dockerfile") - ) if not is_dict(service["build"]): service["build"] = {} - service["build"]["dockerfile"] = dockerfile service["build"]["context"] = context return service @@ -1348,17 +1578,13 @@ def rec_merge_one(target, source): if not isinstance(value2, type(value)): value_type = type(value) value2_type = type(value2) - raise ValueError( - f"can't merge value of [{key}] of type {value_type} and {value2_type}" - ) + raise ValueError(f"can't merge value of [{key}] of type {value_type} and {value2_type}") if is_list(value2): if key == "volumes": # clean duplicate mount targets pts = {v.split(":", 2)[1] for v in value2 if ":" in v} del_ls = [ - ix - for (ix, v) in enumerate(value) - if ":" in v and v.split(":", 2)[1] in pts + ix for (ix, v) in enumerate(value) if ":" in v and v.split(":", 2)[1] in pts ] for ix in reversed(del_ls): del value[ix] @@ -1458,14 +1684,15 @@ class PodmanCompose: self.services = None self.all_services = set() self.prefer_volume_over_mount = True + self.x_podman = {} self.merged_yaml = None self.yaml_hash = "" self.console_colors = [ - "\x1B[1;32m", - "\x1B[1;33m", - "\x1B[1;34m", - "\x1B[1;35m", - "\x1B[1;36m", + "\x1b[1;32m", + "\x1b[1;33m", + "\x1b[1;34m", + "\x1b[1;35m", + "\x1b[1;36m", ] def assert_services(self, services): @@ -1475,7 +1702,7 @@ class PodmanCompose: missing = given - self.all_services if missing: missing_csv = ",".join(missing) - log(f"missing services [{missing_csv}]") + log.warning("missing services [%s]", missing_csv) sys.exit(1) def get_podman_args(self, cmd): @@ -1488,9 +1715,9 @@ class PodmanCompose: xargs.extend(shlex.split(args)) return xargs - def run(self): - log("podman-compose version: " + __version__) - args = self._parse_args() + async def run(self, argv=None): + log.info("podman-compose version: %s", __version__) + args = self._parse_args(argv) podman_path = args.podman_path if podman_path != "podman": if os.path.isfile(podman_path) and os.access(podman_path, os.X_OK): @@ -1498,23 +1725,23 @@ class PodmanCompose: else: # this also works if podman hasn't been installed now if args.dry_run is False: - log(f"Binary {podman_path} has not been found.") + log.fatal("Binary %s has not been found.", podman_path) sys.exit(1) - self.podman = Podman(self, podman_path, args.dry_run) + self.podman = Podman(self, podman_path, args.dry_run, 
asyncio.Semaphore(args.parallel)) + if not args.dry_run: # just to make sure podman is running try: - self.podman_version = ( - self.podman.output(["--version"], "", []).decode("utf-8").strip() - or "" - ) + self.podman_version = (await self.podman.output(["--version"], "", [])).decode( + "utf-8" + ).strip() or "" self.podman_version = (self.podman_version.split() or [""])[-1] except subprocess.CalledProcessError: self.podman_version = None if not self.podman_version: - log("it seems that you do not have `podman` installed") + log.fatal("it seems that you do not have `podman` installed") sys.exit(1) - log("using podman version: " + self.podman_version) + log.info("using podman version: %s", self.podman_version) cmd_name = args.command compose_required = cmd_name != "version" and ( cmd_name != "systemd" or args.action != "create-unit" @@ -1522,10 +1749,16 @@ class PodmanCompose: if compose_required: self._parse_compose_file() cmd = self.commands[cmd_name] - retcode = cmd(self, args) + retcode = await cmd(self, args) if isinstance(retcode, int): sys.exit(retcode) + def resolve_in_pod(self): + if self.global_args.in_pod_bool is None: + self.global_args.in_pod_bool = self.x_podman.get("in_pod", True) + # otherwise use `in_pod` value provided by command line + return self.global_args.in_pod_bool + def _parse_compose_file(self): args = self.global_args # cmd = args.command @@ -1542,18 +1775,18 @@ class PodmanCompose: args.file = list(filter(os.path.exists, default_ls)) files = args.file if not files: - log( - "no compose.yaml, docker-compose.yml or container-compose.yml file found, pass files with -f" + log.fatal( + "no compose.yaml, docker-compose.yml or container-compose.yml file found, " + "pass files with -f" ) sys.exit(-1) - ex = map(os.path.exists, files) + ex = map(lambda x: x == '-' or os.path.exists(x), files) missing = [fn0 for ex0, fn0 in zip(ex, files) if not ex0] if missing: - log("missing files: ", missing) + log.fatal("missing files: %s", missing) sys.exit(1) # make absolute relative_files = files - files = list(map(os.path.realpath, files)) filename = files[0] project_name = args.project_name # no_ansi = args.no_ansi @@ -1567,30 +1800,30 @@ class PodmanCompose: # env-file is relative to the CWD dotenv_dict = {} if args.env_file: + # Load .env from the Compose file's directory to preserve + # behavior prior to 1.1.0 and to match with Docker Compose (v2). 
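A minimal sketch (the loader below is a stand-in, not the real dotenv_to_dict helper) of the layering the code that follows relies on: each later update() overrides earlier values, so the resolved --env-file is applied after the project-level .env when both are read, and variables already set in the OS environment override everything loaded from files.

import os

def load_env_stub(path):
    # stand-in for dotenv_to_dict(); illustrative only
    return {"FOO": f"from {path}"}

dotenv_dict = {}
dotenv_dict.update(load_env_stub("project-dir/.env"))   # .env next to the compose file
dotenv_dict.update(load_env_stub("./.env"))             # the resolved --env-file, applied second

environ = dict(dotenv_dict)
environ.update(os.environ)   # OS environment takes final precedence
print(environ.get("FOO"))    # "from ./.env" unless FOO is already set in the shell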
+ if ".env" == args.env_file: + project_dotenv_file = os.path.realpath(os.path.join(dirname, ".env")) + if os.path.exists(project_dotenv_file): + dotenv_dict.update(dotenv_to_dict(project_dotenv_file)) dotenv_path = os.path.realpath(args.env_file) - dotenv_dict = dotenv_to_dict(dotenv_path) + dotenv_dict.update(dotenv_to_dict(dotenv_path)) # TODO: remove next line os.chdir(dirname) - os.environ.update( - { - key: value - for key, value in dotenv_dict.items() - if key.startswith("PODMAN_") - } - ) - self.environ = dict(os.environ) - self.environ.update(dotenv_dict) + os.environ.update({ + key: value for key, value in dotenv_dict.items() if key.startswith("PODMAN_") + }) + self.environ = dotenv_dict + self.environ.update(dict(os.environ)) # see: https://docs.docker.com/compose/reference/envvars/ # see: https://docs.docker.com/compose/env-file/ - self.environ.update( - { - "COMPOSE_PROJECT_DIR": dirname, - "COMPOSE_FILE": pathsep.join(relative_files), - "COMPOSE_PATH_SEPARATOR": pathsep, - } - ) + self.environ.update({ + "COMPOSE_PROJECT_DIR": dirname, + "COMPOSE_FILE": pathsep.join(relative_files), + "COMPOSE_PATH_SEPARATOR": pathsep, + }) compose = {} # Iterate over files primitively to allow appending to files in-loop files_iter = iter(files) @@ -1601,32 +1834,32 @@ class PodmanCompose: except StopIteration: break - with open(filename, "r", encoding="utf-8") as f: - content = yaml.safe_load(f) + if filename.strip().split('/')[-1] == '-': + content = yaml.safe_load(sys.stdin) + else: + with open(filename, "r", encoding="utf-8") as f: + content = yaml.safe_load(f) # log(filename, json.dumps(content, indent = 2)) - if not isinstance(content, dict): - sys.stderr.write( - "Compose file does not contain a top level object: %s\n" - % filename - ) - sys.exit(1) - content = normalize(content) - # log(filename, json.dumps(content, indent = 2)) - content = rec_subs(content, self.environ) - rec_merge(compose, content) - # If `include` is used, append included files to files - include = compose.get("include", None) - if include: - files.append(*include) - # As compose obj is updated and tested with every loop, not deleting `include` - # from it, results in it being tested again and again, original values for - # `include` be appended to `files`, and, included files be processed for ever. - # Solution is to remove 'include' key from compose obj. This doesn't break - # having `include` present and correctly processed in included files - del compose["include"] - resolved_services = self._resolve_profiles( - compose.get("services", {}), set(args.profile) - ) + if not isinstance(content, dict): + sys.stderr.write( + "Compose file does not contain a top level object: %s\n" % filename + ) + sys.exit(1) + content = normalize(content) + # log(filename, json.dumps(content, indent = 2)) + content = rec_subs(content, self.environ) + rec_merge(compose, content) + # If `include` is used, append included files to files + include = compose.get("include", None) + if include: + files.extend(include) + # As compose obj is updated and tested with every loop, not deleting `include` + # from it, results in it being tested again and again, original values for + # `include` be appended to `files`, and, included files be processed for ever. + # Solution is to remove 'include' key from compose obj. 
This doesn't break + # having `include` present and correctly processed in included files + del compose["include"] + resolved_services = self._resolve_profiles(compose.get("services", {}), set(args.profile)) compose["services"] = resolved_services if not getattr(args, "no_normalize", None): compose = normalize_final(compose, self.dirname) @@ -1636,22 +1869,20 @@ class PodmanCompose: compose["_dirname"] = dirname # debug mode if len(files) > 1: - log(" ** merged:\n", json.dumps(compose, indent=2)) + log.debug(" ** merged:\n%s", json.dumps(compose, indent=2)) # ver = compose.get('version', None) if not project_name: project_name = compose.get("name", None) if project_name is None: - # More strict then actually needed for simplicity: podman requires [a-zA-Z0-9][a-zA-Z0-9_.-]* + # More strict then actually needed for simplicity: + # podman requires [a-zA-Z0-9][a-zA-Z0-9_.-]* project_name = ( - self.environ.get("COMPOSE_PROJECT_NAME", None) - or dir_basename.lower() + self.environ.get("COMPOSE_PROJECT_NAME", None) or dir_basename.lower() ) project_name = norm_re.sub("", project_name) if not project_name: - raise RuntimeError( - f"Project name [{dir_basename}] normalized to empty" - ) + raise RuntimeError(f"Project name [{dir_basename}] normalized to empty") self.project_name = project_name self.environ.update({"COMPOSE_PROJECT_NAME": self.project_name}) @@ -1659,21 +1890,17 @@ class PodmanCompose: services = compose.get("services", None) if services is None: services = {} - log("WARNING: No services defined") + log.warning("WARNING: No services defined") # include services with no profile defined or the selected profiles services = self._resolve_profiles(services, set(args.profile)) # NOTE: maybe add "extends.service" to _deps at this stage flat_deps(services, with_extends=True) - service_names = sorted( - [(len(srv["_deps"]), name) for name, srv in services.items()] - ) + service_names = sorted([(len(srv["_deps"]), name) for name, srv in services.items()]) service_names = [name for _, name in service_names] resolve_extends(services, service_names, self.environ) flat_deps(services) - service_names = sorted( - [(len(srv["_deps"]), name) for name, srv in services.items()] - ) + service_names = sorted([(len(srv["_deps"]), name) for name, srv in services.items()]) service_names = [name for _, name in service_names] nets = compose.get("networks", None) or {} if not nets: @@ -1689,16 +1916,14 @@ class PodmanCompose: allnets = set() for name, srv in services.items(): srv_nets = srv.get("networks", None) or default_net - srv_nets = ( - list(srv_nets.keys()) if is_dict(srv_nets) else norm_as_list(srv_nets) - ) + srv_nets = list(srv_nets.keys()) if is_dict(srv_nets) else norm_as_list(srv_nets) allnets.update(srv_nets) given_nets = set(nets.keys()) missing_nets = allnets - given_nets unused_nets = given_nets - allnets - set(["default"]) if len(unused_nets): unused_nets_str = ",".join(unused_nets) - log(f"WARNING: unused networks: {unused_nets_str}") + log.warning("WARNING: unused networks: %s", unused_nets_str) if len(missing_nets): missing_nets_str = ",".join(missing_nets) raise RuntimeError(f"missing networks: {missing_nets_str}") @@ -1737,17 +1962,17 @@ class PodmanCompose: "service_name": service_name, **service_desc, } - if "image" not in cnt: + x_podman = service_desc.get("x-podman", None) + rootfs_mode = x_podman is not None and x_podman.get("rootfs", None) is not None + if "image" not in cnt and not rootfs_mode: cnt["image"] = f"{project_name}_{service_name}" labels = 
norm_as_list(cnt.get("labels", None)) cnt["ports"] = norm_ports(cnt.get("ports", None)) labels.extend(podman_compose_labels) - labels.extend( - [ - f"com.docker.compose.container-number={num}", - "com.docker.compose.service=" + service_name, - ] - ) + labels.extend([ + f"com.docker.compose.container-number={num}", + "com.docker.compose.service=" + service_name, + ]) cnt["labels"] = labels cnt["_service"] = service_name cnt["_project"] = project_name @@ -1761,9 +1986,7 @@ class PodmanCompose: and mnt_dict["source"] not in self.vols ): vol_name = mnt_dict["source"] - raise RuntimeError( - f"volume [{vol_name}] not defined in top level" - ) + raise RuntimeError(f"volume [{vol_name}] not defined in top level") self.container_names_by_service = container_names_by_service self.all_services = set(container_names_by_service.keys()) container_by_name = {c["name"]: c for c in given_containers} @@ -1771,6 +1994,10 @@ class PodmanCompose: given_containers = list(container_by_name.values()) given_containers.sort(key=lambda c: len(c.get("_deps", None) or [])) # log("sorted:", [c["name"] for c in given_containers]) + + self.x_podman = compose.get("x-podman", {}) + + args.in_pod_bool = self.resolve_in_pod() pods, containers = transform(args, project_name, given_containers) self.pods = pods self.containers = containers @@ -1778,11 +2005,11 @@ class PodmanCompose: def _resolve_profiles(self, defined_services, requested_profiles=None): """ - Returns a service dictionary (key = service name, value = service config) compatible with the requested_profiles - list. + Returns a service dictionary (key = service name, value = service config) compatible with + the requested_profiles list. - The returned service dictionary contains all services which do not include/reference a profile in addition to - services that match the requested_profiles. + The returned service dictionary contains all services which do not include/reference a + profile in addition to services that match the requested_profiles. :param defined_services: The service dictionary :param requested_profiles: The profiles requested using the --profile arg. @@ -1794,29 +2021,44 @@ class PodmanCompose: for name, config in defined_services.items(): service_profiles = set(config.get("profiles", [])) - if not service_profiles or requested_profiles.intersection( - service_profiles - ): + if not service_profiles or requested_profiles.intersection(service_profiles): services[name] = config return services - def _parse_args(self): + def _parse_args(self, argv=None): parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter) self._init_global_parser(parser) subparsers = parser.add_subparsers(title="command", dest="command") subparser = subparsers.add_parser("help", help="show help") for cmd_name, cmd in self.commands.items(): - subparser = subparsers.add_parser( - cmd_name, help=cmd.desc - ) # pylint: disable=protected-access + subparser = subparsers.add_parser(cmd_name, help=cmd.desc) # pylint: disable=protected-access for cmd_parser in cmd._parse_args: # pylint: disable=protected-access cmd_parser(subparser) - self.global_args = parser.parse_args() + self.global_args = parser.parse_args(argv) + if self.global_args.in_pod is not None and self.global_args.in_pod.lower() not in ( + '', + 'true', + '1', + 'false', + '0', + ): + raise ValueError( + f'Invalid --in-pod value: \'{self.global_args.in_pod}\'. 
' + 'It must be set to either of: empty value, true, 1, false, 0' + ) + + if self.global_args.in_pod == '' or self.global_args.in_pod is None: + self.global_args.in_pod_bool = None + else: + self.global_args.in_pod_bool = self.global_args.in_pod.lower() in ('true', '1') + if self.global_args.version: self.global_args.command = "version" if not self.global_args.command or self.global_args.command == "help": parser.print_help() sys.exit(-1) + + logging.basicConfig(level=("DEBUG" if self.global_args.verbose else "WARN")) return self.global_args @staticmethod @@ -1826,8 +2068,8 @@ class PodmanCompose: "--in-pod", help="pod creation", metavar="in_pod", - type=bool, - default=True, + type=str, + default=None, ) parser.add_argument( "--pod-args", @@ -1846,7 +2088,7 @@ class PodmanCompose: parser.add_argument( "-f", "--file", - help="Specify an alternate compose file (default: docker-compose.yml)", + help="Specify an compose file (default: docker-compose.yml) or '-' to read from stdin.", metavar="file", action="append", default=[], @@ -1901,13 +2143,24 @@ class PodmanCompose: help="No action; perform a simulation of commands", action="store_true", ) + parser.add_argument( + "--parallel", type=int, default=os.environ.get("COMPOSE_PARALLEL_LIMIT", sys.maxsize) + ) + parser.add_argument( + "--verbose", + help="Print debugging output", + action="store_true", + ) podman_compose = PodmanCompose() + ################### # decorators to add commands and parse options ################### +class PodmanComposeError(Exception): + pass class cmd_run: # pylint: disable=invalid-name,too-few-public-methods @@ -1920,6 +2173,8 @@ class cmd_run: # pylint: disable=invalid-name,too-few-public-methods def wrapped(*args, **kw): return func(*args, **kw) + if not asyncio.iscoroutinefunction(func): + raise PodmanComposeError("Command must be async") wrapped._compose = self.compose # Trim extra indentation at start of multiline docstrings. 
wrapped.desc = self.cmd_desc or re.sub(r"^\s+", "", func.__doc__) @@ -1948,7 +2203,7 @@ class cmd_parse: # pylint: disable=invalid-name,too-few-public-methods @cmd_run(podman_compose, "version", "show version") -def compose_version(compose, args): +async def compose_version(compose, args): if getattr(args, "short", False): print(__version__) return @@ -1957,7 +2212,7 @@ def compose_version(compose, args): print(json.dumps(res)) return print("podman-compose version", __version__) - compose.podman.run(["--version"], "", [], sleep=0) + await compose.podman.run(["--version"], "", []) def is_local(container: dict) -> bool: @@ -1973,15 +2228,15 @@ def is_local(container: dict) -> bool: @cmd_run(podman_compose, "wait", "wait running containers to stop") -def compose_wait(compose, args): # pylint: disable=unused-argument +async def compose_wait(compose, args): # pylint: disable=unused-argument containers = [cnt["name"] for cnt in compose.containers] cmd_args = ["--"] cmd_args.extend(containers) - compose.podman.exec([], "wait", cmd_args) + await compose.podman.exec([], "wait", cmd_args) @cmd_run(podman_compose, "systemd") -def compose_systemd(compose, args): +async def compose_systemd(compose, args): """ create systemd unit file and register its compose stacks @@ -1994,15 +2249,13 @@ def compose_systemd(compose, args): proj_name = compose.project_name fn = os.path.expanduser(f"~/{stacks_dir}/{proj_name}.env") os.makedirs(os.path.dirname(fn), exist_ok=True) - print(f"writing [{fn}]: ...") + log.debug("writing [%s]: ...", fn) with open(fn, "w", encoding="utf-8") as f: for k, v in compose.environ.items(): if k.startswith("COMPOSE_") or k.startswith("PODMAN_"): f.write(f"{k}={v}\n") - print(f"writing [{fn}]: done.") - print("\n\ncreating the pod without starting it: ...\n\n") - process = subprocess.run([script, "up", "--no-start"], check=False) - print("\nfinal exit code is ", process.returncode) + log.debug("writing [%s]: done.", fn) + log.info("\n\ncreating the pod without starting it: ...\n\n") username = getpass.getuser() print( f""" @@ -2049,10 +2302,10 @@ ExecStop=/usr/bin/podman pod stop pod_%i WantedBy=default.target """ if os.access(os.path.dirname(fn), os.W_OK): - print(f"writing [{fn}]: ...") + log.debug("writing [%s]: ...", fn) with open(fn, "w", encoding="utf-8") as f: f.write(out) - print(f"writing [{fn}]: done.") + log.debug("writing [%s]: done.", fn) print( """ while in your project type `podman-compose systemd -a register` @@ -2060,11 +2313,11 @@ while in your project type `podman-compose systemd -a register` ) else: print(out) - log(f"Could not write to [{fn}], use 'sudo'") + log.warning("Could not write to [%s], use 'sudo'", fn) @cmd_run(podman_compose, "pull", "pull stack images") -def compose_pull(compose, args): +async def compose_pull(compose, args): img_containers = [cnt for cnt in compose.containers if "image" in cnt] if args.services: services = set(args.services) @@ -2073,27 +2326,27 @@ def compose_pull(compose, args): if not args.force_local: local_images = {cnt["image"] for cnt in img_containers if is_local(cnt)} images -= local_images - for image in images: - compose.podman.run([], "pull", [image], sleep=0) + + await asyncio.gather(*[compose.podman.run([], "pull", [image]) for image in images]) @cmd_run(podman_compose, "push", "push stack images") -def compose_push(compose, args): +async def compose_push(compose, args): services = set(args.services) for cnt in compose.containers: if "build" not in cnt: continue if services and cnt["_service"] not in services: continue - 
compose.podman.run([], "push", [cnt["image"]], sleep=0) + await compose.podman.run([], "push", [cnt["image"]]) -def build_one(compose, args, cnt): +async def build_one(compose, args, cnt): if "build" not in cnt: return None if getattr(args, "if_not_exists", None): try: - img_id = compose.podman.output( + img_id = await compose.podman.output( [], "inspect", ["-t", "image", "-f", "{{.Id}}", cnt["image"]] ) except subprocess.CalledProcessError: @@ -2124,12 +2377,19 @@ def build_one(compose, args, cnt): if os.path.exists(dockerfile) or re.match(r"://", ctx) or re.match(r"[^:]+:.+", ctx): build_args.extend(["-f", dockerfile]) for secret in build_desc.get("secrets", []): - build_args.extend(get_secret_args(compose, cnt, secret)) + build_args.extend(get_secret_args(compose, cnt, secret, podman_is_building=True)) for tag in build_desc.get("tags", []): build_args.extend(["-t", tag]) + labels = build_desc.get("labels", []) + if isinstance(labels, dict): + labels = [f"{k}={v}" for (k, v) in labels.items()] + for label in labels: + build_args.extend(["--label", label]) + for additional_ctx in build_desc.get("additional_contexts", {}): + build_args.extend([f"--build-context={additional_ctx}"]) if "target" in build_desc: build_args.extend(["--target", build_desc["target"]]) - container_to_ulimit_args(cnt, build_args) + container_to_ulimit_build_args(cnt, build_args) if getattr(args, "no_cache", None): build_args.append("--no-cache") if getattr(args, "pull_always", None): @@ -2142,48 +2402,49 @@ def build_one(compose, args, cnt): args_list = norm_as_list(build_desc.get("args", {})) for build_arg in args_list + args.build_arg: - build_args.extend( - ( - "--build-arg", - build_arg, - ) - ) + build_args.extend(( + "--build-arg", + build_arg, + )) build_args.append(ctx) - status = compose.podman.run([], "build", build_args, sleep=0) + status = await compose.podman.run([], "build", build_args) return status @cmd_run(podman_compose, "build", "build stack images") -def compose_build(compose, args): - # keeps the status of the last service/container built - status = 0 - - def parse_return_code(obj, current_status): - if obj and obj.returncode != 0: - return obj.returncode - return current_status +async def compose_build(compose, args): + tasks = [] if args.services: container_names_by_service = compose.container_names_by_service compose.assert_services(args.services) for service in args.services: cnt = compose.container_by_name[container_names_by_service[service][0]] - p = build_one(compose, args, cnt) - status = parse_return_code(p, status) - if status != 0: - return status + tasks.append(asyncio.create_task(build_one(compose, args, cnt))) + else: for cnt in compose.containers: - p = build_one(compose, args, cnt) - status = parse_return_code(p, status) - if status != 0: - return status + tasks.append(asyncio.create_task(build_one(compose, args, cnt))) + + status = 0 + for t in asyncio.as_completed(tasks): + s = await t + if s is not None: + status = s return status -def create_pods(compose, args): # pylint: disable=unused-argument +async def pod_exists(compose, name): + exit_code = await compose.podman.run([], "pod", ["exists", name]) + return exit_code == 0 + + +async def create_pods(compose, args): # pylint: disable=unused-argument for pod in compose.pods: + if await pod_exists(compose, pod["name"]): + continue + podman_args = [ "create", "--name=" + pod["name"], @@ -2197,7 +2458,7 @@ def create_pods(compose, args): # pylint: disable=unused-argument ports = [ports] for i in ports: podman_args.extend(["-p", 
str(i)]) - compose.podman.run([], "pod", podman_args) + await compose.podman.run([], "pod", podman_args) def get_excluded(compose, args): @@ -2207,55 +2468,56 @@ def get_excluded(compose, args): for service in args.services: excluded -= compose.services[service]["_deps"] excluded.discard(service) - log("** excluding: ", excluded) + log.debug("** excluding: %s", excluded) return excluded -@cmd_run( - podman_compose, "up", "Create and start the entire stack or some of its services" -) -def compose_up(compose, args): +@cmd_run(podman_compose, "up", "Create and start the entire stack or some of its services") +async def compose_up(compose: PodmanCompose, args): proj_name = compose.project_name excluded = get_excluded(compose, args) if not args.no_build: # `podman build` does not cache, so don't always build build_args = argparse.Namespace(if_not_exists=(not args.build), **args.__dict__) - compose.commands["build"](compose, build_args) + if await compose.commands["build"](compose, build_args) != 0: + log.error("Build command failed") hashes = ( - compose.podman.output( - [], - "ps", - [ - "--filter", - f"label=io.podman.compose.project={proj_name}", - "-a", - "--format", - '{{ index .Labels "io.podman.compose.config-hash"}}', - ], + ( + await compose.podman.output( + [], + "ps", + [ + "--filter", + f"label=io.podman.compose.project={proj_name}", + "-a", + "--format", + '{{ index .Labels "io.podman.compose.config-hash"}}', + ], + ) ) .decode("utf-8") .splitlines() ) diff_hashes = [i for i in hashes if i and i != compose.yaml_hash] if args.force_recreate or len(diff_hashes): - log("recreating: ...") + log.info("recreating: ...") down_args = argparse.Namespace(**dict(args.__dict__, volumes=False)) - compose.commands["down"](compose, down_args) - log("recreating: done\n\n") + await compose.commands["down"](compose, down_args) + log.info("recreating: done\n\n") # args.no_recreate disables check for changes (which is not implemented) podman_command = "run" if args.detach and not args.no_start else "create" - create_pods(compose, args) + await create_pods(compose, args) for cnt in compose.containers: if cnt["_service"] in excluded: - log("** skipping: ", cnt["name"]) + log.debug("** skipping: %s", cnt["name"]) continue - podman_args = container_to_args(compose, cnt, detached=args.detach) - subproc = compose.podman.run([], podman_command, podman_args) - if podman_command == "run" and subproc and subproc.returncode: - compose.podman.run([], "start", [cnt["name"]]) + podman_args = await container_to_args(compose, cnt, detached=args.detach) + subproc = await compose.podman.run([], podman_command, podman_args) + if podman_command == "run" and subproc is not None: + await compose.podman.run([], "start", [cnt["name"]]) if args.no_start or args.detach or args.dry_run: return # TODO: handle already existing @@ -2265,54 +2527,61 @@ def compose_up(compose, args): if exit_code_from: args.abort_on_container_exit = True - threads = [] - max_service_length = 0 for cnt in compose.containers: curr_length = len(cnt["_service"]) - max_service_length = ( - curr_length if curr_length > max_service_length else max_service_length - ) - has_sed = os.path.isfile("/bin/sed") + max_service_length = curr_length if curr_length > max_service_length else max_service_length + + tasks = set() + + loop = asyncio.get_event_loop() + loop.add_signal_handler(signal.SIGINT, lambda: [t.cancel("User exit") for t in tasks]) + for i, cnt in enumerate(compose.containers): # Add colored service prefix to output by piping output through sed 
color_idx = i % len(compose.console_colors) color = compose.console_colors[color_idx] space_suffix = " " * (max_service_length - len(cnt["_service"]) + 1) - log_formatter = "s/^/{}[{}]{}|\x1B[0m\\ /;".format( - color, cnt["_service"], space_suffix - ) - log_formatter = ["sed", "-e", log_formatter] if has_sed else None + log_formatter = "{}[{}]{}|\x1b[0m".format(color, cnt["_service"], space_suffix) if cnt["_service"] in excluded: - log("** skipping: ", cnt["name"]) + log.debug("** skipping: %s", cnt["name"]) continue - # TODO: remove sleep from podman.run - obj = compose if exit_code_from == cnt["_service"] else None - thread = Thread( - target=compose.podman.run, - args=[[], "start", ["-a", cnt["name"]]], - kwargs={"obj": obj, "log_formatter": log_formatter}, - daemon=True, - name=cnt["name"], - ) - thread.start() - threads.append(thread) - time.sleep(1) - while threads: - to_remove = [] - for thread in threads: - thread.join(timeout=1.0) - if not thread.is_alive(): - to_remove.append(thread) - if args.abort_on_container_exit: - time.sleep(1) - exit_code = ( - compose.exit_code if compose.exit_code is not None else -1 - ) - sys.exit(exit_code) - for thread in to_remove: - threads.remove(thread) + tasks.add( + asyncio.create_task( + compose.podman.run([], "start", ["-a", cnt["name"]], log_formatter=log_formatter), + name=cnt["_service"], + ) + ) + + def _task_cancelled(task: Task) -> bool: + if task.cancelled(): + return True + # Task.cancelling() is new in python 3.11 + if sys.version_info >= (3, 11) and task.cancelling(): + return True + return False + + exit_code = 0 + exiting = False + while tasks: + done, tasks = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED) + if args.abort_on_container_exit: + if not exiting: + # If 2 containers exit at the exact same time, the cancellation of the other ones + # cause the status to overwrite. 
Sleeping for 1 seems to fix this and make it match + # docker-compose + await asyncio.sleep(1) + for t in tasks: + if not _task_cancelled(t): + t.cancel() + t: Task + exiting = True + for t in done: + if t.get_name() == exit_code_from: + exit_code = t.result() + + return exit_code def get_volume_names(compose, cnt): @@ -2333,12 +2602,14 @@ def get_volume_names(compose, cnt): @cmd_run(podman_compose, "down", "tear down entire stack") -def compose_down(compose, args): +async def compose_down(compose, args): excluded = get_excluded(compose, args) podman_args = [] timeout_global = getattr(args, "timeout", None) containers = list(reversed(compose.containers)) + down_tasks = [] + for cnt in containers: if cnt["_service"] in excluded: continue @@ -2349,68 +2620,70 @@ def compose_down(compose, args): timeout = str_to_seconds(timeout_str) if timeout is not None: podman_stop_args.extend(["-t", str(timeout)]) - compose.podman.run([], "stop", [*podman_stop_args, cnt["name"]], sleep=0) + down_tasks.append( + asyncio.create_task( + compose.podman.run([], "stop", [*podman_stop_args, cnt["name"]]), name=cnt["name"] + ) + ) + await asyncio.gather(*down_tasks) for cnt in containers: if cnt["_service"] in excluded: continue - compose.podman.run([], "rm", [cnt["name"]], sleep=0) + await compose.podman.run([], "rm", [cnt["name"]]) if args.remove_orphans: names = ( - compose.podman.output( - [], - "ps", - [ - "--filter", - f"label=io.podman.compose.project={compose.project_name}", - "-a", - "--format", - "{{ .Names }}", - ], + ( + await compose.podman.output( + [], + "ps", + [ + "--filter", + f"label=io.podman.compose.project={compose.project_name}", + "-a", + "--format", + "{{ .Names }}", + ], + ) ) .decode("utf-8") .splitlines() ) for name in names: - compose.podman.run([], "stop", [*podman_args, name], sleep=0) + await compose.podman.run([], "stop", [*podman_args, name]) for name in names: - compose.podman.run([], "rm", [name], sleep=0) + await compose.podman.run([], "rm", [name]) if args.volumes: vol_names_to_keep = set() for cnt in containers: if cnt["_service"] not in excluded: continue vol_names_to_keep.update(get_volume_names(compose, cnt)) - log("keep", vol_names_to_keep) - for volume_name in compose.podman.volume_ls(): + log.debug("keep %s", vol_names_to_keep) + for volume_name in await compose.podman.volume_ls(): if volume_name in vol_names_to_keep: continue - compose.podman.run([], "volume", ["rm", volume_name]) + await compose.podman.run([], "volume", ["rm", volume_name]) if excluded: return for pod in compose.pods: - compose.podman.run([], "pod", ["rm", pod["name"]], sleep=0) + await compose.podman.run([], "pod", ["rm", pod["name"]]) @cmd_run(podman_compose, "ps", "show status of containers") -def compose_ps(compose, args): +async def compose_ps(compose, args): proj_name = compose.project_name + ps_args = ["-a", "--filter", f"label=io.podman.compose.project={proj_name}"] if args.quiet is True: - compose.podman.run( - [], - "ps", - [ - "-a", - "--format", - "{{.ID}}", - "--filter", - f"label=io.podman.compose.project={proj_name}", - ], - ) - else: - compose.podman.run( - [], "ps", ["-a", "--filter", f"label=io.podman.compose.project={proj_name}"] - ) + ps_args.extend(["--format", "{{.ID}}"]) + elif args.format: + ps_args.extend(["--format", args.format]) + + await compose.podman.run( + [], + "ps", + ps_args, + ) @cmd_run( @@ -2418,8 +2691,8 @@ def compose_ps(compose, args): "run", "create a container similar to a service to run a one-off command", ) -def compose_run(compose, args): - 
create_pods(compose, args) +async def compose_run(compose, args): + await create_pods(compose, args) compose.assert_services(args.service) container_names = compose.container_names_by_service[args.service] container_name = container_names[0] @@ -2438,22 +2711,31 @@ def compose_run(compose, args): no_start=False, no_cache=False, build_arg=[], + parallel=1, + remove_orphans=True, ) ) - compose.commands["up"](compose, up_args) + await compose.commands["up"](compose, up_args) build_args = argparse.Namespace( - services=[args.service], - if_not_exists=(not args.build), - build_arg=[], - **args.__dict__, + services=[args.service], if_not_exists=(not args.build), build_arg=[], **args.__dict__ ) - compose.commands["build"](compose, build_args) + await compose.commands["build"](compose, build_args) + compose_run_update_container_from_args(compose, cnt, args) + # run podman + podman_args = await container_to_args(compose, cnt, args.detach) + if not args.detach: + podman_args.insert(1, "-i") + if args.rm: + podman_args.insert(1, "--rm") + p = await compose.podman.run([], "run", podman_args) + sys.exit(p) + + +def compose_run_update_container_from_args(compose, cnt, args): # adjust one-off container options - name0 = "{}_{}_tmp{}".format( - compose.project_name, args.service, random.randrange(0, 65536) - ) + name0 = "{}_{}_tmp{}".format(compose.project_name, args.service, random.randrange(0, 65536)) cnt["name"] = args.name or name0 if args.entrypoint: cnt["entrypoint"] = args.entrypoint @@ -2463,7 +2745,7 @@ def compose_run(compose, args): cnt["working_dir"] = args.workdir env = dict(cnt.get("environment", {})) if args.env: - additional_env_vars = dict(map(lambda each: each.split("="), args.env)) + additional_env_vars = dict(map(lambda each: each.split("=", maxsplit=1), args.env)) env.update(additional_env_vars) cnt["environment"] = env if not args.service_ports: @@ -2472,6 +2754,10 @@ def compose_run(compose, args): del cnt[k] except KeyError: pass + if args.publish: + ports = cnt.get("ports", []) + ports.extend(norm_ports(args.publish)) + cnt["ports"] = ports if args.volume: # TODO: handle volumes volumes = clone(cnt.get("volumes", None) or []) @@ -2483,22 +2769,20 @@ def compose_run(compose, args): # can't restart and --rm if args.rm and "restart" in cnt: del cnt["restart"] - # run podman - podman_args = container_to_args(compose, cnt, args.detach) - if not args.detach: - podman_args.insert(1, "-i") - if args.rm: - podman_args.insert(1, "--rm") - p = compose.podman.run([], "run", podman_args, sleep=0) - sys.exit(p.returncode) @cmd_run(podman_compose, "exec", "execute a command in a running container") -def compose_exec(compose, args): +async def compose_exec(compose, args): compose.assert_services(args.service) container_names = compose.container_names_by_service[args.service] container_name = container_names[args.index - 1] cnt = compose.container_by_name[container_name] + podman_args = compose_exec_args(cnt, container_name, args) + p = await compose.podman.run([], "exec", podman_args) + sys.exit(p) + + +def compose_exec_args(cnt, container_name, args): podman_args = ["--interactive"] if args.privileged: podman_args += ["--privileged"] @@ -2511,7 +2795,7 @@ def compose_exec(compose, args): env = dict(cnt.get("environment", {})) if args.env: additional_env_vars = dict( - map(lambda each: each.split("=") if "=" in each else (each, None), args.env) + map(lambda each: each.split("=", maxsplit=1) if "=" in each else (each, None), args.env) ) env.update(additional_env_vars) for name, value in 
env.items(): @@ -2519,11 +2803,10 @@ def compose_exec(compose, args): podman_args += [container_name] if args.cnt_command is not None and len(args.cnt_command) > 0: podman_args += args.cnt_command - p = compose.podman.run([], "exec", podman_args, sleep=0) - sys.exit(p.returncode) + return podman_args -def transfer_service_status(compose, args, action): +async def transfer_service_status(compose, args, action): # TODO: handle dependencies, handle creations container_names_by_service = compose.container_names_by_service if not args.services: @@ -2536,9 +2819,10 @@ def transfer_service_status(compose, args, action): targets.extend(container_names_by_service[service]) if action in ["stop", "restart"]: targets = list(reversed(targets)) - podman_args = [] timeout_global = getattr(args, "timeout", None) + tasks = [] for target in targets: + podman_args = [] if action != "start": timeout = timeout_global if timeout is None: @@ -2549,26 +2833,27 @@ def transfer_service_status(compose, args, action): timeout = str_to_seconds(timeout_str) if timeout is not None: podman_args.extend(["-t", str(timeout)]) - compose.podman.run([], action, podman_args + [target], sleep=0) + tasks.append(asyncio.create_task(compose.podman.run([], action, podman_args + [target]))) + await asyncio.gather(*tasks) @cmd_run(podman_compose, "start", "start specific services") -def compose_start(compose, args): - transfer_service_status(compose, args, "start") +async def compose_start(compose, args): + await transfer_service_status(compose, args, "start") @cmd_run(podman_compose, "stop", "stop specific services") -def compose_stop(compose, args): - transfer_service_status(compose, args, "stop") +async def compose_stop(compose, args): + await transfer_service_status(compose, args, "stop") @cmd_run(podman_compose, "restart", "restart specific services") -def compose_restart(compose, args): - transfer_service_status(compose, args, "restart") +async def compose_restart(compose, args): + await transfer_service_status(compose, args, "restart") @cmd_run(podman_compose, "logs", "show logs from services") -def compose_logs(compose, args): +async def compose_logs(compose, args): container_names_by_service = compose.container_names_by_service if not args.services and not args.latest: args.services = container_names_by_service.keys() @@ -2595,11 +2880,11 @@ def compose_logs(compose, args): podman_args.extend(["--until", args.until]) for target in targets: podman_args.append(target) - compose.podman.run([], "logs", podman_args) + await compose.podman.run([], "logs", podman_args) @cmd_run(podman_compose, "config", "displays the compose file") -def compose_config(compose, args): +async def compose_config(compose, args): if args.services: for service in compose.services: print(service) @@ -2608,7 +2893,7 @@ def compose_config(compose, args): @cmd_run(podman_compose, "port", "Prints the public port for a port binding.") -def compose_port(compose, args): +async def compose_port(compose, args): # TODO - deal with pod index compose.assert_services(args.service) containers = compose.container_names_by_service[args.service] @@ -2636,34 +2921,32 @@ def compose_port(compose, args): @cmd_run(podman_compose, "pause", "Pause all running containers") -def compose_pause(compose, args): +async def compose_pause(compose, args): container_names_by_service = compose.container_names_by_service if not args.services: args.services = container_names_by_service.keys() targets = [] for service in args.services: targets.extend(container_names_by_service[service]) - 
compose.podman.run([], "pause", targets) + await compose.podman.run([], "pause", targets) @cmd_run(podman_compose, "unpause", "Unpause all running containers") -def compose_unpause(compose, args): +async def compose_unpause(compose, args): container_names_by_service = compose.container_names_by_service if not args.services: args.services = container_names_by_service.keys() targets = [] for service in args.services: targets.extend(container_names_by_service[service]) - compose.podman.run([], "unpause", targets) + await compose.podman.run([], "unpause", targets) -@cmd_run( - podman_compose, "kill", "Kill one or more running containers with a specific signal" -) -def compose_kill(compose, args): +@cmd_run(podman_compose, "kill", "Kill one or more running containers with a specific signal") +async def compose_kill(compose, args): # to ensure that the user did not execute the command by mistake if not args.services and not args.all: - print( + log.fatal( "Error: you must provide at least one service name or use (--all) to kill all services" ) sys.exit() @@ -2681,15 +2964,14 @@ def compose_kill(compose, args): targets.extend(container_names_by_service[service]) for target in targets: podman_args.append(target) - compose.podman.run([], "kill", podman_args) - - if args.services: + await compose.podman.run([], "kill", podman_args) + elif args.services: targets = [] for service in args.services: targets.extend(container_names_by_service[service]) for target in targets: podman_args.append(target) - compose.podman.run([], "kill", podman_args) + await compose.podman.run([], "kill", podman_args) @cmd_run( @@ -2697,7 +2979,7 @@ def compose_kill(compose, args): "stats", "Display percentage of CPU, memory, network I/O, block I/O and PIDs for services.", ) -def compose_stats(compose, args): +async def compose_stats(compose, args): container_names_by_service = compose.container_names_by_service if not args.services: args.services = container_names_by_service.keys() @@ -2718,11 +3000,47 @@ def compose_stats(compose, args): podman_args.append(target) try: - compose.podman.run([], "stats", podman_args) + await compose.podman.run([], "stats", podman_args) except KeyboardInterrupt: pass +@cmd_run(podman_compose, "images", "List images used by the created containers") +async def compose_images(compose, args): + img_containers = [cnt for cnt in compose.containers if "image" in cnt] + data = [] + if args.quiet is True: + for img in img_containers: + name = img["name"] + output = await compose.podman.output([], "images", ["--quiet", img["image"]]) + data.append(output.decode("utf-8").split()) + else: + data.append(["CONTAINER", "REPOSITORY", "TAG", "IMAGE ID", "SIZE", ""]) + for img in img_containers: + name = img["name"] + output = await compose.podman.output( + [], + "images", + [ + "--format", + "table " + name + " {{.Repository}} {{.Tag}} {{.ID}} {{.Size}}", + "-n", + img["image"], + ], + ) + data.append(output.decode("utf-8").split()) + + # Determine the maximum length of each column + column_widths = [max(map(len, column)) for column in zip(*data)] + + # Print each row + for row in data: + # Format each cell using the maximum column width + formatted_row = [cell.ljust(width) for cell, width in zip(row, column_widths)] + formatted_row[-2:] = ["".join(formatted_row[-2:]).strip()] + print("\t".join(formatted_row)) + + ################### # command arguments parsing ################### @@ -2753,17 +3071,13 @@ def compose_up_parse(parser): help="Detached mode: Run container in the background, print new container 
name. \ Incompatible with --abort-on-container-exit.", ) - parser.add_argument( - "--no-color", action="store_true", help="Produce monochrome output." - ) + parser.add_argument("--no-color", action="store_true", help="Produce monochrome output.") parser.add_argument( "--quiet-pull", action="store_true", help="Pull without printing progress information.", ) - parser.add_argument( - "--no-deps", action="store_true", help="Don't start linked services." - ) + parser.add_argument("--no-deps", action="store_true", help="Don't start linked services.") parser.add_argument( "--force-recreate", action="store_true", @@ -2777,7 +3091,8 @@ def compose_up_parse(parser): parser.add_argument( "--no-recreate", action="store_true", - help="If containers already exist, don't recreate them. Incompatible with --force-recreate and -V.", + help="If containers already exist, don't recreate them. Incompatible with --force-recreate " + "and -V.", ) parser.add_argument( "--no-build", @@ -2802,8 +3117,8 @@ def compose_up_parse(parser): "--timeout", type=int, default=None, - help="Use this timeout in seconds for container shutdown when attached or when containers are already running. \ - (default: 10)", + help="Use this timeout in seconds for container shutdown when attached or when containers " + "are already running. (default: 10)", ) parser.add_argument( "-V", @@ -2820,14 +3135,16 @@ def compose_up_parse(parser): "--scale", metavar="SERVICE=NUM", action="append", - help="Scale SERVICE to NUM instances. Overrides the `scale` setting in the Compose file if present.", + help="Scale SERVICE to NUM instances. Overrides the `scale` setting in the Compose file if " + "present.", ) parser.add_argument( "--exit-code-from", metavar="SERVICE", type=str, default=None, - help="Return the exit code of the selected service container. Implies --abort-on-container-exit.", + help="Return the exit code of the selected service container. " + "Implies --abort-on-container-exit.", ) @@ -2859,9 +3176,7 @@ def compose_run_parse(parser): action="store_true", help="Detached mode: Run container in the background, print new container name.", ) - parser.add_argument( - "--name", type=str, default=None, help="Assign a name to the container" - ) + parser.add_argument("--name", type=str, default=None, help="Assign a name to the container") parser.add_argument( "--entrypoint", type=str, @@ -2885,9 +3200,7 @@ def compose_run_parse(parser): parser.add_argument( "-u", "--user", type=str, default=None, help="Run as specified username or uid" ) - parser.add_argument( - "--no-deps", action="store_true", help="Don't start linked services" - ) + parser.add_argument("--no-deps", action="store_true", help="Don't start linked services") parser.add_argument( "--rm", action="store_true", @@ -3013,21 +3326,15 @@ def compose_logs_parse(parser): action="store_true", help="Output the container name in the log", ) - parser.add_argument( - "--since", help="Show logs since TIMESTAMP", type=str, default=None - ) - parser.add_argument( - "-t", "--timestamps", action="store_true", help="Show timestamps." 
- ) + parser.add_argument("--since", help="Show logs since TIMESTAMP", type=str, default=None) + parser.add_argument("-t", "--timestamps", action="store_true", help="Show timestamps.") parser.add_argument( "--tail", help="Number of lines to show from the end of the logs for each " "container.", type=str, default="all", ) - parser.add_argument( - "--until", help="Show logs until TIMESTAMP", type=str, default=None - ) + parser.add_argument("--until", help="Show logs until TIMESTAMP", type=str, default=None) parser.add_argument( "services", metavar="services", nargs="*", default=None, help="service names" ) @@ -3052,9 +3359,7 @@ def compose_pull_parse(parser): default=False, help="Also pull unprefixed images for services which have a build section", ) - parser.add_argument( - "services", metavar="services", nargs="*", help="services to pull" - ) + parser.add_argument("services", metavar="services", nargs="*", help="services to pull") @cmd_parse(podman_compose, "push") @@ -3064,16 +3369,12 @@ def compose_push_parse(parser): action="store_true", help="Push what it can and ignores images with push failures. (not implemented)", ) - parser.add_argument( - "services", metavar="services", nargs="*", help="services to push" - ) + parser.add_argument("services", metavar="services", nargs="*", help="services to push") @cmd_parse(podman_compose, "ps") def compose_ps_parse(parser): - parser.add_argument( - "-q", "--quiet", help="Only display container IDs", action="store_true" - ) + parser.add_argument("-q", "--quiet", help="Only display container IDs", action="store_true") @cmd_parse(podman_compose, ["build", "up"]) @@ -3085,7 +3386,8 @@ def compose_build_up_parse(parser): ) parser.add_argument( "--pull-always", - help="attempt to pull a newer version of the image, Raise an error even if the image is present locally.", + help="attempt to pull a newer version of the image, Raise an error even if the image is " + "present locally.", action="store_true", ) parser.add_argument( @@ -3173,6 +3475,11 @@ def compose_kill_parse(parser): ) +@cmd_parse(podman_compose, "images") +def compose_images_parse(parser): + parser.add_argument("-q", "--quiet", help="Only display images IDs", action="store_true") + + @cmd_parse(podman_compose, ["stats"]) def compose_stats_parse(parser): parser.add_argument( @@ -3184,12 +3491,6 @@ def compose_stats_parse(parser): type=int, help="Time in seconds between stats reports (default 5)", ) - parser.add_argument( - "-f", - "--format", - type=str, - help="Pretty-print container statistics to JSON or using a Go template", - ) parser.add_argument( "--no-reset", help="Disable resetting the screen between intervals", @@ -3202,8 +3503,22 @@ def compose_stats_parse(parser): ) +@cmd_parse(podman_compose, ["ps", "stats"]) +def compose_format_parse(parser): + parser.add_argument( + "-f", + "--format", + type=str, + help="Pretty-print container statistics to JSON or using a Go template", + ) + + +async def async_main(): + await podman_compose.run() + + def main(): - podman_compose.run() + asyncio.run(async_main()) if __name__ == "__main__": diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..b6a8360 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,15 @@ +[tool.ruff] +line-length = 100 +target-version = "py38" + +[tool.ruff.lint] +select = ["W", "E", "F", "I"] +ignore = [ +] + +[tool.ruff.lint.isort] +force-single-line = true + +[tool.ruff.format] +preview = true # needed for quote-style +quote-style = "preserve" diff --git a/pytests/test_can_merge_build.py 
b/pytests/test_can_merge_build.py deleted file mode 100644 index d08081a..0000000 --- a/pytests/test_can_merge_build.py +++ /dev/null @@ -1,168 +0,0 @@ -import copy -import os -import argparse -import yaml -from podman_compose import normalize_service, PodmanCompose - - -test_cases_simple = [ - ({"test": "test"}, {"test": "test"}), - ({"build": "."}, {"build": {"context": "."}}), - ({"build": "./dir-1"}, {"build": {"context": "./dir-1"}}), - ({"build": {"context": "./dir-1"}}, {"build": {"context": "./dir-1"}}), - ( - {"build": {"dockerfile": "dockerfile-1"}}, - {"build": {"dockerfile": "dockerfile-1"}}, - ), - ( - {"build": {"context": "./dir-1", "dockerfile": "dockerfile-1"}}, - {"build": {"context": "./dir-1", "dockerfile": "dockerfile-1"}}, - ), -] - - -def test_normalize_service_simple(): - for test_case, expected in copy.deepcopy(test_cases_simple): - test_original = copy.deepcopy(test_case) - test_case = normalize_service(test_case) - test_result = expected == test_case - if not test_result: - print("test: ", test_original) - print("expected: ", expected) - print("actual: ", test_case) - assert test_result - - -test_cases_sub_dir = [ - ({"test": "test"}, {"test": "test"}), - ({"build": "."}, {"build": {"context": "./sub_dir/."}}), - ({"build": "./dir-1"}, {"build": {"context": "./sub_dir/dir-1"}}), - ({"build": {"context": "./dir-1"}}, {"build": {"context": "./sub_dir/dir-1"}}), - ( - {"build": {"dockerfile": "dockerfile-1"}}, - {"build": {"context": "./sub_dir", "dockerfile": "dockerfile-1"}}, - ), - ( - {"build": {"context": "./dir-1", "dockerfile": "dockerfile-1"}}, - {"build": {"context": "./sub_dir/dir-1", "dockerfile": "dockerfile-1"}}, - ), -] - - -def test_normalize_service_with_sub_dir(): - for test_case, expected in copy.deepcopy(test_cases_sub_dir): - test_original = copy.deepcopy(test_case) - test_case = normalize_service(test_case, sub_dir="./sub_dir") - test_result = expected == test_case - if not test_result: - print("test: ", test_original) - print("expected: ", expected) - print("actual: ", test_case) - assert test_result - - -test_cases_merges = [ - ({}, {}, {}), - ({}, {"test": "test"}, {"test": "test"}), - ({"test": "test"}, {}, {"test": "test"}), - ({"test": "test-1"}, {"test": "test-2"}, {"test": "test-2"}), - ({}, {"build": "."}, {"build": {"context": "."}}), - ({"build": "."}, {}, {"build": {"context": "."}}), - ({"build": "./dir-1"}, {"build": "./dir-2"}, {"build": {"context": "./dir-2"}}), - ({}, {"build": {"context": "./dir-1"}}, {"build": {"context": "./dir-1"}}), - ({"build": {"context": "./dir-1"}}, {}, {"build": {"context": "./dir-1"}}), - ( - {"build": {"context": "./dir-1"}}, - {"build": {"context": "./dir-2"}}, - {"build": {"context": "./dir-2"}}, - ), - ( - {}, - {"build": {"dockerfile": "dockerfile-1"}}, - {"build": {"dockerfile": "dockerfile-1"}}, - ), - ( - {"build": {"dockerfile": "dockerfile-1"}}, - {}, - {"build": {"dockerfile": "dockerfile-1"}}, - ), - ( - {"build": {"dockerfile": "./dockerfile-1"}}, - {"build": {"dockerfile": "./dockerfile-2"}}, - {"build": {"dockerfile": "./dockerfile-2"}}, - ), - ( - {"build": {"dockerfile": "./dockerfile-1"}}, - {"build": {"context": "./dir-2"}}, - {"build": {"dockerfile": "./dockerfile-1", "context": "./dir-2"}}, - ), - ( - {"build": {"dockerfile": "./dockerfile-1", "context": "./dir-1"}}, - {"build": {"dockerfile": "./dockerfile-2", "context": "./dir-2"}}, - {"build": {"dockerfile": "./dockerfile-2", "context": "./dir-2"}}, - ), - ( - {"build": {"dockerfile": "./dockerfile-1"}}, - {"build": 
{"dockerfile": "./dockerfile-2", "args": ["ENV1=1"]}}, - {"build": {"dockerfile": "./dockerfile-2", "args": ["ENV1=1"]}}, - ), - ( - {"build": {"dockerfile": "./dockerfile-2", "args": ["ENV1=1"]}}, - {"build": {"dockerfile": "./dockerfile-1"}}, - {"build": {"dockerfile": "./dockerfile-1", "args": ["ENV1=1"]}}, - ), - ( - {"build": {"dockerfile": "./dockerfile-2", "args": ["ENV1=1"]}}, - {"build": {"dockerfile": "./dockerfile-1", "args": ["ENV2=2"]}}, - {"build": {"dockerfile": "./dockerfile-1", "args": ["ENV1=1", "ENV2=2"]}}, - ), -] - - -def test__parse_compose_file_when_multiple_composes() -> None: - for test_input, test_override, expected_result in copy.deepcopy(test_cases_merges): - compose_test_1 = {"services": {"test-service": test_input}} - compose_test_2 = {"services": {"test-service": test_override}} - dump_yaml(compose_test_1, "test-compose-1.yaml") - dump_yaml(compose_test_2, "test-compose-2.yaml") - - podman_compose = PodmanCompose() - set_args(podman_compose, ["test-compose-1.yaml", "test-compose-2.yaml"]) - - podman_compose._parse_compose_file() # pylint: disable=protected-access - - actual_compose = {} - if podman_compose.services: - podman_compose.services["test-service"].pop("_deps") - actual_compose = podman_compose.services["test-service"] - if actual_compose != expected_result: - print("compose: ", test_input) - print("override: ", test_override) - print("expected: ", expected_result) - print("actual: ", actual_compose) - - compose_expected = expected_result - - assert compose_expected == actual_compose - - -def set_args(podman_compose: PodmanCompose, file_names: list[str]) -> None: - podman_compose.global_args = argparse.Namespace() - podman_compose.global_args.file = file_names - podman_compose.global_args.project_name = None - podman_compose.global_args.env_file = None - podman_compose.global_args.profile = [] - podman_compose.global_args.in_pod = True - podman_compose.global_args.no_normalize = True - - -def dump_yaml(compose: dict, name: str) -> None: - with open(name, "w", encoding="utf-8") as outfile: - yaml.safe_dump(compose, outfile, default_flow_style=False) - - -def test_clean_test_yamls() -> None: - test_files = ["test-compose-1.yaml", "test-compose-2.yaml"] - for file in test_files: - if os.path.exists(file): - os.remove(file) diff --git a/pytests/test_can_merge_cmd_ent.py b/pytests/test_can_merge_cmd_ent.py deleted file mode 100644 index 2120632..0000000 --- a/pytests/test_can_merge_cmd_ent.py +++ /dev/null @@ -1,122 +0,0 @@ -import copy -import os -import argparse -import yaml -from podman_compose import normalize_service, PodmanCompose - -test_keys = ["command", "entrypoint"] - -test_cases_normalise_pre_merge = [ - ({"$$$": []}, {"$$$": []}), - ({"$$$": ["sh"]}, {"$$$": ["sh"]}), - ({"$$$": ["sh", "-c", "date"]}, {"$$$": ["sh", "-c", "date"]}), - ({"$$$": "sh"}, {"$$$": ["sh"]}), - ({"$$$": "sleep infinity"}, {"$$$": ["sleep", "infinity"]}), - ( - {"$$$": "bash -c 'sleep infinity'"}, - {"$$$": ["bash", "-c", "sleep infinity"]}, - ), -] - -test_cases_merges = [ - ({}, {"$$$": []}, {"$$$": []}), - ({"$$$": []}, {}, {"$$$": []}), - ({"$$$": []}, {"$$$": "sh-2"}, {"$$$": ["sh-2"]}), - ({"$$$": "sh-2"}, {"$$$": []}, {"$$$": []}), - ({}, {"$$$": "sh"}, {"$$$": ["sh"]}), - ({"$$$": "sh"}, {}, {"$$$": ["sh"]}), - ({"$$$": "sh-1"}, {"$$$": "sh-2"}, {"$$$": ["sh-2"]}), - ({"$$$": ["sh-1"]}, {"$$$": "sh-2"}, {"$$$": ["sh-2"]}), - ({"$$$": "sh-1"}, {"$$$": ["sh-2"]}, {"$$$": ["sh-2"]}), - ({"$$$": "sh-1"}, {"$$$": ["sh-2", "sh-3"]}, {"$$$": ["sh-2", "sh-3"]}), - 
({"$$$": ["sh-1"]}, {"$$$": ["sh-2", "sh-3"]}, {"$$$": ["sh-2", "sh-3"]}), - ({"$$$": ["sh-1", "sh-2"]}, {"$$$": ["sh-3", "sh-4"]}, {"$$$": ["sh-3", "sh-4"]}), - ({}, {"$$$": ["sh-3", "sh 4"]}, {"$$$": ["sh-3", "sh 4"]}), - ({"$$$": "sleep infinity"}, {"$$$": "sh"}, {"$$$": ["sh"]}), - ({"$$$": "sh"}, {"$$$": "sleep infinity"}, {"$$$": ["sleep", "infinity"]}), - ( - {}, - {"$$$": "bash -c 'sleep infinity'"}, - {"$$$": ["bash", "-c", "sleep infinity"]}, - ), -] - - -def template_to_expression(base, override, expected, key): - base_copy = copy.deepcopy(base) - override_copy = copy.deepcopy(override) - expected_copy = copy.deepcopy(expected) - - expected_copy[key] = expected_copy.pop("$$$") - if "$$$" in base: - base_copy[key] = base_copy.pop("$$$") - if "$$$" in override: - override_copy[key] = override_copy.pop("$$$") - return base_copy, override_copy, expected_copy - - -def test_normalize_service(): - for test_input_template, expected_template in test_cases_normalise_pre_merge: - for key in test_keys: - test_input, _, expected = template_to_expression( - test_input_template, {}, expected_template, key - ) - test_input = normalize_service(test_input) - test_result = expected == test_input - if not test_result: - print("base_template: ", test_input_template) - print("expected: ", expected) - print("actual: ", test_input) - assert test_result - - -def test__parse_compose_file_when_multiple_composes() -> None: - for base_template, override_template, expected_template in copy.deepcopy( - test_cases_merges - ): - for key in test_keys: - base, override, expected = template_to_expression( - base_template, override_template, expected_template, key - ) - compose_test_1 = {"services": {"test-service": base}} - compose_test_2 = {"services": {"test-service": override}} - dump_yaml(compose_test_1, "test-compose-1.yaml") - dump_yaml(compose_test_2, "test-compose-2.yaml") - - podman_compose = PodmanCompose() - set_args(podman_compose, ["test-compose-1.yaml", "test-compose-2.yaml"]) - - podman_compose._parse_compose_file() # pylint: disable=protected-access - - actual = {} - if podman_compose.services: - podman_compose.services["test-service"].pop("_deps") - actual = podman_compose.services["test-service"] - if actual != expected: - print("compose: ", base) - print("override: ", override) - print("result: ", expected) - - assert expected == actual - - -def set_args(podman_compose: PodmanCompose, file_names: list[str]) -> None: - podman_compose.global_args = argparse.Namespace() - podman_compose.global_args.file = file_names - podman_compose.global_args.project_name = None - podman_compose.global_args.env_file = None - podman_compose.global_args.profile = [] - podman_compose.global_args.in_pod = True - podman_compose.global_args.no_normalize = None - - -def dump_yaml(compose: dict, name: str) -> None: - with open(name, "w", encoding="utf-8") as outfile: - yaml.safe_dump(compose, outfile, default_flow_style=False) - - -def test_clean_test_yamls() -> None: - test_files = ["test-compose-1.yaml", "test-compose-2.yaml"] - for file in test_files: - if os.path.exists(file): - os.remove(file) diff --git a/pytests/test_normalize_final_build.py b/pytests/test_normalize_final_build.py deleted file mode 100644 index e281174..0000000 --- a/pytests/test_normalize_final_build.py +++ /dev/null @@ -1,298 +0,0 @@ -# pylint: disable=protected-access - -import argparse -import copy -import os -import yaml -from podman_compose import ( - normalize_service, - normalize, - normalize_final, - normalize_service_final, - 
PodmanCompose, -) - -cwd = os.path.abspath(".") -test_cases_simple_normalization = [ - ({"image": "test-image"}, {"image": "test-image"}), - ( - {"build": "."}, - { - "build": {"context": cwd, "dockerfile": "Dockerfile"}, - }, - ), - ( - {"build": "../relative"}, - { - "build": { - "context": os.path.normpath(os.path.join(cwd, "../relative")), - "dockerfile": "Dockerfile", - }, - }, - ), - ( - {"build": "./relative"}, - { - "build": { - "context": os.path.normpath(os.path.join(cwd, "./relative")), - "dockerfile": "Dockerfile", - }, - }, - ), - ( - {"build": "/workspace/absolute"}, - { - "build": { - "context": "/workspace/absolute", - "dockerfile": "Dockerfile", - }, - }, - ), - ( - { - "build": { - "dockerfile": "Dockerfile", - }, - }, - { - "build": { - "context": cwd, - "dockerfile": "Dockerfile", - }, - }, - ), - ( - { - "build": { - "context": ".", - }, - }, - { - "build": { - "context": cwd, - "dockerfile": "Dockerfile", - }, - }, - ), - ( - { - "build": {"context": "../", "dockerfile": "test-dockerfile"}, - }, - { - "build": { - "context": os.path.normpath(os.path.join(cwd, "../")), - "dockerfile": "test-dockerfile", - }, - }, - ), - ( - { - "build": {"context": ".", "dockerfile": "./dev/test-dockerfile"}, - }, - { - "build": { - "context": cwd, - "dockerfile": "./dev/test-dockerfile", - }, - }, - ), -] - - -# -# [service.build] is normalised after merges -# -def test_normalize_service_final_returns_absolute_path_in_context() -> None: - project_dir = cwd - for test_input, expected_service in copy.deepcopy(test_cases_simple_normalization): - actual_service = normalize_service_final(test_input, project_dir) - assert expected_service == actual_service - - -def test_normalize_returns_absolute_path_in_context() -> None: - project_dir = cwd - for test_input, expected_result in copy.deepcopy(test_cases_simple_normalization): - compose_test = {"services": {"test-service": test_input}} - compose_expected = {"services": {"test-service": expected_result}} - actual_compose = normalize_final(compose_test, project_dir) - assert compose_expected == actual_compose - - -# -# running full parse over single compose files -# -def test__parse_compose_file_when_single_compose() -> None: - for test_input, expected_result in copy.deepcopy(test_cases_simple_normalization): - compose_test = {"services": {"test-service": test_input}} - dump_yaml(compose_test, "test-compose.yaml") - - podman_compose = PodmanCompose() - set_args(podman_compose, ["test-compose.yaml"], no_normalize=None) - - podman_compose._parse_compose_file() - - actual_compose = {} - if podman_compose.services: - podman_compose.services["test-service"].pop("_deps") - actual_compose = podman_compose.services["test-service"] - if actual_compose != expected_result: - print("compose: ", test_input) - print("result: ", expected_result) - - assert expected_result == actual_compose - - -test_cases_with_merges = [ - ( - {}, - {"build": "."}, - {"build": {"context": cwd, "dockerfile": "Dockerfile"}}, - ), - ( - {"build": "."}, - {}, - {"build": {"context": cwd, "dockerfile": "Dockerfile"}}, - ), - ( - {"build": "/workspace/absolute"}, - {"build": "./relative"}, - { - "build": { - "context": os.path.normpath(os.path.join(cwd, "./relative")), - "dockerfile": "Dockerfile", - } - }, - ), - ( - {"build": "./relative"}, - {"build": "/workspace/absolute"}, - {"build": {"context": "/workspace/absolute", "dockerfile": "Dockerfile"}}, - ), - ( - {"build": "./relative"}, - {"build": "/workspace/absolute"}, - {"build": {"context": "/workspace/absolute", 
"dockerfile": "Dockerfile"}}, - ), - ( - {"build": {"dockerfile": "test-dockerfile"}}, - {}, - {"build": {"context": cwd, "dockerfile": "test-dockerfile"}}, - ), - ( - {}, - {"build": {"dockerfile": "test-dockerfile"}}, - {"build": {"context": cwd, "dockerfile": "test-dockerfile"}}, - ), - ( - {}, - {"build": {"dockerfile": "test-dockerfile"}}, - {"build": {"context": cwd, "dockerfile": "test-dockerfile"}}, - ), - ( - {"build": {"dockerfile": "test-dockerfile-1"}}, - {"build": {"dockerfile": "test-dockerfile-2"}}, - {"build": {"context": cwd, "dockerfile": "test-dockerfile-2"}}, - ), - ( - {"build": "/workspace/absolute"}, - {"build": {"dockerfile": "test-dockerfile"}}, - {"build": {"context": "/workspace/absolute", "dockerfile": "test-dockerfile"}}, - ), - ( - {"build": {"dockerfile": "test-dockerfile"}}, - {"build": "/workspace/absolute"}, - {"build": {"context": "/workspace/absolute", "dockerfile": "test-dockerfile"}}, - ), - ( - {"build": {"dockerfile": "./test-dockerfile-1"}}, - {"build": {"dockerfile": "./test-dockerfile-2", "args": ["ENV1=1"]}}, - { - "build": { - "context": cwd, - "dockerfile": "./test-dockerfile-2", - "args": ["ENV1=1"], - } - }, - ), - ( - {"build": {"dockerfile": "./test-dockerfile-1", "args": ["ENV1=1"]}}, - {"build": {"dockerfile": "./test-dockerfile-2"}}, - { - "build": { - "context": cwd, - "dockerfile": "./test-dockerfile-2", - "args": ["ENV1=1"], - } - }, - ), - ( - {"build": {"dockerfile": "./test-dockerfile-1", "args": ["ENV1=1"]}}, - {"build": {"dockerfile": "./test-dockerfile-2", "args": ["ENV2=2"]}}, - { - "build": { - "context": cwd, - "dockerfile": "./test-dockerfile-2", - "args": ["ENV1=1", "ENV2=2"], - } - }, - ), -] - - -# -# running full parse over merged -# -def test__parse_compose_file_when_multiple_composes() -> None: - for test_input, test_override, expected_result in copy.deepcopy( - test_cases_with_merges - ): - compose_test_1 = {"services": {"test-service": test_input}} - compose_test_2 = {"services": {"test-service": test_override}} - dump_yaml(compose_test_1, "test-compose-1.yaml") - dump_yaml(compose_test_2, "test-compose-2.yaml") - - podman_compose = PodmanCompose() - set_args( - podman_compose, - ["test-compose-1.yaml", "test-compose-2.yaml"], - no_normalize=None, - ) - - podman_compose._parse_compose_file() - - actual_compose = {} - if podman_compose.services: - podman_compose.services["test-service"].pop("_deps") - actual_compose = podman_compose.services["test-service"] - if actual_compose != expected_result: - print("compose: ", test_input) - print("override: ", test_override) - print("result: ", expected_result) - compose_expected = expected_result - - assert compose_expected == actual_compose - - -def set_args( - podman_compose: PodmanCompose, file_names: list[str], no_normalize: bool -) -> None: - podman_compose.global_args = argparse.Namespace() - podman_compose.global_args.file = file_names - podman_compose.global_args.project_name = None - podman_compose.global_args.env_file = None - podman_compose.global_args.profile = [] - podman_compose.global_args.in_pod = True - podman_compose.global_args.no_normalize = no_normalize - - -def dump_yaml(compose: dict, name: str) -> None: - # Path(Path.cwd()/"subdirectory").mkdir(parents=True, exist_ok=True) - with open(name, "w", encoding="utf-8") as outfile: - yaml.safe_dump(compose, outfile, default_flow_style=False) - - -def test_clean_test_yamls() -> None: - test_files = ["test-compose-1.yaml", "test-compose-2.yaml", "test-compose.yaml"] - for file in test_files: - if 
os.path.exists(file): - os.remove(file) diff --git a/pytests/test_volumes.py b/pytests/test_volumes.py deleted file mode 100644 index 7f9d698..0000000 --- a/pytests/test_volumes.py +++ /dev/null @@ -1,21 +0,0 @@ -# pylint: disable=redefined-outer-name -import pytest - -from podman_compose import parse_short_mount - - -@pytest.fixture -def multi_propagation_mount_str(): - return "/foo/bar:/baz:U,Z" - - -def test_parse_short_mount_multi_propagation(multi_propagation_mount_str): - expected = { - "type": "bind", - "source": "/foo/bar", - "target": "/baz", - "bind": { - "propagation": "U,Z", - }, - } - assert parse_short_mount(multi_propagation_mount_str, "/") == expected diff --git a/setup.py b/setup.py index 5222c14..1417b52 100644 --- a/setup.py +++ b/setup.py @@ -1,10 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 + import os + from setuptools import setup try: - README = open( - os.path.join(os.path.dirname(__file__), "README.md"), encoding="utf-8" - ).read() + README = open(os.path.join(os.path.dirname(__file__), "README.md"), encoding="utf-8").read() except: # noqa: E722 # pylint: disable=bare-except README = "" @@ -39,19 +40,10 @@ setup( "pyyaml", "python-dotenv", ], - extras_require={ - "devel": [ - "flake8", - "black", - "pylint", - "pre-commit", - ] - } + extras_require={"devel": ["ruff", "pre-commit", "coverage", "parameterized"]}, # test_suite='tests', # tests_require=[ # 'coverage', - # 'pytest-cov', - # 'pytest', # 'tox', # ] ) diff --git a/test-requirements.txt b/test-requirements.txt index 5a20426..edafb76 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,9 +1,34 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. +-e . +coverage==7.4.3 +parameterized==0.9.0 +pytest==8.0.2 +tox==4.13.0 +ruff==0.3.1 +pylint==3.1.0 -coverage -pytest-cov -pytest -tox -black +# The packages below are transitive dependencies of the packages above and are included here +# to make testing reproducible. +# To refresh, create a new virtualenv and do: +# pip install -r requirements.txt -r test-requirements.txt +# pip freeze > test-requirements.txt +# and edit test-requirements.txt to add this comment + +astroid==3.1.0 +cachetools==5.3.3 +chardet==5.2.0 +colorama==0.4.6 +dill==0.3.8 +distlib==0.3.8 +filelock==3.13.1 +iniconfig==2.0.0 +isort==5.13.2 +mccabe==0.7.0 +packaging==23.2 +platformdirs==4.2.0 +pluggy==1.4.0 +pyproject-api==1.6.1 +python-dotenv==1.0.1 +PyYAML==6.0.1 +requests +tomlkit==0.12.4 +virtualenv==20.25.1 diff --git a/tests/build/README.md b/tests/build/README.md deleted file mode 100644 index e67b421..0000000 --- a/tests/build/README.md +++ /dev/null @@ -1,25 +0,0 @@ -# Test podman-compose with build - -``` -podman-compose build -podman-compose up -d -curl http://localhost:8080/index.txt -curl http://localhost:8000/index.txt -podman inspect my-busybox-httpd2 -podman-compose down -``` - -expected output would be something like - -``` -2019-09-03T15:16:38+0000 -ALT buildno=2 port 8000 2019-09-03T15:16:38+0000 -{ -... -} -``` - -as you can see we were able to override buildno to be 2 instead of 1, -and httpd_port to 8000. 
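The deleted tests/build README above shows `buildno` and `httpd_port` being overridden through compose build arguments. As a rough, hypothetical sketch (the `build_command` helper and the service dict shape are assumptions, not this project's actual code), the translation from a compose `build.args` section into `podman build` flags can be pictured like this:

```python
# Illustrative only: map a compose-style service's build section to a
# `podman build` command line. Assumes `args` entries are either a mapping
# or "KEY=VALUE" strings, as in the compose file used by this test.


def build_command(service):
    build = service.get("build", {})
    if isinstance(build, str):  # short form: the value is just the context dir
        build = {"context": build}

    cmd = ["podman", "build"]
    if "dockerfile" in build:
        cmd += ["-f", build["dockerfile"]]
    if "image" in service:
        cmd += ["-t", service["image"]]

    args = build.get("args", {})
    pairs = args.items() if isinstance(args, dict) else (a.split("=", 1) for a in args)
    for key, value in pairs:
        cmd += ["--build-arg", "%s=%s" % (key, value)]

    cmd.append(build.get("context", "."))
    return cmd


# Overriding buildno and httpd_port as the README describes:
print(build_command({
    "image": "my-busybox-httpd2",
    "build": {"context": ".", "args": {"buildno": "2", "httpd_port": "8000"}},
}))
```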
- -NOTE: build labels are not passed to `podman build` diff --git a/tests/build_fail/README.md b/tests/build_fail/README.md deleted file mode 100644 index 5d5b1ed..0000000 --- a/tests/build_fail/README.md +++ /dev/null @@ -1,22 +0,0 @@ -# Test podman-compose with build (fail scenario) - -```shell -podman-compose build || echo $? -``` - -expected output would be something like - -``` -STEP 1/3: FROM busybox -STEP 2/3: RUN this_command_does_not_exist -/bin/sh: this_command_does_not_exist: not found -Error: building at STEP "RUN this_command_does_not_exist": while running runtime: exit status 127 - -exit code: 127 -``` - -Expected `podman-compose` exit code: -```shell -echo $? -127 -``` diff --git a/tests/conftest.py b/tests/conftest.py deleted file mode 100644 index 05deac2..0000000 --- a/tests/conftest.py +++ /dev/null @@ -1,26 +0,0 @@ -"""conftest.py - -Defines global pytest fixtures available to all tests. -""" -# pylint: disable=redefined-outer-name -from pathlib import Path -import os -import pytest - - -@pytest.fixture -def base_path(): - """Returns the base path for the project""" - return Path(__file__).parent.parent - - -@pytest.fixture -def test_path(base_path): - """Returns the path to the tests directory""" - return os.path.join(base_path, "tests") - - -@pytest.fixture -def podman_compose_path(base_path): - """Returns the path to the podman compose script""" - return os.path.join(base_path, "podman_compose.py") diff --git a/tests/deps/README.md b/tests/deps/README.md deleted file mode 100644 index bde213a..0000000 --- a/tests/deps/README.md +++ /dev/null @@ -1,4 +0,0 @@ - -``` -podman-compose run --rm sleep /bin/sh -c 'wget -O - http://localhost:8000/hosts' -``` diff --git a/tests/deps/docker-compose.yaml b/tests/deps/docker-compose.yaml deleted file mode 100644 index 0f06bbd..0000000 --- a/tests/deps/docker-compose.yaml +++ /dev/null @@ -1,24 +0,0 @@ -version: "3.7" -services: - web: - image: busybox - command: ["/bin/busybox", "httpd", "-f", "-h", "/etc/", "-p", "8000"] - tmpfs: - - /run - - /tmp - sleep: - image: busybox - command: ["/bin/busybox", "sh", "-c", "sleep 3600"] - depends_on: "web" - tmpfs: - - /run - - /tmp - sleep2: - image: busybox - command: ["/bin/busybox", "sh", "-c", "sleep 3600"] - depends_on: - - sleep - tmpfs: - - /run - - /tmp - diff --git a/tests/env-file-tests/README.md b/tests/env-file-tests/README.md deleted file mode 100644 index 0e4614e..0000000 --- a/tests/env-file-tests/README.md +++ /dev/null @@ -1,9 +0,0 @@ -running the following commands should always give podman-rocks-123 - -``` -podman-compose -f project/container-compose.yaml --env-file env-files/project-1.env up -``` - -``` -podman-compose -f $(pwd)/project/container-compose.yaml --env-file $(pwd)/env-files/project-1.env up -``` diff --git a/tests/env-file-tests/env-files/project-1.env b/tests/env-file-tests/env-files/project-1.env deleted file mode 100644 index f49c621..0000000 --- a/tests/env-file-tests/env-files/project-1.env +++ /dev/null @@ -1 +0,0 @@ -ZZVAR1=podman-rocks-123 diff --git a/tests/exit-from/docker-compose.yaml b/tests/exit-from/docker-compose.yaml deleted file mode 100644 index cfc3897..0000000 --- a/tests/exit-from/docker-compose.yaml +++ /dev/null @@ -1,21 +0,0 @@ -version: "3" -services: - too_long: - image: busybox - command: ["/bin/busybox", "sh", "-c", "sleep 3600; exit 0"] - tmpfs: - - /run - - /tmp - sh1: - image: busybox - command: ["/bin/busybox", "sh", "-c", "sleep 5; exit 1"] - tmpfs: - - /run - - /tmp - sh2: - image: busybox - command: ["/bin/busybox", "sh", 
"-c", "sleep 5; exit 2"] - tmpfs: - - /run - - /tmp - diff --git a/tests/extends_w_file_subdir/sub/docker/example/Dockerfile b/tests/extends_w_file_subdir/sub/docker/example/Dockerfile deleted file mode 100644 index 14b3ba5..0000000 --- a/tests/extends_w_file_subdir/sub/docker/example/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -FROM busybox as base diff --git a/tests/include/docker-compose.base.yaml b/tests/include/docker-compose.base.yaml deleted file mode 100644 index e356a14..0000000 --- a/tests/include/docker-compose.base.yaml +++ /dev/null @@ -1,7 +0,0 @@ -version: '3.6' - -services: - web: - image: busybox - command: ["/bin/busybox", "httpd", "-f", "-h", ".", "-p", "8003"] - diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py new file mode 100644 index 0000000..dbd5d2f --- /dev/null +++ b/tests/integration/__init__.py @@ -0,0 +1,12 @@ +import os +import subprocess + + +def create_base_test_image(): + subprocess.check_call( + ['podman', 'build', '-t', 'nopush/podman-compose-test', '.'], + cwd=os.path.join(os.path.dirname(__file__), "base_image"), + ) + + +create_base_test_image() diff --git a/tests/integration/additional_contexts/README.md b/tests/integration/additional_contexts/README.md new file mode 100644 index 0000000..a64510c --- /dev/null +++ b/tests/integration/additional_contexts/README.md @@ -0,0 +1,14 @@ +# Test podman-compose with build.additional_contexts + +``` +podman-compose build +podman-compose up +podman-compose down +``` + +expected output would be + +``` +[dict] | Data for dict +[list] | Data for list +``` diff --git a/tests/integration/additional_contexts/data_for_dict/data.txt b/tests/integration/additional_contexts/data_for_dict/data.txt new file mode 100644 index 0000000..068b08d --- /dev/null +++ b/tests/integration/additional_contexts/data_for_dict/data.txt @@ -0,0 +1 @@ +Data for dict diff --git a/tests/integration/additional_contexts/data_for_list/data.txt b/tests/integration/additional_contexts/data_for_list/data.txt new file mode 100644 index 0000000..a233ee4 --- /dev/null +++ b/tests/integration/additional_contexts/data_for_list/data.txt @@ -0,0 +1 @@ +Data for list diff --git a/tests/integration/additional_contexts/project/Dockerfile b/tests/integration/additional_contexts/project/Dockerfile new file mode 100644 index 0000000..7e75b5d --- /dev/null +++ b/tests/integration/additional_contexts/project/Dockerfile @@ -0,0 +1,3 @@ +FROM busybox +COPY --from=data data.txt /data/data.txt +CMD ["busybox", "cat", "/data/data.txt"] diff --git a/tests/integration/additional_contexts/project/docker-compose.yml b/tests/integration/additional_contexts/project/docker-compose.yml new file mode 100644 index 0000000..1282723 --- /dev/null +++ b/tests/integration/additional_contexts/project/docker-compose.yml @@ -0,0 +1,12 @@ +version: "3.7" +services: + dict: + build: + context: . + additional_contexts: + data: ../data_for_dict + list: + build: + context: . 
+ additional_contexts: + - data=../data_for_list diff --git a/tests/integration/base_image/Dockerfile b/tests/integration/base_image/Dockerfile new file mode 100644 index 0000000..5fd76c3 --- /dev/null +++ b/tests/integration/base_image/Dockerfile @@ -0,0 +1,6 @@ +FROM docker.io/library/debian:bookworm-slim +RUN apt-get update \ + && apt-get install -y \ + dumb-init \ + busybox \ + wget diff --git a/tests/build/context/Dockerfile b/tests/integration/build/context/Dockerfile similarity index 100% rename from tests/build/context/Dockerfile rename to tests/integration/build/context/Dockerfile diff --git a/tests/build/context/Dockerfile-alt b/tests/integration/build/context/Dockerfile-alt similarity index 100% rename from tests/build/context/Dockerfile-alt rename to tests/integration/build/context/Dockerfile-alt diff --git a/tests/build/docker-compose.yml b/tests/integration/build/docker-compose.yml similarity index 100% rename from tests/build/docker-compose.yml rename to tests/integration/build/docker-compose.yml diff --git a/tests/build_fail/context/Dockerfile b/tests/integration/build_fail/context/Dockerfile similarity index 100% rename from tests/build_fail/context/Dockerfile rename to tests/integration/build_fail/context/Dockerfile diff --git a/tests/build_fail/docker-compose.yml b/tests/integration/build_fail/docker-compose.yml similarity index 100% rename from tests/build_fail/docker-compose.yml rename to tests/integration/build_fail/docker-compose.yml diff --git a/tests/integration/build_labels/context/Dockerfile b/tests/integration/build_labels/context/Dockerfile new file mode 100644 index 0000000..24a79d0 --- /dev/null +++ b/tests/integration/build_labels/context/Dockerfile @@ -0,0 +1 @@ +FROM busybox diff --git a/tests/integration/build_labels/docker-compose.yml b/tests/integration/build_labels/docker-compose.yml new file mode 100644 index 0000000..a901d83 --- /dev/null +++ b/tests/integration/build_labels/docker-compose.yml @@ -0,0 +1,22 @@ +version: "3" +services: + test_build_labels_map: + build: + context: ./context + dockerfile: Dockerfile + labels: + com.example.description: "Accounting webapp" + com.example.department: "Finance" + com.example.label-with-empty-value: "" + image: my-busybox-build-labels-map + command: env + test_build_labels_array: + build: + context: ./context + dockerfile: Dockerfile + labels: + - "com.example.description=Accounting webapp" + - "com.example.department=Finance" + - "com.example.label-with-empty-value" + image: my-busybox-build-labels-array + command: env diff --git a/tests/integration/build_labels/test_build_labels.py b/tests/integration/build_labels/test_build_labels.py new file mode 100644 index 0000000..aaa37a5 --- /dev/null +++ b/tests/integration/build_labels/test_build_labels.py @@ -0,0 +1,60 @@ +# SPDX-License-Identifier: GPL-2.0 + + +import json +import os +import unittest + +from tests.integration.test_podman_compose import podman_compose_path +from tests.integration.test_podman_compose import test_path +from tests.integration.test_utils import RunSubprocessMixin + + +class TestBuildLabels(unittest.TestCase, RunSubprocessMixin): + def test_build_labels(self): + """The build context can contain labels which should be added to the resulting image. They + can be either an array or a map. 
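The docstring here notes that build labels may be given either as a map or as an array of `key=value` strings, with a bare key meaning an empty value, as in the compose file added by this patch. A minimal sketch of that normalization, using an assumed helper name rather than the project's actual function:

```python
# Sketch only; `normalize_labels` is a hypothetical name, not this project's API.


def normalize_labels(labels):
    """Accept either a mapping or a list of "key=value" strings."""
    if isinstance(labels, dict):
        return {str(k): str(v) for k, v in labels.items()}
    normalized = {}
    for entry in labels:
        key, _, value = entry.partition("=")  # a bare key becomes an empty value
        normalized[key] = value
    return normalized


assert normalize_labels({"com.example.department": "Finance"}) == {
    "com.example.department": "Finance"
}
assert normalize_labels([
    "com.example.description=Accounting webapp",
    "com.example.label-with-empty-value",
]) == {
    "com.example.description": "Accounting webapp",
    "com.example.label-with-empty-value": "",
}
```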
+ """ + + compose_path = os.path.join(test_path(), "build_labels/docker-compose.yml") + + try: + self.run_subprocess_assert_returncode([ + podman_compose_path(), + "-f", + compose_path, + "build", + "test_build_labels_map", + "test_build_labels_array", + ]) + + expected_labels = { + "com.example.department": "Finance", + "com.example.description": "Accounting webapp", + "com.example.label-with-empty-value": "", + } + + out, _ = self.run_subprocess_assert_returncode([ + "podman", + "inspect", + "my-busybox-build-labels-map", + "my-busybox-build-labels-array", + ]) + + images = json.loads(out) + self.assertEqual(len(images), 2) + labels_map = images[0].get("Config", {}).get("Labels", {}) + labels_array = images[1].get("Config", {}).get("Labels", {}) + for k, v in expected_labels.items(): + self.assertIn(k, labels_map) + self.assertEqual(labels_map[k], v) + self.assertIn(k, labels_array) + self.assertEqual(labels_array[k], v) + + finally: + self.run_subprocess_assert_returncode([ + "podman", + "rmi", + "my-busybox-build-labels-map", + "my-busybox-build-labels-array", + ]) diff --git a/tests/integration/build_secrets/Dockerfile b/tests/integration/build_secrets/Dockerfile new file mode 100644 index 0000000..baae1a5 --- /dev/null +++ b/tests/integration/build_secrets/Dockerfile @@ -0,0 +1,9 @@ +FROM busybox + +RUN --mount=type=secret,required=true,id=build_secret \ + ls -l /run/secrets/ && cat /run/secrets/build_secret + +RUN --mount=type=secret,required=true,id=build_secret,target=/tmp/secret \ + ls -l /run/secrets/ /tmp/ && cat /tmp/secret + +CMD [ 'echo', 'nothing here' ] diff --git a/tests/integration/build_secrets/docker-compose.yaml b/tests/integration/build_secrets/docker-compose.yaml new file mode 100644 index 0000000..73ef2a9 --- /dev/null +++ b/tests/integration/build_secrets/docker-compose.yaml @@ -0,0 +1,22 @@ +version: "3.8" + +services: + test: + image: test + secrets: + - run_secret # implicitly mount to /run/secrets/run_secret + - source: run_secret + target: /tmp/run_secret2 # explicit mount point + + build: + context: . + secrets: + - build_secret # can be mounted in Dockerfile with "RUN --mount=type=secret,id=build_secret" + - source: build_secret + target: build_secret2 # rename to build_secret2 + +secrets: + build_secret: + file: ./my_secret + run_secret: + file: ./my_secret diff --git a/tests/integration/build_secrets/docker-compose.yaml.invalid b/tests/integration/build_secrets/docker-compose.yaml.invalid new file mode 100644 index 0000000..c28c2ec --- /dev/null +++ b/tests/integration/build_secrets/docker-compose.yaml.invalid @@ -0,0 +1,18 @@ +version: "3.8" + +services: + test: + image: test + build: + context: . + secrets: + # invalid target argument + # + # According to https://github.com/compose-spec/compose-spec/blob/master/build.md, target is + # supposed to be the "name of a *file* to be mounted in /run/secrets/". Not a path. 
+ - source: build_secret + target: /build_secret + +secrets: + build_secret: + file: ./my_secret diff --git a/tests/secrets/my_secret b/tests/integration/build_secrets/my_secret similarity index 100% rename from tests/secrets/my_secret rename to tests/integration/build_secrets/my_secret diff --git a/tests/integration/deps/docker-compose.yaml b/tests/integration/deps/docker-compose.yaml new file mode 100644 index 0000000..cb0a9ee --- /dev/null +++ b/tests/integration/deps/docker-compose.yaml @@ -0,0 +1,25 @@ +version: "3.7" +services: + web: + image: nopush/podman-compose-test + command: ["dumb-init", "/bin/busybox", "httpd", "-f", "-h", "/etc/", "-p", "8000"] + tmpfs: + - /run + - /tmp + sleep: + image: nopush/podman-compose-test + command: ["dumb-init", "/bin/busybox", "sh", "-c", "sleep 3600"] + depends_on: + - "web" + tmpfs: + - /run + - /tmp + sleep2: + image: nopush/podman-compose-test + command: ["dumb-init", "/bin/busybox", "sh", "-c", "sleep 3600"] + depends_on: + - sleep + tmpfs: + - /run + - /tmp + diff --git a/tests/integration/env-file-tests/.env b/tests/integration/env-file-tests/.env new file mode 100644 index 0000000..0c167ce --- /dev/null +++ b/tests/integration/env-file-tests/.env @@ -0,0 +1,2 @@ +ZZVAR1='This value is overwritten by env-file-tests/.env' +ZZVAR3='This value is loaded from env-file-tests/.env' diff --git a/tests/integration/env-file-tests/.gitignore b/tests/integration/env-file-tests/.gitignore new file mode 100644 index 0000000..e157843 --- /dev/null +++ b/tests/integration/env-file-tests/.gitignore @@ -0,0 +1,4 @@ +# This overrides the repository root .gitignore (ignoring all .env). +# The .env files in this directory are important for the test cases. +!.env +!project/.env diff --git a/tests/integration/env-file-tests/README.md b/tests/integration/env-file-tests/README.md new file mode 100644 index 0000000..9de7018 --- /dev/null +++ b/tests/integration/env-file-tests/README.md @@ -0,0 +1,37 @@ +running the following commands should always give podman-rocks-123 + +``` +podman-compose -f project/container-compose.yaml --env-file env-files/project-1.env up +``` + +``` +podman-compose -f $(pwd)/project/container-compose.yaml --env-file $(pwd)/env-files/project-1.env up +``` + +``` +podman-compose -f $(pwd)/project/container-compose.env-file-flat.yaml up +``` + +``` +podman-compose -f $(pwd)/project/container-compose.env-file-obj.yaml up +``` + +``` +podman-compose -f $(pwd)/project/container-compose.env-file-obj-optional.yaml up +``` + +based on environment variable precedent this command should give podman-rocks-321 + +``` +ZZVAR1=podman-rocks-321 podman-compose -f $(pwd)/project/container-compose.yaml --env-file $(pwd)/env-files/project-1.env up +``` + +_The below test should print three environment variables_ + +``` +podman-compose -f $(pwd)/project/container-compose.load-.env-in-project.yaml run --rm app + +ZZVAR1=This value is overwritten by env-file-tests/.env +ZZVAR2=This value is loaded from .env in project/ directory +ZZVAR3=This value is loaded from env-file-tests/.env +``` diff --git a/tests/integration/env-file-tests/env-files/project-1.env b/tests/integration/env-file-tests/env-files/project-1.env new file mode 100644 index 0000000..617c3d5 --- /dev/null +++ b/tests/integration/env-file-tests/env-files/project-1.env @@ -0,0 +1,3 @@ +ZZVAR1=podman-rocks-123 +ZZVAR2=podman-rocks-124 +ZZVAR3=podman-rocks-125 diff --git a/tests/integration/env-file-tests/env-files/project-2.env b/tests/integration/env-file-tests/env-files/project-2.env new file mode 
100644 index 0000000..236c5ff --- /dev/null +++ b/tests/integration/env-file-tests/env-files/project-2.env @@ -0,0 +1,2 @@ +ZZVAR1=podman-rocks-223 +ZZVAR2=podman-rocks-224 diff --git a/tests/integration/env-file-tests/project/.env b/tests/integration/env-file-tests/project/.env new file mode 100644 index 0000000..cc2c0ce --- /dev/null +++ b/tests/integration/env-file-tests/project/.env @@ -0,0 +1,2 @@ +ZZVAR1='This value is loaded but should be overwritten' +ZZVAR2='This value is loaded from .env in project/ directory' diff --git a/tests/integration/env-file-tests/project/container-compose.env-file-flat.yaml b/tests/integration/env-file-tests/project/container-compose.env-file-flat.yaml new file mode 100644 index 0000000..4e010ac --- /dev/null +++ b/tests/integration/env-file-tests/project/container-compose.env-file-flat.yaml @@ -0,0 +1,9 @@ +services: + app: + image: busybox + command: ["/bin/busybox", "sh", "-c", "env | grep ZZ"] + tmpfs: + - /run + - /tmp + env_file: + - ../env-files/project-1.env diff --git a/tests/integration/env-file-tests/project/container-compose.env-file-obj-optional.yaml b/tests/integration/env-file-tests/project/container-compose.env-file-obj-optional.yaml new file mode 100644 index 0000000..cfcdb64 --- /dev/null +++ b/tests/integration/env-file-tests/project/container-compose.env-file-obj-optional.yaml @@ -0,0 +1,11 @@ +services: + app: + image: busybox + command: ["/bin/busybox", "sh", "-c", "env | grep ZZ"] + tmpfs: + - /run + - /tmp + env_file: + - path: ../env-files/project-1.env + - path: ../env-files/project-2.env + required: false diff --git a/tests/integration/env-file-tests/project/container-compose.env-file-obj.yaml b/tests/integration/env-file-tests/project/container-compose.env-file-obj.yaml new file mode 100644 index 0000000..a7e35a3 --- /dev/null +++ b/tests/integration/env-file-tests/project/container-compose.env-file-obj.yaml @@ -0,0 +1,9 @@ +services: + app: + image: busybox + command: ["/bin/busybox", "sh", "-c", "env | grep ZZ"] + tmpfs: + - /run + - /tmp + env_file: + - path: ../env-files/project-1.env diff --git a/tests/integration/env-file-tests/project/container-compose.load-.env-in-project.yaml b/tests/integration/env-file-tests/project/container-compose.load-.env-in-project.yaml new file mode 100644 index 0000000..6cabc0b --- /dev/null +++ b/tests/integration/env-file-tests/project/container-compose.load-.env-in-project.yaml @@ -0,0 +1,11 @@ +services: + app: + image: busybox + command: ["/bin/busybox", "sh", "-c", "env | grep ZZ"] + tmpfs: + - /run + - /tmp + environment: + ZZVAR1: $ZZVAR1 + ZZVAR2: $ZZVAR2 + ZZVAR3: $ZZVAR3 diff --git a/tests/env-file-tests/project/container-compose.yaml b/tests/integration/env-file-tests/project/container-compose.yaml similarity index 100% rename from tests/env-file-tests/project/container-compose.yaml rename to tests/integration/env-file-tests/project/container-compose.yaml diff --git a/tests/env-tests/README.md b/tests/integration/env-tests/README.md similarity index 100% rename from tests/env-tests/README.md rename to tests/integration/env-tests/README.md diff --git a/tests/env-tests/container-compose.yml b/tests/integration/env-tests/container-compose.yml similarity index 100% rename from tests/env-tests/container-compose.yml rename to tests/integration/env-tests/container-compose.yml diff --git a/tests/exit-from/README.md b/tests/integration/exit-from/README.md similarity index 100% rename from tests/exit-from/README.md rename to tests/integration/exit-from/README.md diff --git 
a/tests/integration/exit-from/docker-compose.yaml b/tests/integration/exit-from/docker-compose.yaml new file mode 100644 index 0000000..a286b2a --- /dev/null +++ b/tests/integration/exit-from/docker-compose.yaml @@ -0,0 +1,21 @@ +version: "3" +services: + too_long: + image: nopush/podman-compose-test + command: ["dumb-init", "/bin/busybox", "sh", "-c", "sleep 3600; exit 0"] + tmpfs: + - /run + - /tmp + sh1: + image: nopush/podman-compose-test + command: ["dumb-init", "/bin/busybox", "sh", "-c", "sleep 1; exit 1"] + tmpfs: + - /run + - /tmp + sh2: + image: nopush/podman-compose-test + command: ["dumb-init", "/bin/busybox", "sh", "-c", "sleep 1; exit 2"] + tmpfs: + - /run + - /tmp + diff --git a/tests/extends/docker-compose.yaml b/tests/integration/extends/docker-compose.yaml similarity index 100% rename from tests/extends/docker-compose.yaml rename to tests/integration/extends/docker-compose.yaml diff --git a/tests/extends_w_empty_service/common-services.yml b/tests/integration/extends_w_empty_service/common-services.yml similarity index 66% rename from tests/extends_w_empty_service/common-services.yml rename to tests/integration/extends_w_empty_service/common-services.yml index b7deb2c..b1138a5 100644 --- a/tests/extends_w_empty_service/common-services.yml +++ b/tests/integration/extends_w_empty_service/common-services.yml @@ -2,6 +2,6 @@ services: webapp_default: webapp_special: - image: busybox + image: nopush/podman-compose-test volumes: - "/data" diff --git a/tests/extends_w_empty_service/docker-compose.yml b/tests/integration/extends_w_empty_service/docker-compose.yml similarity index 80% rename from tests/extends_w_empty_service/docker-compose.yml rename to tests/integration/extends_w_empty_service/docker-compose.yml index d5414e3..72e2dc7 100644 --- a/tests/extends_w_empty_service/docker-compose.yml +++ b/tests/integration/extends_w_empty_service/docker-compose.yml @@ -1,7 +1,7 @@ version: "3" services: web: - image: busybox + image: nopush/podman-compose-test extends: file: common-services.yml service: webapp_default diff --git a/tests/extends_w_file/common-services.yml b/tests/integration/extends_w_file/common-services.yml similarity index 100% rename from tests/extends_w_file/common-services.yml rename to tests/integration/extends_w_file/common-services.yml diff --git a/tests/extends_w_file/docker-compose.yml b/tests/integration/extends_w_file/docker-compose.yml similarity index 100% rename from tests/extends_w_file/docker-compose.yml rename to tests/integration/extends_w_file/docker-compose.yml diff --git a/tests/extends_w_file_subdir/docker-compose.yml b/tests/integration/extends_w_file_subdir/docker-compose.yml similarity index 100% rename from tests/extends_w_file_subdir/docker-compose.yml rename to tests/integration/extends_w_file_subdir/docker-compose.yml diff --git a/tests/extends_w_file_subdir/sub/docker-compose.yml b/tests/integration/extends_w_file_subdir/sub/docker-compose.yml similarity index 100% rename from tests/extends_w_file_subdir/sub/docker-compose.yml rename to tests/integration/extends_w_file_subdir/sub/docker-compose.yml diff --git a/tests/integration/extends_w_file_subdir/sub/docker/example/Dockerfile b/tests/integration/extends_w_file_subdir/sub/docker/example/Dockerfile new file mode 100644 index 0000000..b6882da --- /dev/null +++ b/tests/integration/extends_w_file_subdir/sub/docker/example/Dockerfile @@ -0,0 +1 @@ +FROM nopush/podman-compose-test as base diff --git a/tests/integration/filesystem/compose_symlink/docker-compose.yml 
b/tests/integration/filesystem/compose_symlink/docker-compose.yml new file mode 120000 index 0000000..bf4e5d6 --- /dev/null +++ b/tests/integration/filesystem/compose_symlink/docker-compose.yml @@ -0,0 +1 @@ +../compose_symlink_dest/docker-compose.yml \ No newline at end of file diff --git a/tests/integration/filesystem/compose_symlink/file b/tests/integration/filesystem/compose_symlink/file new file mode 100644 index 0000000..33431d3 --- /dev/null +++ b/tests/integration/filesystem/compose_symlink/file @@ -0,0 +1 @@ +data_compose_symlink diff --git a/tests/integration/filesystem/compose_symlink_dest/docker-compose.yml b/tests/integration/filesystem/compose_symlink_dest/docker-compose.yml new file mode 100644 index 0000000..3b1e99c --- /dev/null +++ b/tests/integration/filesystem/compose_symlink_dest/docker-compose.yml @@ -0,0 +1,7 @@ +version: "3" +services: + container1: + image: nopush/podman-compose-test + command: ["/bin/busybox", "cat", "/file"] + volumes: + - "./file:/file" diff --git a/tests/integration/filesystem/compose_symlink_dest/file b/tests/integration/filesystem/compose_symlink_dest/file new file mode 100644 index 0000000..0dd80f8 --- /dev/null +++ b/tests/integration/filesystem/compose_symlink_dest/file @@ -0,0 +1 @@ +data_compose_symlink_dest diff --git a/tests/integration/filesystem/test_filesystem.py b/tests/integration/filesystem/test_filesystem.py new file mode 100644 index 0000000..afd806c --- /dev/null +++ b/tests/integration/filesystem/test_filesystem.py @@ -0,0 +1,47 @@ +# SPDX-License-Identifier: GPL-2.0 + + +import os +import unittest + +from tests.integration.test_podman_compose import podman_compose_path +from tests.integration.test_podman_compose import test_path +from tests.integration.test_utils import RunSubprocessMixin + + +class TestFilesystem(unittest.TestCase, RunSubprocessMixin): + def test_compose_symlink(self): + """The context of podman-compose.yml should come from the same directory as the file even + if it is a symlink + """ + + compose_path = os.path.join(test_path(), "filesystem/compose_symlink/docker-compose.yml") + + try: + self.run_subprocess_assert_returncode([ + podman_compose_path(), + "-f", + compose_path, + "up", + "-d", + "container1", + ]) + + out, _ = self.run_subprocess_assert_returncode([ + podman_compose_path(), + "-f", + compose_path, + "logs", + "container1", + ]) + + # BUG: figure out why cat is called twice + self.assertEqual(out, b'data_compose_symlink\ndata_compose_symlink\n') + + finally: + out, _ = self.run_subprocess_assert_returncode([ + podman_compose_path(), + "-f", + compose_path, + "down", + ]) diff --git a/tests/integration/in_pod/custom_x-podman_false/docker-compose.yml b/tests/integration/in_pod/custom_x-podman_false/docker-compose.yml new file mode 100644 index 0000000..c967bef --- /dev/null +++ b/tests/integration/in_pod/custom_x-podman_false/docker-compose.yml @@ -0,0 +1,9 @@ +version: "3" +services: + cont: + image: nopush/podman-compose-test + userns_mode: keep-id:uid=1000 + command: ["dumb-init", "/bin/busybox", "httpd", "-f", "-p", "8080"] + +x-podman: + in_pod: false diff --git a/tests/integration/in_pod/custom_x-podman_not_exists/docker-compose.yml b/tests/integration/in_pod/custom_x-podman_not_exists/docker-compose.yml new file mode 100644 index 0000000..8514c79 --- /dev/null +++ b/tests/integration/in_pod/custom_x-podman_not_exists/docker-compose.yml @@ -0,0 +1,6 @@ +version: "3" +services: + cont: + image: nopush/podman-compose-test + userns_mode: keep-id:uid=1000 + command: ["dumb-init", 
"/bin/busybox", "httpd", "-f", "-p", "8080"] diff --git a/tests/integration/in_pod/custom_x-podman_true/docker-compose.yml b/tests/integration/in_pod/custom_x-podman_true/docker-compose.yml new file mode 100644 index 0000000..698f7b4 --- /dev/null +++ b/tests/integration/in_pod/custom_x-podman_true/docker-compose.yml @@ -0,0 +1,9 @@ +version: "3" +services: + cont: + image: nopush/podman-compose-test + userns_mode: keep-id:uid=1000 + command: ["dumb-init", "/bin/busybox", "httpd", "-f", "-p", "8080"] + +x-podman: + in_pod: true diff --git a/tests/integration/include/docker-compose.base.yaml b/tests/integration/include/docker-compose.base.yaml new file mode 100644 index 0000000..fe80e08 --- /dev/null +++ b/tests/integration/include/docker-compose.base.yaml @@ -0,0 +1,7 @@ +version: '3.6' + +services: + web: + image: nopush/podman-compose-test + command: ["dumb-init", "/bin/busybox", "httpd", "-f", "-h", ".", "-p", "8003"] + diff --git a/tests/integration/include/docker-compose.extend.yaml b/tests/integration/include/docker-compose.extend.yaml new file mode 100644 index 0000000..728a938 --- /dev/null +++ b/tests/integration/include/docker-compose.extend.yaml @@ -0,0 +1,6 @@ +version: '3.6' + +services: + web2: + image: nopush/podman-compose-test + command: ["dumb-init", "/bin/busybox", "httpd", "-f", "-h", ".", "-p", "8004"] diff --git a/tests/include/docker-compose.yaml b/tests/integration/include/docker-compose.yaml similarity index 63% rename from tests/include/docker-compose.yaml rename to tests/integration/include/docker-compose.yaml index ada2153..bf608d4 100644 --- a/tests/include/docker-compose.yaml +++ b/tests/integration/include/docker-compose.yaml @@ -2,3 +2,4 @@ version: '3.6' include: - docker-compose.base.yaml + - docker-compose.extend.yaml diff --git a/tests/interpolation/.env b/tests/integration/interpolation/.env similarity index 100% rename from tests/interpolation/.env rename to tests/integration/interpolation/.env diff --git a/tests/interpolation/docker-compose-colon-question-error.yml b/tests/integration/interpolation/docker-compose-colon-question-error.yml similarity index 100% rename from tests/interpolation/docker-compose-colon-question-error.yml rename to tests/integration/interpolation/docker-compose-colon-question-error.yml diff --git a/tests/interpolation/docker-compose-question-error.yml b/tests/integration/interpolation/docker-compose-question-error.yml similarity index 100% rename from tests/interpolation/docker-compose-question-error.yml rename to tests/integration/interpolation/docker-compose-question-error.yml diff --git a/tests/interpolation/docker-compose.yml b/tests/integration/interpolation/docker-compose.yml similarity index 100% rename from tests/interpolation/docker-compose.yml rename to tests/integration/interpolation/docker-compose.yml diff --git a/tests/integration/ipam_default/docker-compose.yaml b/tests/integration/ipam_default/docker-compose.yaml new file mode 100644 index 0000000..311c41b --- /dev/null +++ b/tests/integration/ipam_default/docker-compose.yaml @@ -0,0 +1,15 @@ +version: '3' + +# --ipam-driver must not be pass when driver is "default" +networks: + ipam_test_default: + ipam: + driver: default + config: + - subnet: 172.19.0.0/24 + +services: + testipam: + image: busybox + command: ["echo", "ipamtest"] + diff --git a/tests/integration/lifetime/test_lifetime.py b/tests/integration/lifetime/test_lifetime.py new file mode 100644 index 0000000..996f5ee --- /dev/null +++ b/tests/integration/lifetime/test_lifetime.py @@ -0,0 +1,125 @@ +# 
SPDX-License-Identifier: GPL-2.0 + + +import os +import unittest + +from parameterized import parameterized + +from tests.integration.test_podman_compose import podman_compose_path +from tests.integration.test_podman_compose import test_path +from tests.integration.test_utils import RunSubprocessMixin + + +class TestLifetime(unittest.TestCase, RunSubprocessMixin): + def test_up_single_container(self): + """Podman compose up should be able to start containers one after another""" + + compose_path = os.path.join(test_path(), "lifetime/up_single_container/docker-compose.yml") + + try: + self.run_subprocess_assert_returncode([ + podman_compose_path(), + "-f", + compose_path, + "up", + "-d", + "container1", + ]) + + self.run_subprocess_assert_returncode([ + podman_compose_path(), + "-f", + compose_path, + "up", + "-d", + "container2", + ]) + + out, _ = self.run_subprocess_assert_returncode([ + podman_compose_path(), + "-f", + compose_path, + "logs", + "container1", + ]) + + self.assertEqual(out, b"test1\n") + + out, _ = self.run_subprocess_assert_returncode([ + podman_compose_path(), + "-f", + compose_path, + "logs", + "container2", + ]) + + self.assertEqual(out, b"test2\n") + + finally: + out, _ = self.run_subprocess_assert_returncode([ + podman_compose_path(), + "-f", + compose_path, + "down", + ]) + + @parameterized.expand([ + ("no_ports", "up_single_container_many_times"), + ("with_ports", "up_single_container_many_times_with_ports"), + ]) + def test_up_single_container_many_times(self, name, subdir): + """Podman compose up should be able to start a container many times after it finishes + running. + """ + + compose_path = os.path.join(test_path(), f"lifetime/{subdir}/docker-compose.yml") + + try: + self.run_subprocess_assert_returncode([ + podman_compose_path(), + "-f", + compose_path, + "up", + "-d", + "container1", + ]) + + for _ in range(0, 3): + self.run_subprocess_assert_returncode([ + podman_compose_path(), + "-f", + compose_path, + "up", + "-d", + "container2", + ]) + + out, _ = self.run_subprocess_assert_returncode([ + podman_compose_path(), + "-f", + compose_path, + "logs", + "container1", + ]) + + self.assertEqual(out, b"test1\n") + + out, _ = self.run_subprocess_assert_returncode([ + podman_compose_path(), + "-f", + compose_path, + "logs", + "container2", + ]) + + # BUG: container should be started 3 times, not 4. 
+ self.assertEqual(out, b"test2\n" * 4) + + finally: + out, _ = self.run_subprocess_assert_returncode([ + podman_compose_path(), + "-f", + compose_path, + "down", + ]) diff --git a/tests/integration/lifetime/up_single_container/docker-compose.yml b/tests/integration/lifetime/up_single_container/docker-compose.yml new file mode 100644 index 0000000..301f686 --- /dev/null +++ b/tests/integration/lifetime/up_single_container/docker-compose.yml @@ -0,0 +1,8 @@ +version: "3" +services: + container1: + image: nopush/podman-compose-test + command: ["/bin/bash", "-c", "echo test1; sleep infinity"] + container2: + image: nopush/podman-compose-test + command: ["/bin/bash", "-c", "echo test2; sleep infinity"] diff --git a/tests/integration/lifetime/up_single_container_many_times/docker-compose.yml b/tests/integration/lifetime/up_single_container_many_times/docker-compose.yml new file mode 100644 index 0000000..c59967c --- /dev/null +++ b/tests/integration/lifetime/up_single_container_many_times/docker-compose.yml @@ -0,0 +1,9 @@ +version: "3" +services: + container1: + image: nopush/podman-compose-test + command: ["/bin/bash", "-c", "echo test1; sleep infinity"] + container2: + image: nopush/podman-compose-test + restart: never + command: ["/bin/bash", "-c", "echo test2"] diff --git a/tests/integration/lifetime/up_single_container_many_times_with_ports/docker-compose.yml b/tests/integration/lifetime/up_single_container_many_times_with_ports/docker-compose.yml new file mode 100644 index 0000000..26a61ef --- /dev/null +++ b/tests/integration/lifetime/up_single_container_many_times_with_ports/docker-compose.yml @@ -0,0 +1,11 @@ +version: "3" +services: + container1: + image: nopush/podman-compose-test + ports: "9001:9001" + command: ["/bin/bash", "-c", "echo test1; sleep infinity"] + container2: + image: nopush/podman-compose-test + restart: never + ports: "9002:9002" + command: ["/bin/bash", "-c", "echo test2"] diff --git a/tests/multicompose/README.md b/tests/integration/multicompose/README.md similarity index 100% rename from tests/multicompose/README.md rename to tests/integration/multicompose/README.md diff --git a/tests/multicompose/d1/1.env b/tests/integration/multicompose/d1/1.env similarity index 100% rename from tests/multicompose/d1/1.env rename to tests/integration/multicompose/d1/1.env diff --git a/tests/multicompose/d1/12.env b/tests/integration/multicompose/d1/12.env similarity index 100% rename from tests/multicompose/d1/12.env rename to tests/integration/multicompose/d1/12.env diff --git a/tests/multicompose/d1/2.env b/tests/integration/multicompose/d1/2.env similarity index 100% rename from tests/multicompose/d1/2.env rename to tests/integration/multicompose/d1/2.env diff --git a/tests/multicompose/d1/docker-compose.yml b/tests/integration/multicompose/d1/docker-compose.yml similarity index 100% rename from tests/multicompose/d1/docker-compose.yml rename to tests/integration/multicompose/d1/docker-compose.yml diff --git a/tests/multicompose/d2/12.env b/tests/integration/multicompose/d2/12.env similarity index 100% rename from tests/multicompose/d2/12.env rename to tests/integration/multicompose/d2/12.env diff --git a/tests/multicompose/d2/2.env b/tests/integration/multicompose/d2/2.env similarity index 100% rename from tests/multicompose/d2/2.env rename to tests/integration/multicompose/d2/2.env diff --git a/tests/multicompose/d2/docker-compose.yml b/tests/integration/multicompose/d2/docker-compose.yml similarity index 100% rename from tests/multicompose/d2/docker-compose.yml rename to 
tests/integration/multicompose/d2/docker-compose.yml diff --git a/tests/nethost/docker-compose.yaml b/tests/integration/nethost/docker-compose.yaml similarity index 100% rename from tests/nethost/docker-compose.yaml rename to tests/integration/nethost/docker-compose.yaml diff --git a/tests/netprio/docker-compose.yaml b/tests/integration/netprio/docker-compose.yaml similarity index 100% rename from tests/netprio/docker-compose.yaml rename to tests/integration/netprio/docker-compose.yaml diff --git a/tests/nets_test1/docker-compose.yml b/tests/integration/nets_test1/docker-compose.yml similarity index 100% rename from tests/nets_test1/docker-compose.yml rename to tests/integration/nets_test1/docker-compose.yml diff --git a/tests/nets_test1/test1.txt b/tests/integration/nets_test1/test1.txt similarity index 100% rename from tests/nets_test1/test1.txt rename to tests/integration/nets_test1/test1.txt diff --git a/tests/nets_test1/test2.txt b/tests/integration/nets_test1/test2.txt similarity index 100% rename from tests/nets_test1/test2.txt rename to tests/integration/nets_test1/test2.txt diff --git a/tests/nets_test2/docker-compose.yml b/tests/integration/nets_test2/docker-compose.yml similarity index 100% rename from tests/nets_test2/docker-compose.yml rename to tests/integration/nets_test2/docker-compose.yml diff --git a/tests/nets_test2/test1.txt b/tests/integration/nets_test2/test1.txt similarity index 100% rename from tests/nets_test2/test1.txt rename to tests/integration/nets_test2/test1.txt diff --git a/tests/nets_test2/test2.txt b/tests/integration/nets_test2/test2.txt similarity index 100% rename from tests/nets_test2/test2.txt rename to tests/integration/nets_test2/test2.txt diff --git a/tests/nets_test3/docker-compose.yml b/tests/integration/nets_test3/docker-compose.yml similarity index 100% rename from tests/nets_test3/docker-compose.yml rename to tests/integration/nets_test3/docker-compose.yml diff --git a/tests/nets_test3/test1.txt b/tests/integration/nets_test3/test1.txt similarity index 100% rename from tests/nets_test3/test1.txt rename to tests/integration/nets_test3/test1.txt diff --git a/tests/nets_test3/test2.txt b/tests/integration/nets_test3/test2.txt similarity index 100% rename from tests/nets_test3/test2.txt rename to tests/integration/nets_test3/test2.txt diff --git a/tests/integration/nets_test_ip/docker-compose.yml b/tests/integration/nets_test_ip/docker-compose.yml new file mode 100644 index 0000000..8f9c792 --- /dev/null +++ b/tests/integration/nets_test_ip/docker-compose.yml @@ -0,0 +1,61 @@ +version: "3" +networks: + shared-network: + driver: bridge + ipam: + config: + - subnet: "172.19.1.0/24" + internal-network: + driver: bridge + ipam: + config: + - subnet: "172.19.2.0/24" + +services: + web1: + image: busybox + hostname: web1 + command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8001"] + working_dir: /var/www/html + networks: + shared-network: + ipv4_address: "172.19.1.10" + x-podman.mac_address: "02:01:01:00:01:01" + internal-network: + ipv4_address: "172.19.2.10" + x-podman.mac_address: "02:01:01:00:02:01" + volumes: + - ./test1.txt:/var/www/html/index.txt:ro,z + web2: + image: busybox + hostname: web2 + command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8001"] + working_dir: /var/www/html + mac_address: "02:01:01:00:02:02" + networks: + internal-network: + ipv4_address: "172.19.2.11" + volumes: + - ./test2.txt:/var/www/html/index.txt:ro,z + + web3: + image: busybox + hostname: web2 + command: ["/bin/busybox", 
"httpd", "-f", "-h", "/var/www/html", "-p", "8001"] + working_dir: /var/www/html + networks: + internal-network: + volumes: + - ./test3.txt:/var/www/html/index.txt:ro,z + + web4: + image: busybox + hostname: web2 + command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8001"] + working_dir: /var/www/html + networks: + internal-network: + shared-network: + ipv4_address: "172.19.1.13" + volumes: + - ./test4.txt:/var/www/html/index.txt:ro,z diff --git a/tests/ports/test1.txt b/tests/integration/nets_test_ip/test1.txt similarity index 100% rename from tests/ports/test1.txt rename to tests/integration/nets_test_ip/test1.txt diff --git a/tests/ports/test2.txt b/tests/integration/nets_test_ip/test2.txt similarity index 100% rename from tests/ports/test2.txt rename to tests/integration/nets_test_ip/test2.txt diff --git a/tests/integration/nets_test_ip/test3.txt b/tests/integration/nets_test_ip/test3.txt new file mode 100644 index 0000000..df6b0d2 --- /dev/null +++ b/tests/integration/nets_test_ip/test3.txt @@ -0,0 +1 @@ +test3 diff --git a/tests/integration/nets_test_ip/test4.txt b/tests/integration/nets_test_ip/test4.txt new file mode 100644 index 0000000..d234c5e --- /dev/null +++ b/tests/integration/nets_test_ip/test4.txt @@ -0,0 +1 @@ +test4 diff --git a/tests/no_services/docker-compose.yaml b/tests/integration/no_services/docker-compose.yaml similarity index 100% rename from tests/no_services/docker-compose.yaml rename to tests/integration/no_services/docker-compose.yaml diff --git a/tests/integration/pid/docker-compose.yml b/tests/integration/pid/docker-compose.yml new file mode 100644 index 0000000..3abf657 --- /dev/null +++ b/tests/integration/pid/docker-compose.yml @@ -0,0 +1,6 @@ +version: "3" +services: + serv: + image: busybox + pid: host + command: sh -c "ps all" diff --git a/tests/ports/docker-compose.yml b/tests/integration/ports/docker-compose.yml similarity index 71% rename from tests/ports/docker-compose.yml rename to tests/integration/ports/docker-compose.yml index 68b2e4d..e89bd05 100644 --- a/tests/ports/docker-compose.yml +++ b/tests/integration/ports/docker-compose.yml @@ -1,18 +1,18 @@ version: "3" services: web1: - image: busybox + image: nopush/podman-compose-test hostname: web1 - command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8001"] + command: ["dumb-init", "/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8001"] working_dir: /var/www/html ports: - 8001:8001 volumes: - ./test1.txt:/var/www/html/index.txt:ro,z web2: - image: busybox + image: nopush/podman-compose-test hostname: web2 - command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8002"] + command: ["dumb-init", "/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8002"] working_dir: /var/www/html ports: - 8002:8002 diff --git a/tests/integration/ports/test1.txt b/tests/integration/ports/test1.txt new file mode 100644 index 0000000..a5bce3f --- /dev/null +++ b/tests/integration/ports/test1.txt @@ -0,0 +1 @@ +test1 diff --git a/tests/integration/ports/test2.txt b/tests/integration/ports/test2.txt new file mode 100644 index 0000000..180cf83 --- /dev/null +++ b/tests/integration/ports/test2.txt @@ -0,0 +1 @@ +test2 diff --git a/tests/integration/profile/docker-compose.yml b/tests/integration/profile/docker-compose.yml new file mode 100644 index 0000000..fe84fce --- /dev/null +++ b/tests/integration/profile/docker-compose.yml @@ -0,0 +1,24 @@ +version: "3" +services: + default-service: + image: nopush/podman-compose-test + command: 
["dumb-init", "/bin/busybox", "httpd", "-f", "-h", "/etc/", "-p", "8000"] + tmpfs: + - /run + - /tmp + service-1: + image: nopush/podman-compose-test + command: ["dumb-init", "/bin/busybox", "httpd", "-f", "-h", "/etc/", "-p", "8000"] + tmpfs: + - /run + - /tmp + profiles: + - profile-1 + service-2: + image: nopush/podman-compose-test + command: ["dumb-init", "/bin/busybox", "httpd", "-f", "-h", "/etc/", "-p", "8000"] + tmpfs: + - /run + - /tmp + profiles: + - profile-2 diff --git a/tests/seccomp/docker-compose.yml b/tests/integration/seccomp/docker-compose.yml similarity index 100% rename from tests/seccomp/docker-compose.yml rename to tests/integration/seccomp/docker-compose.yml diff --git a/tests/secrets/bad_external_name/docker-compose.yaml b/tests/integration/secrets/bad_external_name/docker-compose.yaml similarity index 100% rename from tests/secrets/bad_external_name/docker-compose.yaml rename to tests/integration/secrets/bad_external_name/docker-compose.yaml diff --git a/tests/secrets/bad_external_target/docker-compose.yaml b/tests/integration/secrets/bad_external_target/docker-compose.yaml similarity index 100% rename from tests/secrets/bad_external_target/docker-compose.yaml rename to tests/integration/secrets/bad_external_target/docker-compose.yaml diff --git a/tests/secrets/docker-compose.yaml b/tests/integration/secrets/docker-compose.yaml similarity index 92% rename from tests/secrets/docker-compose.yaml rename to tests/integration/secrets/docker-compose.yaml index 735746a..81bde0f 100644 --- a/tests/secrets/docker-compose.yaml +++ b/tests/integration/secrets/docker-compose.yaml @@ -31,6 +31,9 @@ services: uid: '103' gid: '103' mode: 400 + - source: my_secret + target: ENV_SECRET + type: env secrets: my_secret: @@ -43,4 +46,3 @@ secrets: name: my_secret_3 file_secret: file: ./my_secret - diff --git a/tests/integration/secrets/my_secret b/tests/integration/secrets/my_secret new file mode 100644 index 0000000..235fe34 --- /dev/null +++ b/tests/integration/secrets/my_secret @@ -0,0 +1 @@ +important-secret-is-important diff --git a/tests/secrets/print_secrets.sh b/tests/integration/secrets/print_secrets.sh similarity index 85% rename from tests/secrets/print_secrets.sh rename to tests/integration/secrets/print_secrets.sh index 7115716..695749c 100755 --- a/tests/secrets/print_secrets.sh +++ b/tests/integration/secrets/print_secrets.sh @@ -4,3 +4,4 @@ ls -la /run/secrets/* ls -la /etc/custom_location cat /run/secrets/* cat /etc/custom_location +env | grep SECRET diff --git a/tests/integration/selinux/docker-compose.yml b/tests/integration/selinux/docker-compose.yml new file mode 100644 index 0000000..24caab6 --- /dev/null +++ b/tests/integration/selinux/docker-compose.yml @@ -0,0 +1,14 @@ +version: "3" +services: + web1: + image: busybox + command: httpd -f -p 80 -h /var/www/html + volumes: + - type: bind + source: ./docker-compose.yml + target: /var/www/html/index.html + bind: + selinux: z + ports: + - "8080:80" + diff --git a/tests/short/data/redis/.keep b/tests/integration/short/data/redis/.keep similarity index 100% rename from tests/short/data/redis/.keep rename to tests/integration/short/data/redis/.keep diff --git a/tests/short/data/web/.keep b/tests/integration/short/data/web/.keep similarity index 100% rename from tests/short/data/web/.keep rename to tests/integration/short/data/web/.keep diff --git a/tests/short/docker-compose.yaml b/tests/integration/short/docker-compose.yaml similarity index 100% rename from tests/short/docker-compose.yaml rename to 
tests/integration/short/docker-compose.yaml diff --git a/tests/integration/test_podman_compose.py b/tests/integration/test_podman_compose.py new file mode 100644 index 0000000..e6cba41 --- /dev/null +++ b/tests/integration/test_podman_compose.py @@ -0,0 +1,111 @@ +# SPDX-License-Identifier: GPL-2.0 + +import os +import unittest +from pathlib import Path + +from tests.integration.test_utils import RunSubprocessMixin + + +def base_path(): + """Returns the base path for the project""" + return Path(__file__).parent.parent.parent + + +def test_path(): + """Returns the path to the tests directory""" + return os.path.join(base_path(), "tests/integration") + + +def podman_compose_path(): + """Returns the path to the podman compose script""" + return os.path.join(base_path(), "podman_compose.py") + + +class TestPodmanCompose(unittest.TestCase, RunSubprocessMixin): + def test_extends_w_file_subdir(self): + """ + Test that podman-compose can execute podman-compose -f up with extended File which + includes a build context + :return: + """ + main_path = Path(__file__).parent.parent.parent + + command_up = [ + "coverage", + "run", + str(main_path.joinpath("podman_compose.py")), + "-f", + str( + main_path.joinpath( + "tests", "integration", "extends_w_file_subdir", "docker-compose.yml" + ) + ), + "up", + "-d", + ] + + command_check_container = [ + "coverage", + "run", + str(main_path.joinpath("podman_compose.py")), + "-f", + str( + main_path.joinpath( + "tests", "integration", "extends_w_file_subdir", "docker-compose.yml" + ) + ), + "ps", + "--format", + '{{.Image}}', + ] + + self.run_subprocess_assert_returncode(command_up) + # check container was created and exists + out, _ = self.run_subprocess_assert_returncode(command_check_container) + self.assertEqual(out, b'localhost/subdir_test:me\n') + # cleanup test image(tags) + self.run_subprocess_assert_returncode([ + str(main_path.joinpath("podman_compose.py")), + "-f", + str( + main_path.joinpath( + "tests", "integration", "extends_w_file_subdir", "docker-compose.yml" + ) + ), + "down", + ]) + + self.run_subprocess_assert_returncode([ + "podman", + "rmi", + "--force", + "localhost/subdir_test:me", + ]) + + # check container did not exists anymore + out, _ = self.run_subprocess_assert_returncode(command_check_container) + self.assertEqual(out, b'') + + def test_extends_w_empty_service(self): + """ + Test that podman-compose can execute podman-compose -f up with extended File which + includes an empty service. (e.g. if the file is used as placeholder for more complex + configurations.) 
+ """ + main_path = Path(__file__).parent.parent.parent + + command_up = [ + "python3", + str(main_path.joinpath("podman_compose.py")), + "-f", + str( + main_path.joinpath( + "tests", "integration", "extends_w_empty_service", "docker-compose.yml" + ) + ), + "up", + "-d", + ] + + self.run_subprocess_assert_returncode(command_up) diff --git a/tests/integration/test_podman_compose_additional_contexts.py b/tests/integration/test_podman_compose_additional_contexts.py new file mode 100644 index 0000000..2a6df74 --- /dev/null +++ b/tests/integration/test_podman_compose_additional_contexts.py @@ -0,0 +1,44 @@ +# SPDX-License-Identifier: GPL-2.0 + + +"""Test how additional contexts are passed to podman.""" + +import os +import subprocess +import unittest + +from tests.integration.test_podman_compose import podman_compose_path +from tests.integration.test_podman_compose import test_path + + +def compose_yaml_path(): + """ "Returns the path to the compose file used for this test module""" + return os.path.join(test_path(), "additional_contexts", "project") + + +class TestComposeBuildAdditionalContexts(unittest.TestCase): + def test_build_additional_context(self): + """podman build should receive additional contexts as --build-context + + See additional_context/project/docker-compose.yaml for context paths + """ + cmd = ( + "coverage", + "run", + podman_compose_path(), + "--dry-run", + "--verbose", + "-f", + os.path.join(compose_yaml_path(), "docker-compose.yml"), + "build", + ) + p = subprocess.run( + cmd, + stdout=subprocess.PIPE, + check=False, + stderr=subprocess.STDOUT, + text=True, + ) + self.assertEqual(p.returncode, 0) + self.assertIn("--build-context=data=../data_for_dict", p.stdout) + self.assertIn("--build-context=data=../data_for_list", p.stdout) diff --git a/tests/integration/test_podman_compose_build.py b/tests/integration/test_podman_compose_build.py new file mode 100644 index 0000000..ac4ce17 --- /dev/null +++ b/tests/integration/test_podman_compose_build.py @@ -0,0 +1,65 @@ +# SPDX-License-Identifier: GPL-2.0 + +import os +import unittest + +import requests + +from tests.integration.test_podman_compose import podman_compose_path +from tests.integration.test_podman_compose import test_path +from tests.integration.test_utils import RunSubprocessMixin + + +def compose_yaml_path(): + """ "Returns the path to the compose file used for this test module""" + base_path = os.path.join(test_path(), "build") + return os.path.join(base_path, "docker-compose.yml") + + +class TestComposeBuild(unittest.TestCase, RunSubprocessMixin): + def test_build(self): + try: + self.run_subprocess_assert_returncode([ + podman_compose_path(), + "-f", + compose_yaml_path(), + "build", + "--no-cache", + ]) + + self.run_subprocess_assert_returncode([ + podman_compose_path(), + "-f", + compose_yaml_path(), + "up", + "-d", + ]) + + request = requests.get('http://localhost:8080/index.txt') + self.assertEqual(request.status_code, 200) + + alt_request_success = False + try: + # FIXME: suspicious behaviour, too often ends up in error + alt_request = requests.get('http://localhost:8000/index.txt') + self.assertEqual(alt_request.status_code, 200) + self.assertIn("ALT buildno=2 port=8000 ", alt_request.text) + alt_request_success = True + except requests.exceptions.ConnectionError: + pass + + if alt_request_success: + output, _ = self.run_subprocess_assert_returncode([ + "podman", + "inspect", + "my-busybox-httpd2", + ]) + self.assertIn("httpd_port=8000", str(output)) + self.assertIn("buildno=2", str(output)) + finally: + 
self.run_subprocess_assert_returncode([ + podman_compose_path(), + "-f", + compose_yaml_path(), + "down", + ]) diff --git a/tests/integration/test_podman_compose_build_fail.py b/tests/integration/test_podman_compose_build_fail.py new file mode 100644 index 0000000..1bb89a5 --- /dev/null +++ b/tests/integration/test_podman_compose_build_fail.py @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: GPL-2.0 + +import os +import unittest + +from tests.integration.test_podman_compose import podman_compose_path +from tests.integration.test_podman_compose import test_path +from tests.integration.test_utils import RunSubprocessMixin + + +def compose_yaml_path(): + """ "Returns the path to the compose file used for this test module""" + base_path = os.path.join(test_path(), "build_fail") + return os.path.join(base_path, "docker-compose.yml") + + +class TestComposeBuildFail(unittest.TestCase, RunSubprocessMixin): + def test_build_fail(self): + output, error = self.run_subprocess_assert_returncode( + [ + podman_compose_path(), + "-f", + compose_yaml_path(), + "build", + ], + expected_returncode=127, + ) + self.assertIn("RUN this_command_does_not_exist", str(output)) + self.assertIn("this_command_does_not_exist: not found", str(error)) + self.assertIn("while running runtime: exit status 127", str(error)) diff --git a/tests/integration/test_podman_compose_build_secrets.py b/tests/integration/test_podman_compose_build_secrets.py new file mode 100644 index 0000000..e5360f6 --- /dev/null +++ b/tests/integration/test_podman_compose_build_secrets.py @@ -0,0 +1,90 @@ +# SPDX-License-Identifier: GPL-2.0 + + +"""Test how secrets in files are passed to podman.""" + +import os +import subprocess +import unittest + +from tests.integration.test_podman_compose import podman_compose_path +from tests.integration.test_podman_compose import test_path + + +def compose_yaml_path(): + """ "Returns the path to the compose file used for this test module""" + return os.path.join(test_path(), "build_secrets") + + +class TestComposeBuildSecrets(unittest.TestCase): + def test_run_secret(self): + """podman run should receive file secrets as --volume + + See build_secrets/docker-compose.yaml for secret names and mount points (aka targets) + + """ + cmd = ( + "coverage", + "run", + podman_compose_path(), + "--dry-run", + "--verbose", + "-f", + os.path.join(compose_yaml_path(), "docker-compose.yaml"), + "run", + "test", + ) + p = subprocess.run( + cmd, stdout=subprocess.PIPE, check=False, stderr=subprocess.STDOUT, text=True + ) + self.assertEqual(p.returncode, 0) + secret_path = os.path.join(compose_yaml_path(), "my_secret") + self.assertIn(f"--volume {secret_path}:/run/secrets/run_secret:ro,rprivate,rbind", p.stdout) + self.assertIn(f"--volume {secret_path}:/tmp/run_secret2:ro,rprivate,rbind", p.stdout) + + def test_build_secret(self): + """podman build should receive secrets as --secret, so that they can be used inside the + Dockerfile in "RUN --mount=type=secret ..." commands. 
+ + """ + cmd = ( + "coverage", + "run", + podman_compose_path(), + "--dry-run", + "--verbose", + "-f", + os.path.join(compose_yaml_path(), "docker-compose.yaml"), + "build", + ) + p = subprocess.run( + cmd, stdout=subprocess.PIPE, check=False, stderr=subprocess.STDOUT, text=True + ) + self.assertEqual(p.returncode, 0) + secret_path = os.path.join(compose_yaml_path(), "my_secret") + self.assertIn(f"--secret id=build_secret,src={secret_path}", p.stdout) + self.assertIn(f"--secret id=build_secret2,src={secret_path}", p.stdout) + + def test_invalid_build_secret(self): + """build secrets in docker-compose file can only have a target argument without directory + component + + """ + cmd = ( + "coverage", + "run", + podman_compose_path(), + "--dry-run", + "--verbose", + "-f", + os.path.join(compose_yaml_path(), "docker-compose.yaml.invalid"), + "build", + ) + p = subprocess.run( + cmd, stdout=subprocess.PIPE, check=False, stderr=subprocess.STDOUT, text=True + ) + self.assertNotEqual(p.returncode, 0) + self.assertIn( + 'ValueError: ERROR: Build secret "build_secret" has invalid target "/build_secret"', + p.stdout, + ) diff --git a/tests/integration/test_podman_compose_build_ulimits.py b/tests/integration/test_podman_compose_build_ulimits.py new file mode 100644 index 0000000..d578d77 --- /dev/null +++ b/tests/integration/test_podman_compose_build_ulimits.py @@ -0,0 +1,93 @@ +# SPDX-License-Identifier: GPL-2.0 + + +"""Test how ulimits are applied in podman-compose build.""" + +import os +import subprocess +import unittest + +from tests.integration.test_podman_compose import podman_compose_path +from tests.integration.test_podman_compose import test_path + + +def compose_yaml_path(): + """ "Returns the path to the compose file used for this test module""" + return os.path.join(test_path(), "ulimit_build") + + +class TestComposeBuildUlimits(unittest.TestCase): + def test_build_ulimits_ulimit1(self): + """podman build should receive and apply limits when building service ulimit1""" + + cmd = ( + "coverage", + "run", + podman_compose_path(), + "--verbose", + "-f", + os.path.join(compose_yaml_path(), "docker-compose.yaml"), + "build", + "--no-cache", + "ulimit1", + ) + p = subprocess.run( + cmd, stdout=subprocess.PIPE, check=False, stderr=subprocess.STDOUT, text=True + ) + + self.assertEqual(p.returncode, 0) + self.assertIn("--ulimit nofile=1001", p.stdout) + self.assertIn("soft nofile limit: 1001", p.stdout) + self.assertIn("hard nofile limit: 1001", p.stdout) + + def test_build_ulimits_ulimit2(self): + """podman build should receive and apply limits when building service ulimit2""" + + cmd = ( + "coverage", + "run", + podman_compose_path(), + "--verbose", + "-f", + os.path.join(compose_yaml_path(), "docker-compose.yaml"), + "build", + "--no-cache", + "ulimit2", + ) + p = subprocess.run( + cmd, stdout=subprocess.PIPE, check=False, stderr=subprocess.STDOUT, text=True + ) + + self.assertEqual(p.returncode, 0) + self.assertIn("--ulimit nofile=1002", p.stdout) + self.assertIn("--ulimit nproc=1002:2002", p.stdout) + self.assertIn("soft process limit: 1002", p.stdout) + self.assertIn("hard process limit: 2002", p.stdout) + self.assertIn("soft nofile limit: 1002", p.stdout) + self.assertIn("hard nofile limit: 1002", p.stdout) + + def test_build_ulimits_ulimit3(self): + """podman build should receive and apply limits when building service ulimit3""" + + cmd = ( + "coverage", + "run", + podman_compose_path(), + "--verbose", + "-f", + os.path.join(compose_yaml_path(), "docker-compose.yaml"), + "build", + 
"--no-cache", + "ulimit3", + ) + p = subprocess.run( + cmd, stdout=subprocess.PIPE, check=False, stderr=subprocess.STDOUT, text=True + ) + + self.assertEqual(p.returncode, 0) + self.assertIn("--ulimit nofile=1003", p.stdout) + self.assertIn("--ulimit nproc=1003:2003", p.stdout) + self.assertIn("soft process limit: 1003", p.stdout) + self.assertIn("hard process limit: 2003", p.stdout) + self.assertIn("soft nofile limit: 1003", p.stdout) + self.assertIn("hard nofile limit: 1003", p.stdout) diff --git a/tests/integration/test_podman_compose_config.py b/tests/integration/test_podman_compose_config.py new file mode 100644 index 0000000..8a7b637 --- /dev/null +++ b/tests/integration/test_podman_compose_config.py @@ -0,0 +1,82 @@ +# SPDX-License-Identifier: GPL-2.0 + +""" +test_podman_compose_config.py + +Tests the podman-compose config command which is used to return defined compose services. +""" + +# pylint: disable=redefined-outer-name +import os +import unittest + +from parameterized import parameterized + +from tests.integration.test_podman_compose import podman_compose_path +from tests.integration.test_podman_compose import test_path +from tests.integration.test_utils import RunSubprocessMixin + + +def profile_compose_file(): + """ "Returns the path to the `profile` compose file used for this test module""" + return os.path.join(test_path(), "profile", "docker-compose.yml") + + +class TestComposeConfig(unittest.TestCase, RunSubprocessMixin): + def test_config_no_profiles(self): + """ + Tests podman-compose config command without profile enablement. + """ + config_cmd = [ + "coverage", + "run", + podman_compose_path(), + "-f", + profile_compose_file(), + "config", + ] + + out, _ = self.run_subprocess_assert_returncode(config_cmd) + + string_output = out.decode("utf-8") + self.assertIn("default-service", string_output) + self.assertNotIn("service-1", string_output) + self.assertNotIn("service-2", string_output) + + @parameterized.expand( + [ + ( + ["--profile", "profile-1", "config"], + {"default-service": True, "service-1": True, "service-2": False}, + ), + ( + ["--profile", "profile-2", "config"], + {"default-service": True, "service-1": False, "service-2": True}, + ), + ( + ["--profile", "profile-1", "--profile", "profile-2", "config"], + {"default-service": True, "service-1": True, "service-2": True}, + ), + ], + ) + def test_config_profiles(self, profiles, expected_services): + """ + Tests podman-compose + :param profiles: The enabled profiles for the parameterized test. + :param expected_services: Dictionary used to model the expected "enabled" services in the + profile. Key = service name, Value = True if the service is enabled, otherwise False. 
+ """ + config_cmd = ["coverage", "run", podman_compose_path(), "-f", profile_compose_file()] + config_cmd.extend(profiles) + + out, _ = self.run_subprocess_assert_returncode(config_cmd) + + actual_output = out.decode("utf-8") + + self.assertEqual(len(expected_services), 3) + + actual_services = {} + for service, _ in expected_services.items(): + actual_services[service] = service in actual_output + + self.assertEqual(expected_services, actual_services) diff --git a/tests/integration/test_podman_compose_deps.py b/tests/integration/test_podman_compose_deps.py new file mode 100644 index 0000000..1c46812 --- /dev/null +++ b/tests/integration/test_podman_compose_deps.py @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: GPL-2.0 +import os +import unittest + +from tests.integration.test_podman_compose import podman_compose_path +from tests.integration.test_podman_compose import test_path +from tests.integration.test_utils import RunSubprocessMixin + + +def compose_yaml_path(): + return os.path.join(os.path.join(test_path(), "deps"), "docker-compose.yaml") + + +class TestComposeDeps(unittest.TestCase, RunSubprocessMixin): + def test_deps(self): + try: + output, error = self.run_subprocess_assert_returncode([ + podman_compose_path(), + "-f", + compose_yaml_path(), + "run", + "--rm", + "sleep", + "/bin/sh", + "-c", + "wget -O - http://web:8000/hosts", + ]) + self.assertIn(b"HTTP request sent, awaiting response... 200 OK", output) + self.assertIn(b"deps_web_1", output) + finally: + self.run_subprocess_assert_returncode([ + podman_compose_path(), + "-f", + compose_yaml_path(), + "down", + ]) diff --git a/tests/integration/test_podman_compose_in_pod.py b/tests/integration/test_podman_compose_in_pod.py new file mode 100644 index 0000000..ed57135 --- /dev/null +++ b/tests/integration/test_podman_compose_in_pod.py @@ -0,0 +1,492 @@ +# SPDX-License-Identifier: GPL-2.0 + +import os +import unittest + +from tests.integration.test_utils import RunSubprocessMixin + + +def base_path(): + """Returns the base path for the project""" + return os.path.dirname(os.path.dirname(os.path.dirname(__file__))) + + +def test_path(): + """Returns the path to the tests directory""" + return os.path.join(base_path(), "tests/integration") + + +def podman_compose_path(): + """Returns the path to the podman compose script""" + return os.path.join(base_path(), "podman_compose.py") + + +# If a compose file has userns_mode set, setting in_pod to True, results in error. +# Default in_pod setting is True, unless compose file provides otherwise. +# Compose file provides custom in_pod option, which can be overridden by command line in_pod option. +# Test all combinations of command line argument in_pod and compose file argument in_pod. 
+class TestPodmanComposeInPod(unittest.TestCase, RunSubprocessMixin): + # compose file provides x-podman in_pod=false + def test_x_podman_in_pod_false_command_line_in_pod_not_exists(self): + """ + Test that podman-compose will not create a pod, when x-podman in_pod=false and command line + does not provide this option + """ + command_up = [ + "python3", + os.path.join(base_path(), "podman_compose.py"), + "-f", + os.path.join( + base_path(), + "tests", + "integration", + "in_pod", + "custom_x-podman_false", + "docker-compose.yml", + ), + "up", + "-d", + ] + + down_cmd = [ + "python3", + podman_compose_path(), + "-f", + os.path.join( + base_path(), + "tests", + "integration", + "in_pod", + "custom_x-podman_false", + "docker-compose.yml", + ), + "down", + ] + + try: + self.run_subprocess_assert_returncode(command_up) + + finally: + self.run_subprocess_assert_returncode(down_cmd) + command_rm_pod = ["podman", "pod", "rm", "pod_custom_x-podman_false"] + # throws an error, can not actually find this pod because it was not created + self.run_subprocess_assert_returncode(command_rm_pod, expected_returncode=1) + + def test_x_podman_in_pod_false_command_line_in_pod_true(self): + """ + Test that podman-compose does not allow pod creating even with command line in_pod=True + when --userns and --pod are set together: throws an error + """ + # FIXME: creates a pod anyway, although it should not + command_up = [ + "python3", + os.path.join(base_path(), "podman_compose.py"), + "--in-pod=True", + "-f", + os.path.join( + base_path(), + "tests", + "integration", + "in_pod", + "custom_x-podman_false", + "docker-compose.yml", + ), + "up", + "-d", + ] + + try: + out, err = self.run_subprocess_assert_returncode(command_up) + self.assertEqual(b"Error: --userns and --pod cannot be set together" in err, True) + + finally: + command_rm_pod = ["podman", "pod", "rm", "pod_custom_x-podman_false"] + # should throw an error of not being able to find this pod (because it should not have + # been created) and have expected_returncode=1 (see FIXME above) + self.run_subprocess_assert_returncode(command_rm_pod) + + def test_x_podman_in_pod_false_command_line_in_pod_false(self): + """ + Test that podman-compose will not create a pod as command line sets in_pod=False + """ + command_up = [ + "python3", + os.path.join(base_path(), "podman_compose.py"), + "--in-pod=False", + "-f", + os.path.join( + base_path(), + "tests", + "integration", + "in_pod", + "custom_x-podman_false", + "docker-compose.yml", + ), + "up", + "-d", + ] + + down_cmd = [ + "python3", + podman_compose_path(), + "-f", + os.path.join( + base_path(), + "tests", + "integration", + "in_pod", + "custom_x-podman_false", + "docker-compose.yml", + ), + "down", + ] + + try: + self.run_subprocess_assert_returncode(command_up) + + finally: + self.run_subprocess_assert_returncode(down_cmd) + command_rm_pod = ["podman", "pod", "rm", "pod_custom_x-podman_false"] + # can not actually find this pod because it was not created + self.run_subprocess_assert_returncode(command_rm_pod, 1) + + def test_x_podman_in_pod_false_command_line_in_pod_empty_string(self): + """ + Test that podman-compose will not create a pod, when x-podman in_pod=false and command line + command line in_pod="" + """ + command_up = [ + "python3", + os.path.join(base_path(), "podman_compose.py"), + "--in-pod=", + "-f", + os.path.join( + base_path(), + "tests", + "integration", + "in_pod", + "custom_x-podman_false", + "docker-compose.yml", + ), + "up", + "-d", + ] + + down_cmd = [ + "python3", + 
podman_compose_path(), + "-f", + os.path.join( + base_path(), + "tests", + "integration", + "in_pod", + "custom_x-podman_false", + "docker-compose.yml", + ), + "down", + ] + + try: + self.run_subprocess_assert_returncode(command_up) + + finally: + self.run_subprocess_assert_returncode(down_cmd) + command_rm_pod = ["podman", "pod", "rm", "pod_custom_x-podman_false"] + # can not actually find this pod because it was not created + self.run_subprocess_assert_returncode(command_rm_pod, 1) + + # compose file provides x-podman in_pod=true + def test_x_podman_in_pod_true_command_line_in_pod_not_exists(self): + """ + Test that podman-compose does not allow pod creating when --userns and --pod are set + together even when x-podman in_pod=true: throws an error + """ + # FIXME: creates a pod anyway, although it should not + # Container is not created, so command 'down' is not needed + command_up = [ + "python3", + os.path.join(base_path(), "podman_compose.py"), + "-f", + os.path.join( + base_path(), + "tests", + "integration", + "in_pod", + "custom_x-podman_true", + "docker-compose.yml", + ), + "up", + "-d", + ] + + try: + out, err = self.run_subprocess_assert_returncode(command_up) + self.assertEqual(b"Error: --userns and --pod cannot be set together" in err, True) + + finally: + command_rm_pod = ["podman", "pod", "rm", "pod_custom_x-podman_true"] + # should throw an error of not being able to find this pod (it should not have been + # created) and have expected_returncode=1 (see FIXME above) + self.run_subprocess_assert_returncode(command_rm_pod) + + def test_x_podman_in_pod_true_command_line_in_pod_true(self): + """ + Test that podman-compose does not allow pod creating when --userns and --pod are set + together even when x-podman in_pod=true and and command line in_pod=True: throws an error + """ + # FIXME: creates a pod anyway, although it should not + # Container is not created, so command 'down' is not needed + command_up = [ + "python3", + os.path.join(base_path(), "podman_compose.py"), + "--in-pod=True", + "-f", + os.path.join( + base_path(), + "tests", + "integration", + "in_pod", + "custom_x-podman_true", + "docker-compose.yml", + ), + "up", + "-d", + ] + + try: + out, err = self.run_subprocess_assert_returncode(command_up) + self.assertEqual(b"Error: --userns and --pod cannot be set together" in err, True) + + finally: + command_rm_pod = ["podman", "pod", "rm", "pod_custom_x-podman_true"] + # should throw an error of not being able to find this pod (because it should not have + # been created) and have expected_returncode=1 (see FIXME above) + self.run_subprocess_assert_returncode(command_rm_pod) + + def test_x_podman_in_pod_true_command_line_in_pod_false(self): + """ + Test that podman-compose will not create a pod as command line sets in_pod=False + """ + command_up = [ + "python3", + os.path.join(base_path(), "podman_compose.py"), + "--in-pod=False", + "-f", + os.path.join( + base_path(), + "tests", + "integration", + "in_pod", + "custom_x-podman_true", + "docker-compose.yml", + ), + "up", + "-d", + ] + + down_cmd = [ + "python3", + podman_compose_path(), + "-f", + os.path.join( + base_path(), + "tests", + "integration", + "in_pod", + "custom_x-podman_true", + "docker-compose.yml", + ), + "down", + ] + + try: + self.run_subprocess_assert_returncode(command_up) + + finally: + self.run_subprocess_assert_returncode(down_cmd) + command_rm_pod = ["podman", "pod", "rm", "pod_custom_x-podman_false"] + # can not actually find this pod because it was not created + 
self.run_subprocess_assert_returncode(command_rm_pod, 1) + + def test_x_podman_in_pod_true_command_line_in_pod_empty_string(self): + """ + Test that podman-compose does not allow pod creating when --userns and --pod are set + together even when x-podman in_pod=true and command line in_pod="": throws an error + """ + # FIXME: creates a pod anyway, although it should not + # Container is not created, so command 'down' is not needed + command_up = [ + "python3", + os.path.join(base_path(), "podman_compose.py"), + "--in-pod=", + "-f", + os.path.join( + base_path(), + "tests", + "integration", + "in_pod", + "custom_x-podman_true", + "docker-compose.yml", + ), + "up", + "-d", + ] + + try: + out, err = self.run_subprocess_assert_returncode(command_up) + self.assertEqual(b"Error: --userns and --pod cannot be set together" in err, True) + + finally: + command_rm_pod = ["podman", "pod", "rm", "pod_custom_x-podman_true"] + # should throw an error of not being able to find this pod (because it should not have + # been created) and have expected_returncode=1 (see FIXME above) + self.run_subprocess_assert_returncode(command_rm_pod) + + # compose file does not provide x-podman in_pod + def test_x_podman_in_pod_not_exists_command_line_in_pod_not_exists(self): + """ + Test that podman-compose does not allow pod creating when --userns and --pod are set + together: throws an error + """ + # FIXME: creates a pod anyway, although it should not + # Container is not created, so command 'down' is not needed + command_up = [ + "python3", + os.path.join(base_path(), "podman_compose.py"), + "-f", + os.path.join( + base_path(), + "tests", + "integration", + "in_pod", + "custom_x-podman_not_exists", + "docker-compose.yml", + ), + "up", + "-d", + ] + + try: + out, err = self.run_subprocess_assert_returncode(command_up) + self.assertEqual(b"Error: --userns and --pod cannot be set together" in err, True) + + finally: + command_rm_pod = ["podman", "pod", "rm", "pod_custom_x-podman_not_exists"] + # should throw an error of not being able to find this pod (it should not have been + # created) and have expected_returncode=1 (see FIXME above) + self.run_subprocess_assert_returncode(command_rm_pod) + + def test_x_podman_in_pod_not_exists_command_line_in_pod_true(self): + """ + Test that podman-compose does not allow pod creating when --userns and --pod are set + together even when x-podman in_pod=true: throws an error + """ + # FIXME: creates a pod anyway, although it should not + # Container was not created, so command 'down' is not needed + command_up = [ + "python3", + os.path.join(base_path(), "podman_compose.py"), + "--in-pod=True", + "-f", + os.path.join( + base_path(), + "tests", + "integration", + "in_pod", + "custom_x-podman_not_exists", + "docker-compose.yml", + ), + "up", + "-d", + ] + + try: + out, err = self.run_subprocess_assert_returncode(command_up) + self.assertEqual(b"Error: --userns and --pod cannot be set together" in err, True) + + finally: + command_rm_pod = ["podman", "pod", "rm", "pod_custom_x-podman_not_exists"] + # should throw an error of not being able to find this pod (because it should not have + # been created) and have expected_returncode=1 (see FIXME above) + self.run_subprocess_assert_returncode(command_rm_pod) + + def test_x_podman_in_pod_not_exists_command_line_in_pod_false(self): + """ + Test that podman-compose will not create a pod as command line sets in_pod=False + """ + command_up = [ + "python3", + os.path.join(base_path(), "podman_compose.py"), + "--in-pod=False", + "-f", + 
os.path.join( + base_path(), + "tests", + "integration", + "in_pod", + "custom_x-podman_not_exists", + "docker-compose.yml", + ), + "up", + "-d", + ] + + down_cmd = [ + "python3", + podman_compose_path(), + "-f", + os.path.join( + base_path(), + "tests", + "integration", + "in_pod", + "custom_x-podman_not_exists", + "docker-compose.yml", + ), + "down", + ] + + try: + self.run_subprocess_assert_returncode(command_up) + + finally: + self.run_subprocess_assert_returncode(down_cmd) + + command_rm_pod = ["podman", "pod", "rm", "pod_custom_x-podman_not_exists"] + # can not actually find this pod because it was not created + self.run_subprocess_assert_returncode(command_rm_pod, 1) + + def test_x_podman_in_pod_not_exists_command_line_in_pod_empty_string(self): + """ + Test that podman-compose does not allow pod creating when --userns and --pod are set + together: throws an error + """ + # FIXME: creates a pod anyway, although it should not + # Container was not created, so command 'down' is not needed + command_up = [ + "python3", + os.path.join(base_path(), "podman_compose.py"), + "--in-pod=", + "-f", + os.path.join( + base_path(), + "tests", + "integration", + "in_pod", + "custom_x-podman_not_exists", + "docker-compose.yml", + ), + "up", + "-d", + ] + + try: + out, err = self.run_subprocess_assert_returncode(command_up) + self.assertEqual(b"Error: --userns and --pod cannot be set together" in err, True) + + finally: + command_rm_pod = ["podman", "pod", "rm", "pod_custom_x-podman_not_exists"] + # should throw an error of not being able to find this pod (because it should not have + # been created) and have expected_returncode=1 (see FIXME above) + self.run_subprocess_assert_returncode(command_rm_pod) diff --git a/tests/integration/test_podman_compose_include.py b/tests/integration/test_podman_compose_include.py new file mode 100644 index 0000000..dcde390 --- /dev/null +++ b/tests/integration/test_podman_compose_include.py @@ -0,0 +1,64 @@ +# SPDX-License-Identifier: GPL-2.0 + +import unittest +from pathlib import Path + +from tests.integration.test_utils import RunSubprocessMixin + + +class TestPodmanComposeInclude(unittest.TestCase, RunSubprocessMixin): + def test_podman_compose_include(self): + """ + Test that podman-compose can execute podman-compose -f up with include + :return: + """ + main_path = Path(__file__).parent.parent.parent + + command_up = [ + "coverage", + "run", + str(main_path.joinpath("podman_compose.py")), + "-f", + str(main_path.joinpath("tests", "integration", "include", "docker-compose.yaml")), + "up", + "-d", + ] + + command_check_container = [ + "podman", + "ps", + "-a", + "--filter", + "label=io.podman.compose.project=include", + "--format", + '"{{.Image}}"', + ] + + command_container_id = [ + "podman", + "ps", + "-a", + "--filter", + "label=io.podman.compose.project=include", + "--format", + '"{{.ID}}"', + ] + + command_down = ["podman", "rm", "--force"] + + self.run_subprocess_assert_returncode(command_up) + out, _ = self.run_subprocess_assert_returncode(command_check_container) + expected_output = b'"localhost/nopush/podman-compose-test:latest"\n' * 2 + self.assertEqual(out, expected_output) + # Get container ID to remove it + out, _ = self.run_subprocess_assert_returncode(command_container_id) + self.assertNotEqual(out, b"") + container_ids = out.decode().strip().split("\n") + container_ids = [container_id.replace('"', "") for container_id in container_ids] + command_down.extend(container_ids) + out, _ = self.run_subprocess_assert_returncode(command_down) + # cleanup 
test image(tags) + self.assertNotEqual(out, b"") + # check container did not exists anymore + out, _ = self.run_subprocess_assert_returncode(command_check_container) + self.assertEqual(out, b"") diff --git a/tests/integration/test_podman_compose_networks.py b/tests/integration/test_podman_compose_networks.py new file mode 100644 index 0000000..de3982b --- /dev/null +++ b/tests/integration/test_podman_compose_networks.py @@ -0,0 +1,116 @@ +# SPDX-License-Identifier: GPL-2.0 + +""" +test_podman_compose_networks.py + +Tests the podman networking parameters +""" + +# pylint: disable=redefined-outer-name +import os +import unittest + +from tests.integration.test_podman_compose import podman_compose_path +from tests.integration.test_podman_compose import test_path +from tests.integration.test_utils import RunSubprocessMixin + + +class TestPodmanComposeNetwork(RunSubprocessMixin, unittest.TestCase): + @staticmethod + def compose_file(): + """Returns the path to the compose file used for this test module""" + return os.path.join(test_path(), "nets_test_ip", "docker-compose.yml") + + def teardown(self): + """ + Ensures that the services within the "profile compose file" are removed between + each test case. + """ + # run the test case + yield + + down_cmd = [ + "coverage", + "run", + podman_compose_path(), + "-f", + self.compose_file(), + "kill", + "-a", + ] + self.run_subprocess(down_cmd) + + def test_networks(self): + up_cmd = [ + "coverage", + "run", + podman_compose_path(), + "-f", + self.compose_file(), + "up", + "-d", + "--force-recreate", + ] + + self.run_subprocess_assert_returncode(up_cmd) + + check_cmd = [ + podman_compose_path(), + "-f", + self.compose_file(), + "ps", + "--format", + '"{{.Names}}"', + ] + out, _ = self.run_subprocess_assert_returncode(check_cmd) + self.assertIn(b"nets_test_ip_web1_1", out) + self.assertIn(b"nets_test_ip_web2_1", out) + + expected_wget = { + "172.19.1.10": "test1", + "172.19.2.10": "test1", + "172.19.2.11": "test2", + "web3": "test3", + "172.19.1.13": "test4", + } + + for service in ("web1", "web2"): + for ip, expect in expected_wget.items(): + wget_cmd = [ + podman_compose_path(), + "-f", + self.compose_file(), + "exec", + service, + "wget", + "-q", + "-O-", + f"http://{ip}:8001/index.txt", + ] + out, _ = self.run_subprocess_assert_returncode(wget_cmd) + self.assertEqual(f"{expect}\r\n", out.decode('utf-8')) + + expected_macip = { + "web1": { + "eth0": ["172.19.1.10", "02:01:01:00:01:01"], + "eth1": ["172.19.2.10", "02:01:01:00:02:01"], + }, + "web2": {"eth0": ["172.19.2.11", "02:01:01:00:02:02"]}, + } + + for service, interfaces in expected_macip.items(): + ip_cmd = [ + podman_compose_path(), + "-f", + self.compose_file(), + "exec", + service, + "ip", + "addr", + "show", + ] + out, _ = self.run_subprocess_assert_returncode(ip_cmd) + for interface, values in interfaces.items(): + ip, mac = values + self.assertIn(f"ether {mac}", out.decode('utf-8')) + self.assertIn(f"inet {ip}/", out.decode('utf-8')) diff --git a/tests/integration/test_podman_compose_tests.py b/tests/integration/test_podman_compose_tests.py new file mode 100644 index 0000000..67df2cd --- /dev/null +++ b/tests/integration/test_podman_compose_tests.py @@ -0,0 +1,189 @@ +# SPDX-License-Identifier: GPL-2.0 + +""" +test_podman_compose_up_down.py + +Tests the podman compose up and down commands used to create and remove services. 
+""" + +# pylint: disable=redefined-outer-name +import os +import unittest + +from tests.integration.test_podman_compose import podman_compose_path +from tests.integration.test_podman_compose import test_path +from tests.integration.test_utils import RunSubprocessMixin + + +class TestPodmanCompose(unittest.TestCase, RunSubprocessMixin): + def test_exit_from(self): + up_cmd = [ + "coverage", + "run", + podman_compose_path(), + "-f", + os.path.join(test_path(), "exit-from", "docker-compose.yaml"), + "up", + ] + + self.run_subprocess_assert_returncode(up_cmd + ["--exit-code-from", "sh1"], 1) + self.run_subprocess_assert_returncode(up_cmd + ["--exit-code-from", "sh2"], 2) + + def test_run(self): + """ + This will test depends_on as well + """ + run_cmd = [ + "coverage", + "run", + podman_compose_path(), + "-f", + os.path.join(test_path(), "deps", "docker-compose.yaml"), + "run", + "--rm", + "sleep", + "/bin/sh", + "-c", + "wget -q -O - http://web:8000/hosts", + ] + + out, _ = self.run_subprocess_assert_returncode(run_cmd) + self.assertIn(b'127.0.0.1\tlocalhost', out) + + # Run it again to make sure we can run it twice. I saw an issue where a second run, with + # the container left up, would fail + run_cmd = [ + "coverage", + "run", + podman_compose_path(), + "-f", + os.path.join(test_path(), "deps", "docker-compose.yaml"), + "run", + "--rm", + "sleep", + "/bin/sh", + "-c", + "wget -q -O - http://web:8000/hosts", + ] + + out, _ = self.run_subprocess_assert_returncode(run_cmd) + self.assertIn(b'127.0.0.1\tlocalhost', out) + + # This leaves a container running. Not sure it's intended, but it matches docker-compose + down_cmd = [ + "coverage", + "run", + podman_compose_path(), + "-f", + os.path.join(test_path(), "deps", "docker-compose.yaml"), + "down", + ] + + self.run_subprocess_assert_returncode(down_cmd) + + def test_up_with_ports(self): + up_cmd = [ + "coverage", + "run", + podman_compose_path(), + "-f", + os.path.join(test_path(), "ports", "docker-compose.yml"), + "up", + "-d", + "--force-recreate", + ] + + down_cmd = [ + "coverage", + "run", + podman_compose_path(), + "-f", + os.path.join(test_path(), "ports", "docker-compose.yml"), + "down", + "--volumes", + ] + + try: + self.run_subprocess_assert_returncode(up_cmd) + + finally: + self.run_subprocess_assert_returncode(down_cmd) + + def test_down_with_vols(self): + up_cmd = [ + "coverage", + "run", + podman_compose_path(), + "-f", + os.path.join(test_path(), "vol", "docker-compose.yaml"), + "up", + "-d", + ] + + down_cmd = [ + "coverage", + "run", + podman_compose_path(), + "-f", + os.path.join(test_path(), "vol", "docker-compose.yaml"), + "down", + "--volumes", + ] + + try: + self.run_subprocess_assert_returncode(["podman", "volume", "create", "my-app-data"]) + self.run_subprocess_assert_returncode([ + "podman", + "volume", + "create", + "actual-name-of-volume", + ]) + + self.run_subprocess_assert_returncode(up_cmd) + self.run_subprocess(["podman", "inspect", "volume", ""]) + + finally: + out, _, return_code = self.run_subprocess(down_cmd) + self.run_subprocess(["podman", "volume", "rm", "my-app-data"]) + self.run_subprocess(["podman", "volume", "rm", "actual-name-of-volume"]) + self.assertEqual(return_code, 0) + + def test_down_with_orphans(self): + container_id, _ = self.run_subprocess_assert_returncode([ + "podman", + "run", + "--rm", + "-d", + "nopush/podman-compose-test", + "dumb-init", + "/bin/busybox", + "httpd", + "-f", + "-h", + "/etc/", + "-p", + "8000", + ]) + + down_cmd = [ + "coverage", + "run", + podman_compose_path(), + 
"-f", + os.path.join(test_path(), "ports", "docker-compose.yml"), + "down", + "--volumes", + "--remove-orphans", + ] + + self.run_subprocess_assert_returncode(down_cmd) + + self.run_subprocess_assert_returncode( + [ + "podman", + "container", + "exists", + container_id.decode("utf-8"), + ], + 1, + ) diff --git a/tests/integration/test_podman_compose_up_down.py b/tests/integration/test_podman_compose_up_down.py new file mode 100644 index 0000000..f2f554c --- /dev/null +++ b/tests/integration/test_podman_compose_up_down.py @@ -0,0 +1,91 @@ +# SPDX-License-Identifier: GPL-2.0 + +""" +test_podman_compose_up_down.py + +Tests the podman compose up and down commands used to create and remove services. +""" + +# pylint: disable=redefined-outer-name +import os +import unittest + +from parameterized import parameterized + +from tests.integration.test_podman_compose import podman_compose_path +from tests.integration.test_podman_compose import test_path +from tests.integration.test_utils import RunSubprocessMixin + + +def profile_compose_file(): + """ "Returns the path to the `profile` compose file used for this test module""" + return os.path.join(test_path(), "profile", "docker-compose.yml") + + +class TestUpDown(unittest.TestCase, RunSubprocessMixin): + def tearDown(self): + """ + Ensures that the services within the "profile compose file" are removed between each test + case. + """ + # run the test case + + down_cmd = [ + "coverage", + "run", + podman_compose_path(), + "--profile", + "profile-1", + "--profile", + "profile-2", + "-f", + profile_compose_file(), + "down", + ] + self.run_subprocess(down_cmd) + + @parameterized.expand( + [ + ( + ["--profile", "profile-1", "up", "-d"], + {"default-service": True, "service-1": True, "service-2": False}, + ), + ( + ["--profile", "profile-2", "up", "-d"], + {"default-service": True, "service-1": False, "service-2": True}, + ), + ( + ["--profile", "profile-1", "--profile", "profile-2", "up", "-d"], + {"default-service": True, "service-1": True, "service-2": True}, + ), + ], + ) + def test_up(self, profiles, expected_services): + up_cmd = [ + "coverage", + "run", + podman_compose_path(), + "-f", + profile_compose_file(), + ] + up_cmd.extend(profiles) + + self.run_subprocess_assert_returncode(up_cmd) + + check_cmd = [ + "podman", + "container", + "ps", + "--format", + '"{{.Names}}"', + ] + out, _ = self.run_subprocess_assert_returncode(check_cmd) + + self.assertEqual(len(expected_services), 3) + actual_output = out.decode("utf-8") + + actual_services = {} + for service, _ in expected_services.items(): + actual_services[service] = service in actual_output + + self.assertEqual(expected_services, actual_services) diff --git a/tests/integration/test_utils.py b/tests/integration/test_utils.py new file mode 100644 index 0000000..c0faf32 --- /dev/null +++ b/tests/integration/test_utils.py @@ -0,0 +1,38 @@ +# SPDX-License-Identifier: GPL-2.0 + +import os +import subprocess +import time + + +class RunSubprocessMixin: + def is_debug_enabled(self): + return "TESTS_DEBUG" in os.environ + + def run_subprocess(self, args): + begin = time.time() + if self.is_debug_enabled(): + print("TEST_CALL", args) + proc = subprocess.Popen( + args, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + out, err = proc.communicate() + if self.is_debug_enabled(): + print("TEST_CALL completed", time.time() - begin) + print("STDOUT:", out.decode('utf-8')) + print("STDERR:", err.decode('utf-8')) + return out, err, proc.returncode + + def run_subprocess_assert_returncode(self, args, 
expected_returncode=0): + out, err, returncode = self.run_subprocess(args) + decoded_out = out.decode('utf-8') + decoded_err = err.decode('utf-8') + self.assertEqual( + returncode, + expected_returncode, + f"Invalid return code of process {returncode} != {expected_returncode}\n" + f"stdout: {decoded_out}\nstderr: {decoded_err}\n", + ) + return out, err diff --git a/tests/testlogs/docker-compose.yml b/tests/integration/testlogs/docker-compose.yml similarity index 100% rename from tests/testlogs/docker-compose.yml rename to tests/integration/testlogs/docker-compose.yml diff --git a/tests/uidmaps/docker-compose.yml b/tests/integration/uidmaps/docker-compose.yml similarity index 100% rename from tests/uidmaps/docker-compose.yml rename to tests/integration/uidmaps/docker-compose.yml diff --git a/tests/ulimit/Dockerfile b/tests/integration/ulimit/Dockerfile similarity index 100% rename from tests/ulimit/Dockerfile rename to tests/integration/ulimit/Dockerfile diff --git a/tests/ulimit/docker-compose.yaml b/tests/integration/ulimit/docker-compose.yaml similarity index 100% rename from tests/ulimit/docker-compose.yaml rename to tests/integration/ulimit/docker-compose.yaml diff --git a/tests/ulimit/ulimit.sh b/tests/integration/ulimit/ulimit.sh similarity index 100% rename from tests/ulimit/ulimit.sh rename to tests/integration/ulimit/ulimit.sh diff --git a/tests/integration/ulimit_build/Dockerfile b/tests/integration/ulimit_build/Dockerfile new file mode 100644 index 0000000..f80774e --- /dev/null +++ b/tests/integration/ulimit_build/Dockerfile @@ -0,0 +1,5 @@ +FROM busybox + +COPY ./ulimit.sh /bin/ulimit.sh + +RUN /bin/ulimit.sh diff --git a/tests/integration/ulimit_build/docker-compose.yaml b/tests/integration/ulimit_build/docker-compose.yaml new file mode 100644 index 0000000..ade949e --- /dev/null +++ b/tests/integration/ulimit_build/docker-compose.yaml @@ -0,0 +1,26 @@ +version: "3" +services: + ulimit1: + image: ulimit_build_test + build: + context: ./ + dockerfile: Dockerfile + ulimits: nofile=1001 + ulimit2: + image: ulimit_build_test + build: + context: ./ + dockerfile: Dockerfile + ulimits: + - nproc=1002:2002 + - nofile=1002 + ulimit3: + image: ulimit_build_test + build: + context: ./ + dockerfile: Dockerfile + ulimits: + nofile: 1003 + nproc: + soft: 1003 + hard: 2003 diff --git a/tests/integration/ulimit_build/ulimit.sh b/tests/integration/ulimit_build/ulimit.sh new file mode 100755 index 0000000..1685b3d --- /dev/null +++ b/tests/integration/ulimit_build/ulimit.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +echo "soft process limit:" $(ulimit -S -u) +echo "hard process limit:" $(ulimit -H -u) +echo "soft nofile limit:" $(ulimit -S -n) +echo "hard nofile limit:" $(ulimit -H -n) diff --git a/tests/vol/README.md b/tests/integration/vol/README.md similarity index 100% rename from tests/vol/README.md rename to tests/integration/vol/README.md diff --git a/tests/vol/docker-compose.yaml b/tests/integration/vol/docker-compose.yaml similarity index 58% rename from tests/vol/docker-compose.yaml rename to tests/integration/vol/docker-compose.yaml index 5dc8d25..85a60d6 100644 --- a/tests/vol/docker-compose.yaml +++ b/tests/integration/vol/docker-compose.yaml @@ -1,8 +1,8 @@ version: "3" services: web: - image: busybox - command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8000"] + image: nopush/podman-compose-test + command: ["dumb-init", "/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8000"] working_dir: /var/www/html restart: always volumes: @@ -11,21 +11,21 @@ services: - 
/run - /tmp web1: - image: busybox - command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8001"] + image: nopush/podman-compose-test + command: ["dumb-init", "/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8001"] restart: unless-stopped working_dir: /var/www/html volumes: - myvol1:/var/www/html:ro,z web2: - image: busybox - command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8002"] + image: nopush/podman-compose-test + command: ["dumb-init", "/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8002"] working_dir: /var/www/html volumes: - myvol2:/var/www/html:ro web3: - image: busybox - command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8003"] + image: nopush/podman-compose-test + command: ["dumb-init", "/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8003"] working_dir: /var/www/html volumes: - myvol2:/var/www/html diff --git a/tests/volumes_merge/docker-compose.override.yaml b/tests/integration/volumes_merge/docker-compose.override.yaml similarity index 100% rename from tests/volumes_merge/docker-compose.override.yaml rename to tests/integration/volumes_merge/docker-compose.override.yaml diff --git a/tests/volumes_merge/docker-compose.yaml b/tests/integration/volumes_merge/docker-compose.yaml similarity index 100% rename from tests/volumes_merge/docker-compose.yaml rename to tests/integration/volumes_merge/docker-compose.yaml diff --git a/tests/volumes_merge/index.txt b/tests/integration/volumes_merge/index.txt similarity index 100% rename from tests/volumes_merge/index.txt rename to tests/integration/volumes_merge/index.txt diff --git a/tests/volumes_merge/override.txt b/tests/integration/volumes_merge/override.txt similarity index 100% rename from tests/volumes_merge/override.txt rename to tests/integration/volumes_merge/override.txt diff --git a/tests/yamlmagic/docker-compose.yml b/tests/integration/yamlmagic/docker-compose.yml similarity index 100% rename from tests/yamlmagic/docker-compose.yml rename to tests/integration/yamlmagic/docker-compose.yml diff --git a/tests/profile/docker-compose.yml b/tests/profile/docker-compose.yml deleted file mode 100644 index 0a2a7cd..0000000 --- a/tests/profile/docker-compose.yml +++ /dev/null @@ -1,24 +0,0 @@ -version: "3" -services: - default-service: - image: busybox - command: ["/bin/busybox", "httpd", "-f", "-h", "/etc/", "-p", "8000"] - tmpfs: - - /run - - /tmp - service-1: - image: busybox - command: ["/bin/busybox", "httpd", "-f", "-h", "/etc/", "-p", "8000"] - tmpfs: - - /run - - /tmp - profiles: - - profile-1 - service-2: - image: busybox - command: ["/bin/busybox", "httpd", "-f", "-h", "/etc/", "-p", "8000"] - tmpfs: - - /run - - /tmp - profiles: - - profile-2 \ No newline at end of file diff --git a/tests/test_podman_compose.py b/tests/test_podman_compose.py deleted file mode 100644 index 14e80b2..0000000 --- a/tests/test_podman_compose.py +++ /dev/null @@ -1,84 +0,0 @@ -from pathlib import Path -import subprocess - - -def capture(command): - proc = subprocess.Popen( - command, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - ) - out, err = proc.communicate() - return out, err, proc.returncode - - -def test_podman_compose_extends_w_file_subdir(): - """ - Test that podman-compose can execute podman-compose -f up with extended File which - includes a build context - :return: - """ - main_path = Path(__file__).parent.parent - - command_up = [ - "python3", - str(main_path.joinpath("podman_compose.py")), - "-f", - str(main_path.joinpath("tests", 
"extends_w_file_subdir", "docker-compose.yml")), - "up", - "-d", - ] - - command_check_container = [ - "podman", - "container", - "ps", - "--all", - "--format", - '"{{.Image}}"', - ] - - command_down = [ - "podman", - "rmi", - "--force", - "localhost/subdir_test:me", - "docker.io/library/busybox", - ] - - out, _, returncode = capture(command_up) - assert 0 == returncode - # check container was created and exists - out, _, returncode = capture(command_check_container) - assert 0 == returncode - assert out == b'"localhost/subdir_test:me"\n' - out, _, returncode = capture(command_down) - # cleanup test image(tags) - assert 0 == returncode - # check container did not exists anymore - out, _, returncode = capture(command_check_container) - assert 0 == returncode - assert out == b"" - - -def test_podman_compose_extends_w_empty_service(): - """ - Test that podman-compose can execute podman-compose -f up with extended File which - includes an empty service. (e.g. if the file is used as placeholder for more complex configurations.) - :return: - """ - main_path = Path(__file__).parent.parent - - command_up = [ - "python3", - str(main_path.joinpath("podman_compose.py")), - "-f", - str( - main_path.joinpath("tests", "extends_w_empty_service", "docker-compose.yml") - ), - "up", - "-d", - ] - - _, _, returncode = capture(command_up) - assert 0 == returncode diff --git a/tests/test_podman_compose_config.py b/tests/test_podman_compose_config.py deleted file mode 100644 index 2f879ba..0000000 --- a/tests/test_podman_compose_config.py +++ /dev/null @@ -1,78 +0,0 @@ -""" -test_podman_compose_config.py - -Tests the podman-compose config command which is used to return defined compose services. -""" -# pylint: disable=redefined-outer-name -import os -from test_podman_compose import capture -import pytest - - -@pytest.fixture -def profile_compose_file(test_path): - """ "Returns the path to the `profile` compose file used for this test module""" - return os.path.join(test_path, "profile", "docker-compose.yml") - - -def test_config_no_profiles(podman_compose_path, profile_compose_file): - """ - Tests podman-compose config command without profile enablement. - - :param podman_compose_path: The fixture used to specify the path to the podman compose file. - :param profile_compose_file: The fixtued used to specify the path to the "profile" compose used in the test. - """ - config_cmd = ["python3", podman_compose_path, "-f", profile_compose_file, "config"] - - out, _, return_code = capture(config_cmd) - assert return_code == 0 - - string_output = out.decode("utf-8") - assert "default-service" in string_output - assert "service-1" not in string_output - assert "service-2" not in string_output - - -@pytest.mark.parametrize( - "profiles, expected_services", - [ - ( - ["--profile", "profile-1", "config"], - {"default-service": True, "service-1": True, "service-2": False}, - ), - ( - ["--profile", "profile-2", "config"], - {"default-service": True, "service-1": False, "service-2": True}, - ), - ( - ["--profile", "profile-1", "--profile", "profile-2", "config"], - {"default-service": True, "service-1": True, "service-2": True}, - ), - ], -) -def test_config_profiles( - podman_compose_path, profile_compose_file, profiles, expected_services -): - """ - Tests podman-compose - :param podman_compose_path: The fixture used to specify the path to the podman compose file. - :param profile_compose_file: The fixtued used to specify the path to the "profile" compose used in the test. 
- :param profiles: The enabled profiles for the parameterized test. - :param expected_services: Dictionary used to model the expected "enabled" services in the profile. - Key = service name, Value = True if the service is enabled, otherwise False. - """ - config_cmd = ["python3", podman_compose_path, "-f", profile_compose_file] - config_cmd.extend(profiles) - - out, _, return_code = capture(config_cmd) - assert return_code == 0 - - actual_output = out.decode("utf-8") - - assert len(expected_services) == 3 - - actual_services = {} - for service, _ in expected_services.items(): - actual_services[service] = service in actual_output - - assert expected_services == actual_services diff --git a/tests/test_podman_compose_include.py b/tests/test_podman_compose_include.py deleted file mode 100644 index c9867f5..0000000 --- a/tests/test_podman_compose_include.py +++ /dev/null @@ -1,71 +0,0 @@ -from pathlib import Path -import subprocess - - -def capture(command): - proc = subprocess.Popen( - command, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - ) - out, err = proc.communicate() - return out, err, proc.returncode - - -def test_podman_compose_include(): - """ - Test that podman-compose can execute podman-compose -f up with include - :return: - """ - main_path = Path(__file__).parent.parent - - command_up = [ - "python3", - str(main_path.joinpath("podman_compose.py")), - "-f", - str(main_path.joinpath("tests", "include", "docker-compose.yaml")), - "up", - "-d", - ] - - command_check_container = [ - "podman", - "ps", - "-a", - "--filter", - "label=io.podman.compose.project=include", - "--format", - '"{{.Image}}"', - ] - - command_container_id = [ - "podman", - "ps", - "-a", - "--filter", - "label=io.podman.compose.project=include", - "--format", - '"{{.ID}}"', - ] - - command_down = ["podman", "rm", "--force", "CONTAINER_ID"] - - out, _, returncode = capture(command_up) - assert 0 == returncode - out, _, returncode = capture(command_check_container) - assert 0 == returncode - assert out == b'"docker.io/library/busybox:latest"\n' - # Get container ID to remove it - out, _, returncode = capture(command_container_id) - assert 0 == returncode - assert out != b"" - container_id = out.decode().strip().replace('"', "") - command_down[3] = container_id - out, _, returncode = capture(command_down) - # cleanup test image(tags) - assert 0 == returncode - assert out != b"" - # check container did not exists anymore - out, _, returncode = capture(command_check_container) - assert 0 == returncode - assert out == b"" diff --git a/tests/test_podman_compose_up_down.py b/tests/test_podman_compose_up_down.py deleted file mode 100644 index 833604e..0000000 --- a/tests/test_podman_compose_up_down.py +++ /dev/null @@ -1,89 +0,0 @@ -""" -test_podman_compose_up_down.py - -Tests the podman compose up and down commands used to create and remove services. -""" -# pylint: disable=redefined-outer-name -import os -from test_podman_compose import capture -import pytest - - -@pytest.fixture -def profile_compose_file(test_path): - """ "Returns the path to the `profile` compose file used for this test module""" - return os.path.join(test_path, "profile", "docker-compose.yml") - - -@pytest.fixture(autouse=True) -def teardown(podman_compose_path, profile_compose_file): - """ - Ensures that the services within the "profile compose file" are removed between each test case. - - :param podman_compose_path: The path to the podman compose script. - :param profile_compose_file: The path to the compose file used for this test module. 
- """ - # run the test case - yield - - down_cmd = [ - "python3", - podman_compose_path, - "--profile", - "profile-1", - "--profile", - "profile-2", - "-f", - profile_compose_file, - "down", - ] - capture(down_cmd) - - -@pytest.mark.parametrize( - "profiles, expected_services", - [ - ( - ["--profile", "profile-1", "up", "-d"], - {"default-service": True, "service-1": True, "service-2": False}, - ), - ( - ["--profile", "profile-2", "up", "-d"], - {"default-service": True, "service-1": False, "service-2": True}, - ), - ( - ["--profile", "profile-1", "--profile", "profile-2", "up", "-d"], - {"default-service": True, "service-1": True, "service-2": True}, - ), - ], -) -def test_up(podman_compose_path, profile_compose_file, profiles, expected_services): - up_cmd = [ - "python3", - podman_compose_path, - "-f", - profile_compose_file, - ] - up_cmd.extend(profiles) - - out, _, return_code = capture(up_cmd) - assert return_code == 0 - - check_cmd = [ - "podman", - "container", - "ps", - "--format", - '"{{.Names}}"', - ] - out, _, return_code = capture(check_cmd) - assert return_code == 0 - - assert len(expected_services) == 3 - actual_output = out.decode("utf-8") - - actual_services = {} - for service, _ in expected_services.items(): - actual_services[service] = service in actual_output - - assert expected_services == actual_services diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/unit/test_can_merge_build.py b/tests/unit/test_can_merge_build.py new file mode 100644 index 0000000..91f3fdd --- /dev/null +++ b/tests/unit/test_can_merge_build.py @@ -0,0 +1,168 @@ +# SPDX-License-Identifier: GPL-2.0 +from __future__ import annotations + +import argparse +import copy +import os +import unittest + +import yaml +from parameterized import parameterized + +from podman_compose import PodmanCompose + + +class TestCanMergeBuild(unittest.TestCase): + @parameterized.expand([ + ({}, {}, {}), + ({}, {"test": "test"}, {"test": "test"}), + ({"test": "test"}, {}, {"test": "test"}), + ({"test": "test-1"}, {"test": "test-2"}, {"test": "test-2"}), + ({}, {"build": "."}, {"build": {"context": "."}}), + ({"build": "."}, {}, {"build": {"context": "."}}), + ({"build": "./dir-1"}, {"build": "./dir-2"}, {"build": {"context": "./dir-2"}}), + ({}, {"build": {"context": "./dir-1"}}, {"build": {"context": "./dir-1"}}), + ({"build": {"context": "./dir-1"}}, {}, {"build": {"context": "./dir-1"}}), + ( + {"build": {"context": "./dir-1"}}, + {"build": {"context": "./dir-2"}}, + {"build": {"context": "./dir-2"}}, + ), + ( + {}, + {"build": {"dockerfile": "dockerfile-1"}}, + {"build": {"dockerfile": "dockerfile-1"}}, + ), + ( + {"build": {"dockerfile": "dockerfile-1"}}, + {}, + {"build": {"dockerfile": "dockerfile-1"}}, + ), + ( + {"build": {"dockerfile": "./dockerfile-1"}}, + {"build": {"dockerfile": "./dockerfile-2"}}, + {"build": {"dockerfile": "./dockerfile-2"}}, + ), + ( + {"build": {"dockerfile": "./dockerfile-1"}}, + {"build": {"context": "./dir-2"}}, + {"build": {"dockerfile": "./dockerfile-1", "context": "./dir-2"}}, + ), + ( + {"build": {"dockerfile": "./dockerfile-1", "context": "./dir-1"}}, + {"build": {"dockerfile": "./dockerfile-2", "context": "./dir-2"}}, + {"build": {"dockerfile": "./dockerfile-2", "context": "./dir-2"}}, + ), + ( + {"build": {"dockerfile": "./dockerfile-1"}}, + {"build": {"dockerfile": "./dockerfile-2", "args": ["ENV1=1"]}}, + {"build": {"dockerfile": "./dockerfile-2", "args": ["ENV1=1"]}}, + ), + ( + {"build": {"dockerfile": 
"./dockerfile-2", "args": ["ENV1=1"]}}, + {"build": {"dockerfile": "./dockerfile-1"}}, + {"build": {"dockerfile": "./dockerfile-1", "args": ["ENV1=1"]}}, + ), + ( + {"build": {"dockerfile": "./dockerfile-2", "args": ["ENV1=1"]}}, + {"build": {"dockerfile": "./dockerfile-1", "args": ["ENV2=2"]}}, + {"build": {"dockerfile": "./dockerfile-1", "args": ["ENV1=1", "ENV2=2"]}}, + ), + ]) + def test_parse_compose_file_when_multiple_composes(self, input, override, expected): + compose_test_1 = {"services": {"test-service": input}} + compose_test_2 = {"services": {"test-service": override}} + dump_yaml(compose_test_1, "test-compose-1.yaml") + dump_yaml(compose_test_2, "test-compose-2.yaml") + + podman_compose = PodmanCompose() + set_args(podman_compose, ["test-compose-1.yaml", "test-compose-2.yaml"]) + + podman_compose._parse_compose_file() # pylint: disable=protected-access + + actual_compose = {} + if podman_compose.services: + podman_compose.services["test-service"].pop("_deps") + actual_compose = podman_compose.services["test-service"] + self.assertEqual(actual_compose, expected) + + # $$$ is a placeholder for either command or entrypoint + @parameterized.expand([ + ({}, {"$$$": []}, {"$$$": []}), + ({"$$$": []}, {}, {"$$$": []}), + ({"$$$": []}, {"$$$": "sh-2"}, {"$$$": ["sh-2"]}), + ({"$$$": "sh-2"}, {"$$$": []}, {"$$$": []}), + ({}, {"$$$": "sh"}, {"$$$": ["sh"]}), + ({"$$$": "sh"}, {}, {"$$$": ["sh"]}), + ({"$$$": "sh-1"}, {"$$$": "sh-2"}, {"$$$": ["sh-2"]}), + ({"$$$": ["sh-1"]}, {"$$$": "sh-2"}, {"$$$": ["sh-2"]}), + ({"$$$": "sh-1"}, {"$$$": ["sh-2"]}, {"$$$": ["sh-2"]}), + ({"$$$": "sh-1"}, {"$$$": ["sh-2", "sh-3"]}, {"$$$": ["sh-2", "sh-3"]}), + ({"$$$": ["sh-1"]}, {"$$$": ["sh-2", "sh-3"]}, {"$$$": ["sh-2", "sh-3"]}), + ({"$$$": ["sh-1", "sh-2"]}, {"$$$": ["sh-3", "sh-4"]}, {"$$$": ["sh-3", "sh-4"]}), + ({}, {"$$$": ["sh-3", "sh 4"]}, {"$$$": ["sh-3", "sh 4"]}), + ({"$$$": "sleep infinity"}, {"$$$": "sh"}, {"$$$": ["sh"]}), + ({"$$$": "sh"}, {"$$$": "sleep infinity"}, {"$$$": ["sleep", "infinity"]}), + ( + {}, + {"$$$": "bash -c 'sleep infinity'"}, + {"$$$": ["bash", "-c", "sleep infinity"]}, + ), + ]) + def test_parse_compose_file_when_multiple_composes_keys_command_entrypoint( + self, base_template, override_template, expected_template + ): + for key in ['command', 'entrypoint']: + base, override, expected = template_to_expression( + base_template, override_template, expected_template, key + ) + compose_test_1 = {"services": {"test-service": base}} + compose_test_2 = {"services": {"test-service": override}} + dump_yaml(compose_test_1, "test-compose-1.yaml") + dump_yaml(compose_test_2, "test-compose-2.yaml") + + podman_compose = PodmanCompose() + set_args(podman_compose, ["test-compose-1.yaml", "test-compose-2.yaml"]) + + podman_compose._parse_compose_file() # pylint: disable=protected-access + + actual = {} + if podman_compose.services: + podman_compose.services["test-service"].pop("_deps") + actual = podman_compose.services["test-service"] + self.assertEqual(actual, expected) + + +def set_args(podman_compose: PodmanCompose, file_names: list[str]) -> None: + podman_compose.global_args = argparse.Namespace() + podman_compose.global_args.file = file_names + podman_compose.global_args.project_name = None + podman_compose.global_args.env_file = None + podman_compose.global_args.profile = [] + podman_compose.global_args.in_pod_bool = True + podman_compose.global_args.no_normalize = True + + +def dump_yaml(compose: dict, name: str) -> None: + with open(name, "w", encoding="utf-8") as 
outfile: + yaml.safe_dump(compose, outfile, default_flow_style=False) + + +def template_to_expression(base, override, expected, key): + base_copy = copy.deepcopy(base) + override_copy = copy.deepcopy(override) + expected_copy = copy.deepcopy(expected) + + expected_copy[key] = expected_copy.pop("$$$") + if "$$$" in base: + base_copy[key] = base_copy.pop("$$$") + if "$$$" in override: + override_copy[key] = override_copy.pop("$$$") + return base_copy, override_copy, expected_copy + + +def test_clean_test_yamls() -> None: + test_files = ["test-compose-1.yaml", "test-compose-2.yaml"] + for file in test_files: + if os.path.exists(file): + os.remove(file) diff --git a/tests/unit/test_compose_exec_args.py b/tests/unit/test_compose_exec_args.py new file mode 100644 index 0000000..1092bcd --- /dev/null +++ b/tests/unit/test_compose_exec_args.py @@ -0,0 +1,46 @@ +# SPDX-License-Identifier: GPL-2.0 + +import argparse +import unittest + +from podman_compose import compose_exec_args + + +class TestComposeExecArgs(unittest.TestCase): + def test_minimal(self): + cnt = get_minimal_container() + args = get_minimal_args() + + result = compose_exec_args(cnt, "container_name", args) + expected = ["--interactive", "--tty", "container_name"] + self.assertEqual(result, expected) + + def test_additional_env_value_equals(self): + cnt = get_minimal_container() + args = get_minimal_args() + args.env = ["key=valuepart1=valuepart2"] + + result = compose_exec_args(cnt, "container_name", args) + expected = [ + "--interactive", + "--tty", + "--env", + "key=valuepart1=valuepart2", + "container_name", + ] + self.assertEqual(result, expected) + + +def get_minimal_container(): + return {} + + +def get_minimal_args(): + return argparse.Namespace( + T=None, + cnt_command=None, + env=None, + privileged=None, + user=None, + workdir=None, + ) diff --git a/tests/unit/test_compose_run_update_container_from_args.py b/tests/unit/test_compose_run_update_container_from_args.py new file mode 100644 index 0000000..3bf2a13 --- /dev/null +++ b/tests/unit/test_compose_run_update_container_from_args.py @@ -0,0 +1,76 @@ +# SPDX-License-Identifier: GPL-2.0 + +import argparse +import unittest + +from podman_compose import PodmanCompose +from podman_compose import compose_run_update_container_from_args + + +class TestComposeRunUpdateContainerFromArgs(unittest.TestCase): + def test_minimal(self): + cnt = get_minimal_container() + compose = get_minimal_compose() + args = get_minimal_args() + + compose_run_update_container_from_args(compose, cnt, args) + + expected_cnt = {"name": "default_name", "tty": True} + self.assertEqual(cnt, expected_cnt) + + def test_additional_env_value_equals(self): + cnt = get_minimal_container() + compose = get_minimal_compose() + args = get_minimal_args() + args.env = ["key=valuepart1=valuepart2"] + + compose_run_update_container_from_args(compose, cnt, args) + + expected_cnt = { + "environment": { + "key": "valuepart1=valuepart2", + }, + "name": "default_name", + "tty": True, + } + self.assertEqual(cnt, expected_cnt) + + def test_publish_ports(self): + cnt = get_minimal_container() + compose = get_minimal_compose() + args = get_minimal_args() + args.publish = ["1111", "2222:2222"] + + compose_run_update_container_from_args(compose, cnt, args) + + expected_cnt = { + "name": "default_name", + "ports": ["1111", "2222:2222"], + "tty": True, + } + self.assertEqual(cnt, expected_cnt) + + +def get_minimal_container(): + return {} + + +def get_minimal_compose(): + return PodmanCompose() + + +def get_minimal_args(): + return 
argparse.Namespace( + T=None, + cnt_command=None, + entrypoint=None, + env=None, + name="default_name", + rm=None, + service=None, + publish=None, + service_ports=None, + user=None, + volume=None, + workdir=None, + ) diff --git a/tests/unit/test_container_to_args.py b/tests/unit/test_container_to_args.py new file mode 100644 index 0000000..bd0fbdc --- /dev/null +++ b/tests/unit/test_container_to_args.py @@ -0,0 +1,594 @@ +# SPDX-License-Identifier: GPL-2.0 + +import os +import unittest +from unittest import mock + +from parameterized import parameterized + +from podman_compose import container_to_args + + +def create_compose_mock(project_name="test_project_name"): + compose = mock.Mock() + compose.project_name = project_name + compose.dirname = "test_dirname" + compose.container_names_by_service.get = mock.Mock(return_value=None) + compose.prefer_volume_over_mount = False + compose.default_net = None + compose.networks = {} + compose.x_podman = {} + + async def podman_output(*args, **kwargs): + pass + + compose.podman.output = mock.Mock(side_effect=podman_output) + return compose + + +def get_minimal_container(): + return { + "name": "project_name_service_name1", + "service_name": "service_name", + "image": "busybox", + } + + +def get_test_file_path(rel_path): + repo_root = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) + return os.path.realpath(os.path.join(repo_root, rel_path)) + + +class TestContainerToArgs(unittest.IsolatedAsyncioTestCase): + async def test_minimal(self): + c = create_compose_mock() + + cnt = get_minimal_container() + + args = await container_to_args(c, cnt) + self.assertEqual( + args, + [ + "--name=project_name_service_name1", + "-d", + "--network=bridge", + "--network-alias=service_name", + "busybox", + ], + ) + + async def test_runtime(self): + c = create_compose_mock() + + cnt = get_minimal_container() + cnt["runtime"] = "runsc" + + args = await container_to_args(c, cnt) + self.assertEqual( + args, + [ + "--name=project_name_service_name1", + "-d", + "--network=bridge", + "--network-alias=service_name", + "--runtime", + "runsc", + "busybox", + ], + ) + + async def test_sysctl_list(self): + c = create_compose_mock() + + cnt = get_minimal_container() + cnt["sysctls"] = [ + "net.core.somaxconn=1024", + "net.ipv4.tcp_syncookies=0", + ] + + args = await container_to_args(c, cnt) + self.assertEqual( + args, + [ + "--name=project_name_service_name1", + "-d", + "--network=bridge", + "--network-alias=service_name", + "--sysctl", + "net.core.somaxconn=1024", + "--sysctl", + "net.ipv4.tcp_syncookies=0", + "busybox", + ], + ) + + async def test_sysctl_map(self): + c = create_compose_mock() + + cnt = get_minimal_container() + cnt["sysctls"] = { + "net.core.somaxconn": 1024, + "net.ipv4.tcp_syncookies": 0, + } + + args = await container_to_args(c, cnt) + self.assertEqual( + args, + [ + "--name=project_name_service_name1", + "-d", + "--network=bridge", + "--network-alias=service_name", + "--sysctl", + "net.core.somaxconn=1024", + "--sysctl", + "net.ipv4.tcp_syncookies=0", + "busybox", + ], + ) + + async def test_sysctl_wrong_type(self): + c = create_compose_mock() + cnt = get_minimal_container() + + # check whether wrong types are correctly rejected + for wrong_type in [True, 0, 0.0, "wrong", ()]: + with self.assertRaises(TypeError): + cnt["sysctls"] = wrong_type + await container_to_args(c, cnt) + + async def test_pid(self): + c = create_compose_mock() + cnt = get_minimal_container() + + cnt["pid"] = "host" + + args = await container_to_args(c, cnt) + 
self.assertEqual( + args, + [ + "--name=project_name_service_name1", + "-d", + "--network=bridge", + "--network-alias=service_name", + "--pid", + "host", + "busybox", + ], + ) + + async def test_http_proxy(self): + c = create_compose_mock() + + cnt = get_minimal_container() + cnt["http_proxy"] = False + + args = await container_to_args(c, cnt) + self.assertEqual( + args, + [ + "--name=project_name_service_name1", + "-d", + "--http-proxy=false", + "--network=bridge", + "--network-alias=service_name", + "busybox", + ], + ) + + async def test_uidmaps_extension_old_path(self): + c = create_compose_mock() + + cnt = get_minimal_container() + cnt['x-podman'] = {'uidmaps': ['1000:1000:1']} + + with self.assertRaises(ValueError): + await container_to_args(c, cnt) + + async def test_uidmaps_extension(self): + c = create_compose_mock() + + cnt = get_minimal_container() + cnt['x-podman.uidmaps'] = ['1000:1000:1', '1001:1001:2'] + + args = await container_to_args(c, cnt) + self.assertEqual( + args, + [ + "--name=project_name_service_name1", + "-d", + "--network=bridge", + "--network-alias=service_name", + '--uidmap', + '1000:1000:1', + '--uidmap', + '1001:1001:2', + "busybox", + ], + ) + + async def test_gidmaps_extension(self): + c = create_compose_mock() + + cnt = get_minimal_container() + cnt['x-podman.gidmaps'] = ['1000:1000:1', '1001:1001:2'] + + args = await container_to_args(c, cnt) + self.assertEqual( + args, + [ + "--name=project_name_service_name1", + "-d", + "--network=bridge", + "--network-alias=service_name", + '--gidmap', + '1000:1000:1', + '--gidmap', + '1001:1001:2', + "busybox", + ], + ) + + async def test_rootfs_extension(self): + c = create_compose_mock() + + cnt = get_minimal_container() + del cnt["image"] + cnt["x-podman.rootfs"] = "/path/to/rootfs" + + args = await container_to_args(c, cnt) + self.assertEqual( + args, + [ + "--name=project_name_service_name1", + "-d", + "--network=bridge", + "--network-alias=service_name", + "--rootfs", + "/path/to/rootfs", + ], + ) + + async def test_env_file_str(self): + c = create_compose_mock() + + cnt = get_minimal_container() + env_file = get_test_file_path('tests/integration/env-file-tests/env-files/project-1.env') + cnt['env_file'] = env_file + + args = await container_to_args(c, cnt) + self.assertEqual( + args, + [ + "--name=project_name_service_name1", + "-d", + "-e", + "ZZVAR1=podman-rocks-123", + "-e", + "ZZVAR2=podman-rocks-124", + "-e", + "ZZVAR3=podman-rocks-125", + "--network=bridge", + "--network-alias=service_name", + "busybox", + ], + ) + + async def test_env_file_str_not_exists(self): + c = create_compose_mock() + + cnt = get_minimal_container() + cnt['env_file'] = 'notexists' + + with self.assertRaises(ValueError): + await container_to_args(c, cnt) + + async def test_env_file_str_array_one_path(self): + c = create_compose_mock() + + cnt = get_minimal_container() + env_file = get_test_file_path('tests/integration/env-file-tests/env-files/project-1.env') + cnt['env_file'] = [env_file] + + args = await container_to_args(c, cnt) + self.assertEqual( + args, + [ + "--name=project_name_service_name1", + "-d", + "-e", + "ZZVAR1=podman-rocks-123", + "-e", + "ZZVAR2=podman-rocks-124", + "-e", + "ZZVAR3=podman-rocks-125", + "--network=bridge", + "--network-alias=service_name", + "busybox", + ], + ) + + async def test_env_file_str_array_two_paths(self): + c = create_compose_mock() + + cnt = get_minimal_container() + env_file = get_test_file_path('tests/integration/env-file-tests/env-files/project-1.env') + env_file_2 = 
get_test_file_path('tests/integration/env-file-tests/env-files/project-2.env') + cnt['env_file'] = [env_file, env_file_2] + + args = await container_to_args(c, cnt) + self.assertEqual( + args, + [ + "--name=project_name_service_name1", + "-d", + "-e", + "ZZVAR1=podman-rocks-123", + "-e", + "ZZVAR2=podman-rocks-124", + "-e", + "ZZVAR3=podman-rocks-125", + "-e", + "ZZVAR1=podman-rocks-223", + "-e", + "ZZVAR2=podman-rocks-224", + "--network=bridge", + "--network-alias=service_name", + "busybox", + ], + ) + + async def test_env_file_obj_required(self): + c = create_compose_mock() + + cnt = get_minimal_container() + env_file = get_test_file_path('tests/integration/env-file-tests/env-files/project-1.env') + cnt['env_file'] = {'path': env_file, 'required': True} + + args = await container_to_args(c, cnt) + self.assertEqual( + args, + [ + "--name=project_name_service_name1", + "-d", + "-e", + "ZZVAR1=podman-rocks-123", + "-e", + "ZZVAR2=podman-rocks-124", + "-e", + "ZZVAR3=podman-rocks-125", + "--network=bridge", + "--network-alias=service_name", + "busybox", + ], + ) + + async def test_env_file_obj_required_non_existent_path(self): + c = create_compose_mock() + + cnt = get_minimal_container() + cnt['env_file'] = {'path': 'not-exists', 'required': True} + + with self.assertRaises(ValueError): + await container_to_args(c, cnt) + + async def test_env_file_obj_optional(self): + c = create_compose_mock() + + cnt = get_minimal_container() + cnt['env_file'] = {'path': 'not-exists', 'required': False} + + args = await container_to_args(c, cnt) + self.assertEqual( + args, + [ + "--name=project_name_service_name1", + "-d", + "--network=bridge", + "--network-alias=service_name", + "busybox", + ], + ) + + async def test_gpu_count_all(self): + c = create_compose_mock() + + cnt = get_minimal_container() + cnt["command"] = ["nvidia-smi"] + cnt["deploy"] = {"resources": {"reservations": {"devices": [{}]}}} + + cnt["deploy"]["resources"]["reservations"]["devices"][0] = { + "driver": "nvidia", + "count": "all", + "capabilities": ["gpu"], + } + + args = await container_to_args(c, cnt) + self.assertEqual( + args, + [ + "--name=project_name_service_name1", + "-d", + "--network=bridge", + "--network-alias=service_name", + "--device", + "nvidia.com/gpu=all", + "--security-opt=label=disable", + "busybox", + "nvidia-smi", + ], + ) + + async def test_gpu_count_specific(self): + c = create_compose_mock() + + cnt = get_minimal_container() + cnt["command"] = ["nvidia-smi"] + cnt["deploy"] = { + "resources": { + "reservations": { + "devices": [ + { + "driver": "nvidia", + "count": 2, + "capabilities": ["gpu"], + } + ] + } + } + } + + args = await container_to_args(c, cnt) + self.assertEqual( + args, + [ + "--name=project_name_service_name1", + "-d", + "--network=bridge", + "--network-alias=service_name", + "--device", + "nvidia.com/gpu=0", + "--device", + "nvidia.com/gpu=1", + "--security-opt=label=disable", + "busybox", + "nvidia-smi", + ], + ) + + async def test_gpu_device_ids_all(self): + c = create_compose_mock() + + cnt = get_minimal_container() + cnt["command"] = ["nvidia-smi"] + cnt["deploy"] = { + "resources": { + "reservations": { + "devices": [ + { + "driver": "nvidia", + "device_ids": "all", + "capabilities": ["gpu"], + } + ] + } + } + } + + args = await container_to_args(c, cnt) + self.assertEqual( + args, + [ + "--name=project_name_service_name1", + "-d", + "--network=bridge", + "--network-alias=service_name", + "--device", + "nvidia.com/gpu=all", + "--security-opt=label=disable", + "busybox", + "nvidia-smi", + 
], + ) + + async def test_gpu_device_ids_specific(self): + c = create_compose_mock() + + cnt = get_minimal_container() + cnt["command"] = ["nvidia-smi"] + cnt["deploy"] = { + "resources": { + "reservations": { + "devices": [ + { + "driver": "nvidia", + "device_ids": [1, 3], + "capabilities": ["gpu"], + } + ] + } + } + } + + args = await container_to_args(c, cnt) + self.assertEqual( + args, + [ + "--name=project_name_service_name1", + "-d", + "--network=bridge", + "--network-alias=service_name", + "--device", + "nvidia.com/gpu=1", + "--device", + "nvidia.com/gpu=3", + "--security-opt=label=disable", + "busybox", + "nvidia-smi", + ], + ) + + @parameterized.expand([ + (False, "z", ["--mount", "type=bind,source=./foo,destination=/mnt,z"]), + (False, "Z", ["--mount", "type=bind,source=./foo,destination=/mnt,Z"]), + (True, "z", ["-v", "./foo:/mnt:z"]), + (True, "Z", ["-v", "./foo:/mnt:Z"]), + ]) + async def test_selinux_volume(self, prefer_volume, selinux_type, expected_additional_args): + c = create_compose_mock() + c.prefer_volume_over_mount = prefer_volume + + cnt = get_minimal_container() + + # This is supposed to happen during `_parse_compose_file` + # but that is probably getting skipped during testing + cnt["_service"] = cnt["service_name"] + + cnt["volumes"] = [ + { + "type": "bind", + "source": "./foo", + "target": "/mnt", + "bind": { + "selinux": selinux_type, + }, + } + ] + + args = await container_to_args(c, cnt) + self.assertEqual( + args, + [ + "--name=project_name_service_name1", + "-d", + *expected_additional_args, + "--network=bridge", + "--network-alias=service_name", + "busybox", + ], + ) + + @parameterized.expand([ + ("not_compat", False, "test_project_name", "test_project_name_network1"), + ("compat_no_dash", True, "test_project_name", "test_project_name_network1"), + ("compat_dash", True, "test_project-name", "test_projectname_network1"), + ]) + async def test_network_default_name(self, name, is_compat, project_name, expected_network_name): + c = create_compose_mock(project_name) + c.x_podman = {"default_net_name_compat": is_compat} + c.networks = {'network1': {}} + + cnt = get_minimal_container() + cnt['networks'] = ['network1'] + + args = await container_to_args(c, cnt) + self.assertEqual( + args, + [ + "--name=project_name_service_name1", + "-d", + f"--network={expected_network_name}", + "--network-alias=service_name", + "busybox", + ], + ) diff --git a/tests/unit/test_container_to_args_secrets.py b/tests/unit/test_container_to_args_secrets.py new file mode 100644 index 0000000..86540a7 --- /dev/null +++ b/tests/unit/test_container_to_args_secrets.py @@ -0,0 +1,408 @@ +# SPDX-License-Identifier: GPL-2.0 + +import os +import unittest + +from parameterized import parameterized + +from podman_compose import container_to_args +from tests.unit.test_container_to_args import create_compose_mock +from tests.unit.test_container_to_args import get_minimal_container + + +def repo_root(): + return os.path.dirname(os.path.dirname(os.path.dirname(__file__))) + + +class TestContainerToArgsSecrets(unittest.IsolatedAsyncioTestCase): + async def test_pass_secret_as_env_variable(self): + c = create_compose_mock() + c.declared_secrets = { + "my_secret": {"external": "true"} # must have external or name value + } + + cnt = get_minimal_container() + cnt["secrets"] = [ + { + "source": "my_secret", + "target": "ENV_SECRET", + "type": "env", + }, + ] + + args = await container_to_args(c, cnt) + self.assertEqual( + args, + [ + "--name=project_name_service_name1", + "-d", + "--network=bridge", + 
"--network-alias=service_name", + "--secret", + "my_secret,type=env,target=ENV_SECRET", + "busybox", + ], + ) + + async def test_secret_as_env_external_true_has_no_name(self): + c = create_compose_mock() + c.declared_secrets = { + "my_secret": { + "name": "my_secret", # must have external or name value + } + } + + cnt = get_minimal_container() + cnt["_service"] = "test-service" + cnt["secrets"] = [ + { + "source": "my_secret", + "target": "ENV_SECRET", + "type": "env", + } + ] + + args = await container_to_args(c, cnt) + self.assertEqual( + args, + [ + "--name=project_name_service_name1", + "-d", + "--network=bridge", + "--network-alias=service_name", + "--secret", + "my_secret,type=env,target=ENV_SECRET", + "busybox", + ], + ) + + async def test_pass_secret_as_env_variable_no_external(self): + c = create_compose_mock() + c.declared_secrets = { + "my_secret": {} # must have external or name value + } + + cnt = get_minimal_container() + cnt["_service"] = "test-service" + cnt["secrets"] = [ + { + "source": "my_secret", + "target": "ENV_SECRET", + "type": "env", + } + ] + + with self.assertRaises(ValueError) as context: + await container_to_args(c, cnt) + self.assertIn('ERROR: unparsable secret: ', str(context.exception)) + + @parameterized.expand([ + ( + "secret_no_name", + {"my_secret": "my_secret_name", "external": "true"}, + {}, # must have a name + ), + ( + "no_secret_name_in_declared_secrets", + {}, # must have a name + { + "source": "my_secret_name", + }, + ), + ( + "secret_name_does_not_match_declared_secrets_name", + { + "wrong_name": "my_secret_name", + }, + { + "source": "name", # secret name must match the one in declared_secrets + }, + ), + ( + "secret_name_empty_string", + {"": "my_secret_name"}, + { + "source": "", # can not be empty string + }, + ), + ]) + async def test_secret_name(self, test_name, declared_secrets, add_to_minimal_container): + c = create_compose_mock() + c.declared_secrets = declared_secrets + + cnt = get_minimal_container() + cnt["_service"] = "test-service" + cnt["secrets"] = [add_to_minimal_container] + + with self.assertRaises(ValueError) as context: + await container_to_args(c, cnt) + self.assertIn('ERROR: undeclared secret: ', str(context.exception)) + + async def test_secret_string_no_external_name_in_declared_secrets(self): + c = create_compose_mock() + c.declared_secrets = {"my_secret_name": {"external": "true"}} + cnt = get_minimal_container() + cnt["_service"] = "test-service" + cnt["secrets"] = [ + "my_secret_name", + ] + args = await container_to_args(c, cnt) + self.assertEqual( + args, + [ + "--name=project_name_service_name1", + "-d", + "--network=bridge", + "--network-alias=service_name", + "--secret", + "my_secret_name", + "busybox", + ], + ) + + async def test_secret_string_options_external_name_in_declared_secrets(self): + c = create_compose_mock() + c.declared_secrets = { + "my_secret_name": { + "external": "true", + "name": "my_secret_name", + } + } + cnt = get_minimal_container() + cnt["_service"] = "test-service" + cnt["secrets"] = [ + { + "source": "my_secret_name", + "target": "my_secret_name", + "uid": "103", + "gid": "103", + "mode": "400", + } + ] + + with self.assertLogs() as cm: + args = await container_to_args(c, cnt) + self.assertEqual(len(cm.output), 1) + self.assertIn('That is un-supported and a no-op and is ignored.', cm.output[0]) + self.assertIn('my_secret_name', cm.output[0]) + + self.assertEqual( + args, + [ + "--name=project_name_service_name1", + "-d", + "--network=bridge", + "--network-alias=service_name", + 
"--secret", + "my_secret_name,uid=103,gid=103,mode=400", + "busybox", + ], + ) + + async def test_secret_string_external_name_in_declared_secrets_does_not_match_secret(self): + c = create_compose_mock() + c.declared_secrets = { + "my_secret_name": { + "external": "true", + "name": "wrong_secret_name", + } + } + cnt = get_minimal_container() + cnt["_service"] = "test-service" + cnt["secrets"] = [ + "my_secret_name", + ] + + with self.assertRaises(ValueError) as context: + await container_to_args(c, cnt) + self.assertIn('ERROR: Custom name/target reference ', str(context.exception)) + + async def test_secret_target_does_not_match_secret_name_secret_type_not_env(self): + c = create_compose_mock() + c.declared_secrets = { + "my_secret_name": { + "external": "true", + } + } + cnt = get_minimal_container() + cnt["_service"] = "test-service" + cnt["secrets"] = [ + { + "source": "my_secret_name", + "target": "does_not_equal_secret_name", + "type": "does_not_equal_env", + } + ] + + with self.assertRaises(ValueError) as context: + await container_to_args(c, cnt) + self.assertIn('ERROR: Custom name/target reference ', str(context.exception)) + + async def test_secret_target_does_not_match_secret_name_secret_type_env(self): + c = create_compose_mock() + c.declared_secrets = { + "my_secret_name": { + "external": "true", + } + } + cnt = get_minimal_container() + cnt["_service"] = "test-service" + cnt["secrets"] = [ + {"source": "my_secret_name", "target": "does_not_equal_secret_name", "type": "env"} + ] + + args = await container_to_args(c, cnt) + self.assertEqual( + args, + [ + "--name=project_name_service_name1", + "-d", + "--network=bridge", + "--network-alias=service_name", + "--secret", + "my_secret_name,type=env,target=does_not_equal_secret_name", + "busybox", + ], + ) + + async def test_secret_target_matches_secret_name_secret_type_not_env(self): + c = create_compose_mock() + c.declared_secrets = { + "my_secret_name": { + "external": "true", + } + } + cnt = get_minimal_container() + cnt["_service"] = "test-service" + cnt["secrets"] = [ + {"source": "my_secret_name", "target": "my_secret_name", "type": "does_not_equal_env"} + ] + + with self.assertLogs() as cm: + args = await container_to_args(c, cnt) + self.assertEqual(len(cm.output), 1) + self.assertIn('That is un-supported and a no-op and is ignored.', cm.output[0]) + self.assertIn('my_secret_name', cm.output[0]) + + self.assertEqual( + args, + [ + "--name=project_name_service_name1", + "-d", + "--network=bridge", + "--network-alias=service_name", + "--secret", + "my_secret_name,type=does_not_equal_env", + "busybox", + ], + ) + + @parameterized.expand([ + ( + "no_secret_target", + { + "file_secret": { + "file": "./my_secret", + } + }, + "file_secret", + repo_root() + "/test_dirname/my_secret:/run/secrets/file_secret:ro,rprivate,rbind", + ), + ( + "custom_target_name", + { + "file_secret": { + "file": "./my_secret", + } + }, + { + "source": "file_secret", + "target": "custom_name", + }, + repo_root() + "/test_dirname/my_secret:/run/secrets/custom_name:ro,rprivate,rbind", + ), + ( + "no_custom_target_name", + { + "file_secret": { + "file": "./my_secret", + } + }, + { + "source": "file_secret", + }, + repo_root() + "/test_dirname/my_secret:/run/secrets/file_secret:ro,rprivate,rbind", + ), + ( + "custom_location", + { + "file_secret": { + "file": "./my_secret", + } + }, + { + "source": "file_secret", + "target": "/etc/custom_location", + }, + repo_root() + "/test_dirname/my_secret:/etc/custom_location:ro,rprivate,rbind", + ), + ]) + async def 
test_file_secret( + self, test_name, declared_secrets, add_to_minimal_container, expected_volume_ref + ): + c = create_compose_mock() + c.declared_secrets = declared_secrets + cnt = get_minimal_container() + cnt["_service"] = "test-service" + cnt["secrets"] = [add_to_minimal_container] + args = await container_to_args(c, cnt) + self.assertEqual( + args, + [ + "--name=project_name_service_name1", + "-d", + "--network=bridge", + "--network-alias=service_name", + "--volume", + expected_volume_ref, + "busybox", + ], + ) + + async def test_file_secret_unused_params_warning(self): + c = create_compose_mock() + c.declared_secrets = { + "file_secret": { + "file": "./my_secret", + } + } + cnt = get_minimal_container() + cnt["_service"] = "test-service" + cnt["secrets"] = [ + { + "source": "file_secret", + "target": "unused_params_warning", + "uid": "103", + "gid": "103", + "mode": "400", + } + ] + with self.assertLogs() as cm: + args = await container_to_args(c, cnt) + self.assertEqual(len(cm.output), 1) + self.assertIn('with uid, gid, or mode.', cm.output[0]) + self.assertIn('unused_params_warning', cm.output[0]) + + self.assertEqual( + args, + [ + "--name=project_name_service_name1", + "-d", + "--network=bridge", + "--network-alias=service_name", + "--volume", + repo_root() + + "/test_dirname/my_secret:/run/secrets/unused_params_warning:ro,rprivate,rbind", + "busybox", + ], + ) diff --git a/tests/unit/test_get_net_args.py b/tests/unit/test_get_net_args.py new file mode 100644 index 0000000..586067f --- /dev/null +++ b/tests/unit/test_get_net_args.py @@ -0,0 +1,297 @@ +import unittest + +from parameterized import parameterized + +from podman_compose import get_net_args +from tests.unit.test_container_to_args import create_compose_mock + +PROJECT_NAME = "test_project_name" +SERVICE_NAME = "service_name" +CONTAINER_NAME = f"{PROJECT_NAME}_{SERVICE_NAME}_1" + + +def get_networked_compose(num_networks=1): + compose = create_compose_mock(PROJECT_NAME) + for network in range(num_networks): + compose.networks[f"net{network}"] = { + "driver": "bridge", + "ipam": { + "config": [ + {"subnet": f"192.168.{network}.0/24"}, + {"subnet": f"fd00:{network}::/64"}, + ] + }, + "enable_ipv6": True, + } + + return compose + + +def get_minimal_container(): + return { + "name": CONTAINER_NAME, + "service_name": SERVICE_NAME, + "image": "busybox", + } + + +class TestGetNetArgs(unittest.TestCase): + def test_minimal(self): + compose = get_networked_compose() + container = get_minimal_container() + + expected_args = [ + "--network=bridge", + f"--network-alias={SERVICE_NAME}", + ] + args = get_net_args(compose, container) + self.assertListEqual(expected_args, args) + + def test_one_net(self): + compose = get_networked_compose() + container = get_minimal_container() + container["networks"] = {"net0": {}} + + expected_args = [ + f"--network={PROJECT_NAME}_net0", + f"--network-alias={SERVICE_NAME}", + ] + args = get_net_args(compose, container) + self.assertListEqual(expected_args, args) + + def test_alias(self): + compose = get_networked_compose() + container = get_minimal_container() + container["networks"] = {"net0": {}} + container["_aliases"] = ["alias1", "alias2"] + + expected_args = [ + f"--network={PROJECT_NAME}_net0", + f"--network-alias={SERVICE_NAME}", + "--network-alias=alias1", + "--network-alias=alias2", + ] + args = get_net_args(compose, container) + self.assertListEqual(expected_args, args) + + def test_one_ipv4(self): + ip = "192.168.0.42" + compose = get_networked_compose() + container = 
get_minimal_container() + container["networks"] = {"net0": {"ipv4_address": ip}} + + expected_args = [ + f"--network={PROJECT_NAME}_net0", + f"--ip={ip}", + f"--network-alias={SERVICE_NAME}", + ] + args = get_net_args(compose, container) + self.assertEqual(expected_args, args) + + def test_one_ipv6(self): + ipv6_address = "fd00:0::42" + compose = get_networked_compose() + container = get_minimal_container() + container["networks"] = {"net0": {"ipv6_address": ipv6_address}} + + expected_args = [ + f"--network={PROJECT_NAME}_net0", + f"--ip6={ipv6_address}", + f"--network-alias={SERVICE_NAME}", + ] + args = get_net_args(compose, container) + self.assertListEqual(expected_args, args) + + def test_one_mac(self): + mac = "00:11:22:33:44:55" + compose = get_networked_compose() + container = get_minimal_container() + container["networks"] = {"net0": {}} + container["mac_address"] = mac + + expected_args = [ + f"--network={PROJECT_NAME}_net0", + f"--mac-address={mac}", + f"--network-alias={SERVICE_NAME}", + ] + args = get_net_args(compose, container) + self.assertListEqual(expected_args, args) + + def test_one_mac_two_nets(self): + mac = "00:11:22:33:44:55" + compose = get_networked_compose(num_networks=6) + container = get_minimal_container() + container["networks"] = {"net0": {}, "net1": {}} + container["mac_address"] = mac + + expected_args = [ + f"--network={PROJECT_NAME}_net0:mac={mac}", + f"--network={PROJECT_NAME}_net1", + f"--network-alias={SERVICE_NAME}", + ] + args = get_net_args(compose, container) + self.assertListEqual(expected_args, args) + + def test_two_nets_as_dict(self): + compose = get_networked_compose(num_networks=2) + container = get_minimal_container() + container["networks"] = {"net0": {}, "net1": {}} + + expected_args = [ + f"--network={PROJECT_NAME}_net0", + f"--network={PROJECT_NAME}_net1", + f"--network-alias={SERVICE_NAME}", + ] + args = get_net_args(compose, container) + self.assertListEqual(expected_args, args) + + def test_two_nets_as_list(self): + compose = get_networked_compose(num_networks=2) + container = get_minimal_container() + container["networks"] = ["net0", "net1"] + + expected_args = [ + f"--network={PROJECT_NAME}_net0", + f"--network={PROJECT_NAME}_net1", + f"--network-alias={SERVICE_NAME}", + ] + args = get_net_args(compose, container) + self.assertListEqual(expected_args, args) + + def test_two_ipv4(self): + ip0 = "192.168.0.42" + ip1 = "192.168.1.42" + compose = get_networked_compose(num_networks=2) + container = get_minimal_container() + container["networks"] = {"net0": {"ipv4_address": ip0}, "net1": {"ipv4_address": ip1}} + + expected_args = [ + f"--network={PROJECT_NAME}_net0:ip={ip0}", + f"--network={PROJECT_NAME}_net1:ip={ip1}", + f"--network-alias={SERVICE_NAME}", + ] + args = get_net_args(compose, container) + self.assertListEqual(expected_args, args) + + def test_two_ipv6(self): + ip0 = "fd00:0::42" + ip1 = "fd00:1::42" + compose = get_networked_compose(num_networks=2) + container = get_minimal_container() + container["networks"] = {"net0": {"ipv6_address": ip0}, "net1": {"ipv6_address": ip1}} + + expected_args = [ + f"--network={PROJECT_NAME}_net0:ip={ip0}", + f"--network={PROJECT_NAME}_net1:ip={ip1}", + f"--network-alias={SERVICE_NAME}", + ] + args = get_net_args(compose, container) + self.assertListEqual(expected_args, args) + + # custom extension; not supported by docker-compose + def test_two_mac(self): + mac0 = "00:00:00:00:00:01" + mac1 = "00:00:00:00:00:02" + compose = get_networked_compose(num_networks=2) + container = 
get_minimal_container() + container["networks"] = { + "net0": {"x-podman.mac_address": mac0}, + "net1": {"x-podman.mac_address": mac1}, + } + + expected_args = [ + f"--network={PROJECT_NAME}_net0:mac={mac0}", + f"--network={PROJECT_NAME}_net1:mac={mac1}", + f"--network-alias={SERVICE_NAME}", + ] + args = get_net_args(compose, container) + self.assertListEqual(expected_args, args) + + def test_mixed_mac(self): + ip4_0 = "192.168.0.42" + ip4_1 = "192.168.1.42" + ip4_2 = "192.168.2.42" + mac_0 = "00:00:00:00:00:01" + mac_1 = "00:00:00:00:00:02" + + compose = get_networked_compose(num_networks=3) + container = get_minimal_container() + container["networks"] = { + "net0": {"ipv4_address": ip4_0}, + "net1": {"ipv4_address": ip4_1, "x-podman.mac_address": mac_0}, + "net2": {"ipv4_address": ip4_2}, + } + container["mac_address"] = mac_1 + + expected_exception = ( + r"specifying mac_address on both container and network level " r"is not supported" + ) + self.assertRaisesRegex(RuntimeError, expected_exception, get_net_args, compose, container) + + def test_mixed_config(self): + ip4_0 = "192.168.0.42" + ip4_1 = "192.168.1.42" + ip6_0 = "fd00:0::42" + ip6_2 = "fd00:2::42" + mac = "00:11:22:33:44:55" + compose = get_networked_compose(num_networks=4) + container = get_minimal_container() + container["networks"] = { + "net0": {"ipv4_address": ip4_0, "ipv6_address": ip6_0}, + "net1": {"ipv4_address": ip4_1}, + "net2": {"ipv6_address": ip6_2}, + "net3": {}, + } + container["mac_address"] = mac + + expected_args = [ + f"--network={PROJECT_NAME}_net0:ip={ip4_0},ip={ip6_0},mac={mac}", + f"--network={PROJECT_NAME}_net1:ip={ip4_1}", + f"--network={PROJECT_NAME}_net2:ip={ip6_2}", + f"--network={PROJECT_NAME}_net3", + f"--network-alias={SERVICE_NAME}", + ] + args = get_net_args(compose, container) + self.assertListEqual(expected_args, args) + + @parameterized.expand([ + ("bridge", ["--network=bridge", f"--network-alias={SERVICE_NAME}"]), + ("host", ["--network=host"]), + ("none", ["--network=none"]), + ("slirp4netns", ["--network=slirp4netns"]), + ("slirp4netns:cidr=10.42.0.0/24", ["--network=slirp4netns:cidr=10.42.0.0/24"]), + ("private", ["--network=private"]), + ("pasta", ["--network=pasta"]), + ("pasta:--ipv4-only,-a,10.0.2.0", ["--network=pasta:--ipv4-only,-a,10.0.2.0"]), + ("ns:my_namespace", ["--network=ns:my_namespace"]), + ("container:my_container", ["--network=container:my_container"]), + ]) + def test_network_modes(self, network_mode, expected_args): + compose = get_networked_compose() + container = get_minimal_container() + container["network_mode"] = network_mode + + args = get_net_args(compose, container) + self.assertListEqual(expected_args, args) + + def test_network_mode_invalid(self): + compose = get_networked_compose() + container = get_minimal_container() + container["network_mode"] = "invalid_mode" + + with self.assertRaises(SystemExit): + get_net_args(compose, container) + + def test_network__mode_service(self): + compose = get_networked_compose() + compose.container_names_by_service = { + "service_1": ["container_1"], + "service_2": ["container_2"], + } + + container = get_minimal_container() + container["network_mode"] = "service:service_2" + + expected_args = ["--network=container:container_2"] + args = get_net_args(compose, container) + self.assertListEqual(expected_args, args) diff --git a/tests/unit/test_get_network_create_args.py b/tests/unit/test_get_network_create_args.py new file mode 100644 index 0000000..55d3021 --- /dev/null +++ b/tests/unit/test_get_network_create_args.py @@ 
-0,0 +1,203 @@ +import unittest + +from podman_compose import get_network_create_args + + +class TestGetNetworkCreateArgs(unittest.TestCase): + def test_minimal(self): + net_desc = { + "labels": [], + "internal": False, + "driver": None, + "driver_opts": {}, + "ipam": {"config": []}, + "enable_ipv6": False, + } + proj_name = "test_project" + net_name = "test_network" + expected_args = [ + "create", + "--label", + f"io.podman.compose.project={proj_name}", + "--label", + f"com.docker.compose.project={proj_name}", + net_name, + ] + args = get_network_create_args(net_desc, proj_name, net_name) + self.assertEqual(args, expected_args) + + def test_ipv6(self): + net_desc = { + "labels": [], + "internal": False, + "driver": None, + "driver_opts": {}, + "ipam": {"config": []}, + "enable_ipv6": True, + } + proj_name = "test_project" + net_name = "test_network" + expected_args = [ + "create", + "--label", + f"io.podman.compose.project={proj_name}", + "--label", + f"com.docker.compose.project={proj_name}", + "--ipv6", + net_name, + ] + args = get_network_create_args(net_desc, proj_name, net_name) + self.assertEqual(args, expected_args) + + def test_bridge(self): + net_desc = { + "labels": [], + "internal": False, + "driver": "bridge", + "driver_opts": {"opt1": "value1", "opt2": "value2"}, + "ipam": {"config": []}, + "enable_ipv6": False, + } + proj_name = "test_project" + net_name = "test_network" + expected_args = [ + "create", + "--label", + f"io.podman.compose.project={proj_name}", + "--label", + f"com.docker.compose.project={proj_name}", + "--driver", + "bridge", + "--opt", + "opt1=value1", + "--opt", + "opt2=value2", + net_name, + ] + args = get_network_create_args(net_desc, proj_name, net_name) + self.assertEqual(args, expected_args) + + def test_ipam_driver_default(self): + net_desc = { + "labels": [], + "internal": False, + "driver": None, + "driver_opts": {}, + "ipam": { + "driver": "default", + "config": [ + { + "subnet": "192.168.0.0/24", + "ip_range": "192.168.0.2/24", + "gateway": "192.168.0.1", + } + ], + }, + } + proj_name = "test_project" + net_name = "test_network" + expected_args = [ + "create", + "--label", + f"io.podman.compose.project={proj_name}", + "--label", + f"com.docker.compose.project={proj_name}", + "--subnet", + "192.168.0.0/24", + "--ip-range", + "192.168.0.2/24", + "--gateway", + "192.168.0.1", + net_name, + ] + args = get_network_create_args(net_desc, proj_name, net_name) + self.assertEqual(args, expected_args) + + def test_ipam_driver(self): + net_desc = { + "labels": [], + "internal": False, + "driver": None, + "driver_opts": {}, + "ipam": { + "driver": "someipamdriver", + "config": [ + { + "subnet": "192.168.0.0/24", + "ip_range": "192.168.0.2/24", + "gateway": "192.168.0.1", + } + ], + }, + } + proj_name = "test_project" + net_name = "test_network" + expected_args = [ + "create", + "--label", + f"io.podman.compose.project={proj_name}", + "--label", + f"com.docker.compose.project={proj_name}", + "--ipam-driver", + "someipamdriver", + "--subnet", + "192.168.0.0/24", + "--ip-range", + "192.168.0.2/24", + "--gateway", + "192.168.0.1", + net_name, + ] + args = get_network_create_args(net_desc, proj_name, net_name) + self.assertEqual(args, expected_args) + + def test_complete(self): + net_desc = { + "labels": ["label1", "label2"], + "internal": True, + "driver": "bridge", + "driver_opts": {"opt1": "value1", "opt2": "value2"}, + "ipam": { + "driver": "someipamdriver", + "config": [ + { + "subnet": "192.168.0.0/24", + "ip_range": "192.168.0.2/24", + "gateway": 
"192.168.0.1", + } + ], + }, + "enable_ipv6": True, + } + proj_name = "test_project" + net_name = "test_network" + expected_args = [ + "create", + "--label", + f"io.podman.compose.project={proj_name}", + "--label", + f"com.docker.compose.project={proj_name}", + "--label", + "label1", + "--label", + "label2", + "--internal", + "--driver", + "bridge", + "--opt", + "opt1=value1", + "--opt", + "opt2=value2", + "--ipam-driver", + "someipamdriver", + "--ipv6", + "--subnet", + "192.168.0.0/24", + "--ip-range", + "192.168.0.2/24", + "--gateway", + "192.168.0.1", + net_name, + ] + args = get_network_create_args(net_desc, proj_name, net_name) + self.assertEqual(args, expected_args) diff --git a/tests/unit/test_normalize_depends_on.py b/tests/unit/test_normalize_depends_on.py new file mode 100644 index 0000000..de773e6 --- /dev/null +++ b/tests/unit/test_normalize_depends_on.py @@ -0,0 +1,43 @@ +import copy + +from podman_compose import normalize_service + +test_cases_simple = [ + ( + {"depends_on": "my_service"}, + {"depends_on": {"my_service": {"condition": "service_started"}}}, + ), + ( + {"depends_on": ["my_service"]}, + {"depends_on": {"my_service": {"condition": "service_started"}}}, + ), + ( + {"depends_on": ["my_service1", "my_service2"]}, + { + "depends_on": { + "my_service1": {"condition": "service_started"}, + "my_service2": {"condition": "service_started"}, + }, + }, + ), + ( + {"depends_on": {"my_service": {"condition": "service_started"}}}, + {"depends_on": {"my_service": {"condition": "service_started"}}}, + ), + ( + {"depends_on": {"my_service": {"condition": "service_healthy"}}}, + {"depends_on": {"my_service": {"condition": "service_healthy"}}}, + ), +] + + +def test_normalize_service_simple(): + for test_case, expected in copy.deepcopy(test_cases_simple): + test_original = copy.deepcopy(test_case) + test_case = normalize_service(test_case) + test_result = expected == test_case + if not test_result: + print("test: ", test_original) + print("expected: ", expected) + print("actual: ", test_case) + assert test_result diff --git a/tests/unit/test_normalize_final_build.py b/tests/unit/test_normalize_final_build.py new file mode 100644 index 0000000..34ba0ea --- /dev/null +++ b/tests/unit/test_normalize_final_build.py @@ -0,0 +1,266 @@ +# SPDX-License-Identifier: GPL-2.0 +# pylint: disable=protected-access +from __future__ import annotations + +import argparse +import os +import unittest + +import yaml +from parameterized import parameterized + +from podman_compose import PodmanCompose +from podman_compose import normalize_final +from podman_compose import normalize_service_final + +cwd = os.path.abspath(".") + + +class TestNormalizeFinalBuild(unittest.TestCase): + cases_simple_normalization = [ + ({"image": "test-image"}, {"image": "test-image"}), + ( + {"build": "."}, + { + "build": {"context": cwd}, + }, + ), + ( + {"build": "../relative"}, + { + "build": { + "context": os.path.normpath(os.path.join(cwd, "../relative")), + }, + }, + ), + ( + {"build": "./relative"}, + { + "build": { + "context": os.path.normpath(os.path.join(cwd, "./relative")), + }, + }, + ), + ( + {"build": "/workspace/absolute"}, + { + "build": { + "context": "/workspace/absolute", + }, + }, + ), + ( + { + "build": { + "dockerfile": "Dockerfile", + }, + }, + { + "build": { + "context": cwd, + "dockerfile": "Dockerfile", + }, + }, + ), + ( + { + "build": { + "context": ".", + }, + }, + { + "build": { + "context": cwd, + }, + }, + ), + ( + { + "build": {"context": "../", "dockerfile": "test-dockerfile"}, + }, + { + 
"build": { + "context": os.path.normpath(os.path.join(cwd, "../")), + "dockerfile": "test-dockerfile", + }, + }, + ), + ( + { + "build": {"context": ".", "dockerfile": "./dev/test-dockerfile"}, + }, + { + "build": { + "context": cwd, + "dockerfile": "./dev/test-dockerfile", + }, + }, + ), + ] + + @parameterized.expand(cases_simple_normalization) + def test_normalize_service_final_returns_absolute_path_in_context(self, input, expected): + # Tests that [service.build] is normalized after merges + project_dir = cwd + self.assertEqual(normalize_service_final(input, project_dir), expected) + + @parameterized.expand(cases_simple_normalization) + def test_normalize_returns_absolute_path_in_context(self, input, expected): + project_dir = cwd + compose_test = {"services": {"test-service": input}} + compose_expected = {"services": {"test-service": expected}} + self.assertEqual(normalize_final(compose_test, project_dir), compose_expected) + + @parameterized.expand(cases_simple_normalization) + def test_parse_compose_file_when_single_compose(self, input, expected): + compose_test = {"services": {"test-service": input}} + dump_yaml(compose_test, "test-compose.yaml") + + podman_compose = PodmanCompose() + set_args(podman_compose, ["test-compose.yaml"], no_normalize=None) + + podman_compose._parse_compose_file() + + actual_compose = {} + if podman_compose.services: + podman_compose.services["test-service"].pop("_deps") + actual_compose = podman_compose.services["test-service"] + self.assertEqual(actual_compose, expected) + + @parameterized.expand([ + ( + {}, + {"build": "."}, + {"build": {"context": cwd}}, + ), + ( + {"build": "."}, + {}, + {"build": {"context": cwd}}, + ), + ( + {"build": "/workspace/absolute"}, + {"build": "./relative"}, + { + "build": { + "context": os.path.normpath(os.path.join(cwd, "./relative")), + } + }, + ), + ( + {"build": "./relative"}, + {"build": "/workspace/absolute"}, + {"build": {"context": "/workspace/absolute"}}, + ), + ( + {"build": "./relative"}, + {"build": "/workspace/absolute"}, + {"build": {"context": "/workspace/absolute"}}, + ), + ( + {"build": {"dockerfile": "test-dockerfile"}}, + {}, + {"build": {"context": cwd, "dockerfile": "test-dockerfile"}}, + ), + ( + {}, + {"build": {"dockerfile": "test-dockerfile"}}, + {"build": {"context": cwd, "dockerfile": "test-dockerfile"}}, + ), + ( + {}, + {"build": {"dockerfile": "test-dockerfile"}}, + {"build": {"context": cwd, "dockerfile": "test-dockerfile"}}, + ), + ( + {"build": {"dockerfile": "test-dockerfile-1"}}, + {"build": {"dockerfile": "test-dockerfile-2"}}, + {"build": {"context": cwd, "dockerfile": "test-dockerfile-2"}}, + ), + ( + {"build": "/workspace/absolute"}, + {"build": {"dockerfile": "test-dockerfile"}}, + {"build": {"context": "/workspace/absolute", "dockerfile": "test-dockerfile"}}, + ), + ( + {"build": {"dockerfile": "test-dockerfile"}}, + {"build": "/workspace/absolute"}, + {"build": {"context": "/workspace/absolute", "dockerfile": "test-dockerfile"}}, + ), + ( + {"build": {"dockerfile": "./test-dockerfile-1"}}, + {"build": {"dockerfile": "./test-dockerfile-2", "args": ["ENV1=1"]}}, + { + "build": { + "context": cwd, + "dockerfile": "./test-dockerfile-2", + "args": ["ENV1=1"], + } + }, + ), + ( + {"build": {"dockerfile": "./test-dockerfile-1", "args": ["ENV1=1"]}}, + {"build": {"dockerfile": "./test-dockerfile-2"}}, + { + "build": { + "context": cwd, + "dockerfile": "./test-dockerfile-2", + "args": ["ENV1=1"], + } + }, + ), + ( + {"build": {"dockerfile": "./test-dockerfile-1", "args": ["ENV1=1"]}}, + 
{"build": {"dockerfile": "./test-dockerfile-2", "args": ["ENV2=2"]}}, + { + "build": { + "context": cwd, + "dockerfile": "./test-dockerfile-2", + "args": ["ENV1=1", "ENV2=2"], + } + }, + ), + ]) + def test_parse_when_multiple_composes(self, input, override, expected): + compose_test_1 = {"services": {"test-service": input}} + compose_test_2 = {"services": {"test-service": override}} + dump_yaml(compose_test_1, "test-compose-1.yaml") + dump_yaml(compose_test_2, "test-compose-2.yaml") + + podman_compose = PodmanCompose() + set_args( + podman_compose, + ["test-compose-1.yaml", "test-compose-2.yaml"], + no_normalize=None, + ) + + podman_compose._parse_compose_file() + + actual_compose = {} + if podman_compose.services: + podman_compose.services["test-service"].pop("_deps") + actual_compose = podman_compose.services["test-service"] + self.assertEqual(actual_compose, expected) + + +def set_args(podman_compose: PodmanCompose, file_names: list[str], no_normalize: bool) -> None: + podman_compose.global_args = argparse.Namespace() + podman_compose.global_args.file = file_names + podman_compose.global_args.project_name = None + podman_compose.global_args.env_file = None + podman_compose.global_args.profile = [] + podman_compose.global_args.in_pod_bool = True + podman_compose.global_args.no_normalize = no_normalize + + +def dump_yaml(compose: dict, name: str) -> None: + # Path(Path.cwd()/"subdirectory").mkdir(parents=True, exist_ok=True) + with open(name, "w", encoding="utf-8") as outfile: + yaml.safe_dump(compose, outfile, default_flow_style=False) + + +def test_clean_test_yamls() -> None: + test_files = ["test-compose-1.yaml", "test-compose-2.yaml", "test-compose.yaml"] + for file in test_files: + if os.path.exists(file): + os.remove(file) diff --git a/tests/unit/test_normalize_service.py b/tests/unit/test_normalize_service.py new file mode 100644 index 0000000..925af4e --- /dev/null +++ b/tests/unit/test_normalize_service.py @@ -0,0 +1,70 @@ +# SPDX-License-Identifier: GPL-2.0 +import unittest + +from parameterized import parameterized + +from podman_compose import normalize_service + + +class TestNormalizeService(unittest.TestCase): + @parameterized.expand([ + ({"test": "test"}, {"test": "test"}), + ({"build": "."}, {"build": {"context": "."}}), + ({"build": "./dir-1"}, {"build": {"context": "./dir-1"}}), + ({"build": {"context": "./dir-1"}}, {"build": {"context": "./dir-1"}}), + ( + {"build": {"dockerfile": "dockerfile-1"}}, + {"build": {"dockerfile": "dockerfile-1"}}, + ), + ( + {"build": {"context": "./dir-1", "dockerfile": "dockerfile-1"}}, + {"build": {"context": "./dir-1", "dockerfile": "dockerfile-1"}}, + ), + ( + {"build": {"additional_contexts": ["ctx=../ctx", "ctx2=../ctx2"]}}, + {"build": {"additional_contexts": ["ctx=../ctx", "ctx2=../ctx2"]}}, + ), + ( + {"build": {"additional_contexts": {"ctx": "../ctx", "ctx2": "../ctx2"}}}, + {"build": {"additional_contexts": ["ctx=../ctx", "ctx2=../ctx2"]}}, + ), + ]) + def test_simple(self, input, expected): + self.assertEqual(normalize_service(input), expected) + + @parameterized.expand([ + ({"test": "test"}, {"test": "test"}), + ({"build": "."}, {"build": {"context": "./sub_dir/."}}), + ({"build": "./dir-1"}, {"build": {"context": "./sub_dir/dir-1"}}), + ({"build": {"context": "./dir-1"}}, {"build": {"context": "./sub_dir/dir-1"}}), + ( + {"build": {"dockerfile": "dockerfile-1"}}, + {"build": {"context": "./sub_dir", "dockerfile": "dockerfile-1"}}, + ), + ( + {"build": {"context": "./dir-1", "dockerfile": "dockerfile-1"}}, + {"build": 
{"context": "./sub_dir/dir-1", "dockerfile": "dockerfile-1"}}, + ), + ]) + def test_normalize_service_with_sub_dir(self, input, expected): + self.assertEqual(normalize_service(input, sub_dir="./sub_dir"), expected) + + @parameterized.expand([ + ([], []), + (["sh"], ["sh"]), + (["sh", "-c", "date"], ["sh", "-c", "date"]), + ("sh", ["sh"]), + ("sleep infinity", ["sleep", "infinity"]), + ( + "bash -c 'sleep infinity'", + ["bash", "-c", "sleep infinity"], + ), + ]) + def test_command_like(self, input, expected): + for key in ['command', 'entrypoint']: + input_service = {} + input_service[key] = input + + expected_service = {} + expected_service[key] = expected + self.assertEqual(normalize_service(input_service), expected_service) diff --git a/tests/unit/test_volumes.py b/tests/unit/test_volumes.py new file mode 100644 index 0000000..4c6a366 --- /dev/null +++ b/tests/unit/test_volumes.py @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0 +# pylint: disable=redefined-outer-name +import unittest + +from podman_compose import parse_short_mount + + +class ParseShortMountTests(unittest.TestCase): + def test_multi_propagation(self): + self.assertEqual( + parse_short_mount("/foo/bar:/baz:U,Z", "/"), + { + "type": "bind", + "source": "/foo/bar", + "target": "/baz", + "bind": { + "propagation": "U,Z", + }, + }, + )