diff --git a/.dockerignore b/.dockerignore index ababae31d3..198b23ecbb 100644 --- a/.dockerignore +++ b/.dockerignore @@ -10,6 +10,7 @@ dist .tox .coverage html/* +__pycache__ # Compiled Documentation -site/ +docs/_build diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000000..65d0c51972 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,14 @@ +root = true + +[*] +indent_style = space +indent_size = 4 +insert_final_newline = true +trim_trailing_whitespace = true +max_line_length = 80 + +[*.md] +trim_trailing_whitespace = false + +[*.{yaml,yml}] +indent_size = 2 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000000..9b43a27bc8 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,72 @@ +name: Python package + +on: [push, pull_request] + +env: + DOCKER_BUILDKIT: '1' + FORCE_COLOR: 1 + +jobs: + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: '3.x' + - run: pip install -U ruff==0.1.8 + - name: Run ruff + run: ruff docker tests + + build: + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: '3.x' + - run: pip3 install build && python -m build . + - uses: actions/upload-artifact@v4 + with: + name: dist + path: dist + + unit-tests: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"] + + steps: + - uses: actions/checkout@v4 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + allow-prereleases: true + - name: Install dependencies + run: | + python3 -m pip install --upgrade pip + pip3 install '.[ssh,dev]' + - name: Run unit tests + run: | + docker logout + rm -rf ~/.docker + py.test -v --cov=docker tests/unit + + integration-tests: + runs-on: ubuntu-latest + strategy: + matrix: + variant: [ "integration-dind", "integration-dind-ssl", "integration-dind-ssh" ] + + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + fetch-tags: true + - name: make ${{ matrix.variant }} + run: | + docker logout + rm -rf ~/.docker + make ${{ matrix.variant }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000000..17be00163d --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,53 @@ +name: Release + +on: + workflow_dispatch: + inputs: + tag: + description: "Release Tag WITHOUT `v` Prefix (e.g. 6.0.0)" + required: true + dry-run: + description: 'Dry run' + required: false + type: boolean + default: true + +env: + DOCKER_BUILDKIT: '1' + FORCE_COLOR: 1 + +jobs: + publish: + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-python@v5 + with: + python-version: '3.x' + + - name: Generate Package + run: | + pip3 install build + python -m build . + env: + # This is also supported by Hatch; see + # https://github.com/ofek/hatch-vcs#version-source-environment-variables + SETUPTOOLS_SCM_PRETEND_VERSION: ${{ inputs.tag }} + + - name: Publish to PyPI + uses: pypa/gh-action-pypi-publish@release/v1 + if: '! inputs.dry-run' + with: + password: ${{ secrets.PYPI_API_TOKEN }} + + - name: Create GitHub release + uses: ncipollo/release-action@v1 + if: '! 
inputs.dry-run' + with: + artifacts: "dist/*" + generateReleaseNotes: true + draft: true + commit: ${{ github.sha }} + token: ${{ secrets.GITHUB_TOKEN }} + tag: ${{ inputs.tag }} diff --git a/.gitignore b/.gitignore index 9980377f9c..c88ccc1b3a 100644 --- a/.gitignore +++ b/.gitignore @@ -10,8 +10,13 @@ dist html/* # Compiled Documentation -site/ +_build/ +README.rst + +# setuptools_scm +_version.py env/ venv/ .idea/ +*.iml diff --git a/.readthedocs.yml b/.readthedocs.yml new file mode 100644 index 0000000000..907454ea92 --- /dev/null +++ b/.readthedocs.yml @@ -0,0 +1,17 @@ +version: 2 + +sphinx: + configuration: docs/conf.py + +build: + os: ubuntu-22.04 + tools: + python: '3.12' + +python: + install: + - method: pip + path: . + extra_requirements: + - ssh + - docs diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 12b9d13cdb..0000000000 --- a/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -sudo: false -language: python -python: - - "2.7" -env: - - TOX_ENV=py26 - - TOX_ENV=py27 - - TOX_ENV=py32 - - TOX_ENV=py33 - - TOX_ENV=py34 - - TOX_ENV=flake8 -install: - - pip install tox -script: - - tox -e $TOX_ENV diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000000..acf22ef7ca --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,135 @@ +# Contributing guidelines + +See the [Docker contributing guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md). +The following is specific to Docker SDK for Python. + +Thank you for your interest in the project. We look forward to your +contribution. In order to make the process as fast and streamlined as possible, +here is a set of guidelines we recommend you follow. + +## Reporting issues + +We do our best to ensure bugs don't creep up in our releases, but some may +still slip through. If you encounter one while using the SDK, please +create an issue +[in the tracker](https://github.com/docker/docker-py/issues/new) with +the following information: + +- SDK version, Docker version, and Python version +``` +pip freeze | grep docker && python --version && docker version +``` +- OS, distribution and OS version +- The issue you're encountering, including a stack trace if applicable +- If possible, steps or a code snippet to reproduce the issue + +To save yourself time, please be sure to check our +[documentation](https://docker-py.readthedocs.io/) and use the +[search function](https://github.com/docker/docker-py/search) to find +out if it has already been addressed, or is currently being looked at. + +## Submitting pull requests + +Do you have a fix for an existing issue, or want to add new functionality +to the SDK? We happily welcome pull requests. Here are a few tips to +make the review process easier on both the maintainers and yourself. + +### 1. Sign your commits + +Please refer to the ["Sign your work"](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work) +paragraph in the Docker contribution guidelines. + +### 2. Make sure tests pass + +Before we can review your pull request, please ensure that nothing has been +broken by your changes by running the test suite. You can do so simply by +running `make test` in the project root. This also checks coding style using +`ruff`. + +### 3. Write clear, self-contained commits + +Your commit message should be concise and describe the nature of the change. +The commit itself should make sense in isolation from the others in your PR.
+Specifically, one should be able to review your commit separately from the +context. + +### 4. Rebase proactively + +It's much easier to review a pull request that is up to date against the +current master branch. + +### 5. Notify thread subscribers when changes are made + +GitHub doesn't notify subscribers when new commits happen on a PR, and +fixes or additions might be missed. Please add a comment to the PR thread +when you push new changes. + +### 6. Two maintainer LGTMs are required for merging + +Please wait for review and approval from two maintainers, and respond to their +comments and suggestions during review. + +### 7. Add tests + +Whether you're adding new functionality to the project or fixing a bug, please +add relevant tests to ensure the code you added continues to work as the +project evolves. + +### 8. Add docs + +This usually applies to new features rather than bug fixes, but new behavior +should always be documented. + +### 9. Ask questions + +If you're ever confused about something pertaining to the project, feel free +to reach out and ask questions. We will do our best to answer and help out. + + +## Development environment + +If you're looking to contribute to Docker SDK for Python but are new to the +project or Python, here are the steps to get you started. + +1. Fork https://github.com/docker/docker-py to your username. +2. Clone your forked repository locally with + `git clone git@github.com:yourusername/docker-py.git`. +3. Configure a + [remote](https://help.github.com/articles/configuring-a-remote-for-a-fork/) + for your fork so that you can + [sync changes you make](https://help.github.com/articles/syncing-a-fork/) + with the original repository. +4. Enter the local directory `cd docker-py`. +5. Run `python setup.py develop` to install the dev version of the project + and required dependencies. We recommend you do so inside a + [virtual environment](http://docs.python-guide.org/en/latest/dev/virtualenvs). + +## Running the tests & Code Quality + +To get the source code and run the unit tests, run: +``` +$ git clone git://github.com/docker/docker-py.git +$ cd docker-py +$ make test +``` + +## Building the docs + +``` +$ make docs +$ open _build/index.html +``` + +## Release Checklist + +Before a new release, please go through the following checklist: + +* Bump version in docker/version.py +* Add a release note in docs/change_log.md +* Git tag the version +* Upload to PyPI + +## Vulnerability Reporting +For any security issues, please do NOT file an issue or pull request on GitHub! +Please contact [security@docker.com](mailto:security@docker.com) or read [the +Docker security page](https://www.docker.com/resources/security/). diff --git a/Dockerfile b/Dockerfile index 733f96cf38..e77e713738 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,13 @@ -FROM python:2.7 -MAINTAINER Joffrey F -ADD . /home/docker-py -WORKDIR /home/docker-py -RUN pip install -r test-requirements.txt -RUN pip install . +# syntax=docker/dockerfile:1 + +ARG PYTHON_VERSION=3.12 +FROM python:${PYTHON_VERSION} + +WORKDIR /src +COPY . .
+ +ARG VERSION=0.0.0.dev0 +RUN --mount=type=cache,target=/cache/pip \ + PIP_CACHE_DIR=/cache/pip \ + SETUPTOOLS_SCM_PRETEND_VERSION=${VERSION} \ + pip install .[ssh] diff --git a/Dockerfile-docs b/Dockerfile-docs new file mode 100644 index 0000000000..4671d2c492 --- /dev/null +++ b/Dockerfile-docs @@ -0,0 +1,22 @@ +# syntax=docker/dockerfile:1 + +ARG PYTHON_VERSION=3.12 + +FROM python:${PYTHON_VERSION} + +ARG uid=1000 +ARG gid=1000 + +RUN addgroup --gid $gid sphinx \ + && useradd --uid $uid --gid $gid -M sphinx + +WORKDIR /src +COPY . . + +ARG VERSION=0.0.0.dev0 +RUN --mount=type=cache,target=/cache/pip \ + PIP_CACHE_DIR=/cache/pip \ + SETUPTOOLS_SCM_PRETEND_VERSION=${VERSION} \ + pip install .[ssh,docs] + +USER sphinx diff --git a/LICENSE b/LICENSE index d645695673..75191a4dc7 100644 --- a/LICENSE +++ b/LICENSE @@ -176,18 +176,7 @@ END OF TERMS AND CONDITIONS - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] + Copyright 2016 Docker, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/MAINTAINERS b/MAINTAINERS index 14f61963fc..96ba4752e8 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1,3 +1,85 @@ -Joffrey F (@shin-) -Maxime Petazzoni (@mpetazzoni) -Aanand Prasad (@aanand) +# Docker SDK for Python maintainers file +# +# This file describes who runs the docker/docker-py project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "glours", + "milas", + ] + [Org.Alumni] + people = [ + "aiordache", + "aanand", + "bfirsh", + "dnephin", + "mnowster", + "mpetazzoni", + "shin-", + "ulyssessouza", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. 
+ + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.aanand] + Name = "Aanand Prasad" + Email = "aanand@docker.com" + GitHub = "aanand" + + [people.aiordache] + Name = "Anca Iordache" + Email = "anca.iordache@docker.com" + GitHub = "aiordache" + + [people.bfirsh] + Name = "Ben Firshman" + Email = "b@fir.sh" + GitHub = "bfirsh" + + [people.dnephin] + Name = "Daniel Nephin" + Email = "dnephin@gmail.com" + GitHub = "dnephin" + + [people.glours] + Name = "Guillaume Lours" + Email = "705411+glours@users.noreply.github.com" + GitHub = "glours" + + [people.milas] + Name = "Milas Bowman" + Email = "devnull@milas.dev" + GitHub = "milas" + + [people.mnowster] + Name = "Mazz Mosley" + Email = "mazz@houseofmnowster.com" + GitHub = "mnowster" + + [people.mpetazzoni] + Name = "Maxime Petazzoni" + Email = "maxime.petazzoni@bulix.org" + GitHub = "mpetazzoni" + + [people.shin-] + Name = "Joffrey F" + Email = "joffrey@docker.com" + GitHub = "shin-" + + [people.ulyssessouza] + Name = "Ulysses Domiciano Souza" + Email = "ulysses.souza@docker.com" + GitHub = "ulyssessouza" diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index f60f3f1173..0000000000 --- a/MANIFEST.in +++ /dev/null @@ -1,6 +0,0 @@ -include test-requirements.txt -include requirements.txt -include requirements3.txt -include README.md -include LICENSE -recursive-include tests *.py diff --git a/Makefile b/Makefile index cf291ea96e..79aa15e2e9 100644 --- a/Makefile +++ b/Makefile @@ -1,16 +1,189 @@ -.PHONY: all build test integration-test unit-test +TEST_API_VERSION ?= 1.45 +TEST_ENGINE_VERSION ?= 26.1 -HOST_TMPDIR=test -n "$(TMPDIR)" && echo $(TMPDIR) || echo /tmp +ifeq ($(OS),Windows_NT) + PLATFORM := Windows +else + PLATFORM := $(shell sh -c 'uname -s 2>/dev/null || echo Unknown') +endif +ifeq ($(PLATFORM),Linux) + uid_args := "--build-arg uid=$(shell id -u) --build-arg gid=$(shell id -g)" +endif + +SETUPTOOLS_SCM_PRETEND_VERSION_DOCKER ?= $(shell git describe --match '[0-9]*' --dirty='.m' --always --tags 2>/dev/null | sed -r 's/-([0-9]+)/.dev\1/' | sed 's/-/+/') +ifeq ($(SETUPTOOLS_SCM_PRETEND_VERSION_DOCKER),) + SETUPTOOLS_SCM_PRETEND_VERSION_DOCKER = "0.0.0.dev0" +endif + +.PHONY: all all: test +.PHONY: clean +clean: + -docker rm -f dpy-dind dpy-dind-certs dpy-dind-ssl + find -name "__pycache__" | xargs rm -rf + +.PHONY: build-dind-ssh +build-dind-ssh: + docker build \ + --pull \ + -t docker-dind-ssh \ + -f tests/Dockerfile-ssh-dind \ + --build-arg VERSION=${SETUPTOOLS_SCM_PRETEND_VERSION_DOCKER} \ + --build-arg ENGINE_VERSION=${TEST_ENGINE_VERSION} \ + --build-arg API_VERSION=${TEST_API_VERSION} \ + . + +.PHONY: build build: - docker build -t docker-py . + docker build \ + --pull \ + -t docker-sdk-python3 \ + -f tests/Dockerfile \ + --build-arg VERSION=${SETUPTOOLS_SCM_PRETEND_VERSION_DOCKER} \ + . + +.PHONY: build-docs +build-docs: + docker build \ + -t docker-sdk-python-docs \ + -f Dockerfile-docs \ + --build-arg VERSION=${SETUPTOOLS_SCM_PRETEND_VERSION_DOCKER} \ + $(uid_args) \ + . + +.PHONY: build-dind-certs +build-dind-certs: + docker build \ + -t dpy-dind-certs \ + -f tests/Dockerfile-dind-certs \ + --build-arg VERSION=${SETUPTOOLS_SCM_PRETEND_VERSION_DOCKER} \ + . 
-test: unit-test integration-test +.PHONY: test +test: ruff unit-test integration-dind integration-dind-ssl +.PHONY: unit-test unit-test: build - docker run docker-py python tests/test.py + docker run -t --rm docker-sdk-python3 py.test tests/unit +.PHONY: integration-test integration-test: build - docker run -e NOT_ON_HOST=true -v `$(HOST_TMPDIR)`:/tmp -v /var/run/docker.sock:/var/run/docker.sock docker-py python tests/integration_test.py + docker run -t --rm -v /var/run/docker.sock:/var/run/docker.sock docker-sdk-python3 py.test -v tests/integration/${file} + +.PHONY: setup-network +setup-network: + docker network inspect dpy-tests || docker network create dpy-tests + +.PHONY: integration-dind +integration-dind: build setup-network + docker rm -vf dpy-dind || : + + docker run \ + --detach \ + --name dpy-dind \ + --network dpy-tests \ + --pull=always \ + --privileged \ + docker:${TEST_ENGINE_VERSION}-dind \ + dockerd -H tcp://0.0.0.0:2375 --experimental + + # Wait for Docker-in-Docker to come to life + docker run \ + --network dpy-tests \ + --rm \ + --tty \ + busybox \ + sh -c 'while ! nc -z dpy-dind 2375; do sleep 1; done' + + docker run \ + --env="DOCKER_HOST=tcp://dpy-dind:2375" \ + --env="DOCKER_TEST_API_VERSION=${TEST_API_VERSION}" \ + --network dpy-tests \ + --rm \ + --tty \ + docker-sdk-python3 \ + py.test tests/integration/${file} + + docker rm -vf dpy-dind + + +.PHONY: integration-dind-ssh +integration-dind-ssh: build-dind-ssh build setup-network + docker rm -vf dpy-dind-ssh || : + docker run -d --network dpy-tests --name dpy-dind-ssh --privileged \ + docker-dind-ssh dockerd --experimental + # start SSH daemon for known key + docker exec dpy-dind-ssh sh -c "/usr/sbin/sshd -h /etc/ssh/known_ed25519 -p 22" + docker exec dpy-dind-ssh sh -c "/usr/sbin/sshd -h /etc/ssh/unknown_ed25519 -p 2222" + docker run \ + --tty \ + --rm \ + --env="DOCKER_HOST=ssh://dpy-dind-ssh" \ + --env="DOCKER_TEST_API_VERSION=${TEST_API_VERSION}" \ + --env="UNKNOWN_DOCKER_SSH_HOST=ssh://dpy-dind-ssh:2222" \ + --network dpy-tests \ + docker-sdk-python3 py.test tests/ssh/${file} + docker rm -vf dpy-dind-ssh + + +.PHONY: integration-dind-ssl +integration-dind-ssl: build-dind-certs build setup-network + docker rm -vf dpy-dind-certs dpy-dind-ssl || : + docker run -d --name dpy-dind-certs dpy-dind-certs + + docker run \ + --detach \ + --env="DOCKER_CERT_PATH=/certs" \ + --env="DOCKER_HOST=tcp://localhost:2375" \ + --env="DOCKER_TLS_VERIFY=1" \ + --name dpy-dind-ssl \ + --network dpy-tests \ + --network-alias docker \ + --pull=always \ + --privileged \ + --volume /tmp \ + --volumes-from dpy-dind-certs \ + docker:${TEST_ENGINE_VERSION}-dind \ + dockerd \ + --tlsverify \ + --tlscacert=/certs/ca.pem \ + --tlscert=/certs/server-cert.pem \ + --tlskey=/certs/server-key.pem \ + -H tcp://0.0.0.0:2375 \ + --experimental + + # Wait for Docker-in-Docker to come to life + docker run \ + --network dpy-tests \ + --rm \ + --tty \ + busybox \ + sh -c 'while ! 
nc -z dpy-dind-ssl 2375; do sleep 1; done' + + docker run \ + --env="DOCKER_CERT_PATH=/certs" \ + --env="DOCKER_HOST=tcp://docker:2375" \ + --env="DOCKER_TEST_API_VERSION=${TEST_API_VERSION}" \ + --env="DOCKER_TLS_VERIFY=1" \ + --network dpy-tests \ + --rm \ + --volumes-from dpy-dind-ssl \ + --tty \ + docker-sdk-python3 \ + py.test tests/integration/${file} + + docker rm -vf dpy-dind-ssl dpy-dind-certs + +.PHONY: ruff +ruff: build + docker run -t --rm docker-sdk-python3 ruff docker tests + +.PHONY: docs +docs: build-docs + docker run --rm -t -v `pwd`:/src docker-sdk-python-docs sphinx-build docs docs/_build + +.PHONY: shell +shell: build + docker run -it -v /var/run/docker.sock:/var/run/docker.sock docker-sdk-python3 python diff --git a/README.md b/README.md index 385193a4de..a6e06a229f 100644 --- a/README.md +++ b/README.md @@ -1,26 +1,76 @@ -docker-py -========= +# Docker SDK for Python -[![Build Status](https://travis-ci.org/docker/docker-py.png)](https://travis-ci.org/docker/docker-py) +[![Build Status](https://github.com/docker/docker-py/actions/workflows/ci.yml/badge.svg)](https://github.com/docker/docker-py/actions/workflows/ci.yml) -An API client for docker written in Python +A Python library for the Docker Engine API. It lets you do anything the `docker` command does, but from within Python apps – run containers, manage containers, manage Swarms, etc. -Installation ------------- +## Installation -Our latest stable is always available on PyPi. +The latest stable version [is available on PyPI](https://pypi.python.org/pypi/docker/). Install with pip: - pip install docker-py + pip install docker -Documentation ------------- +> Older versions (< 6.0) required installing `docker[tls]` for SSL/TLS support. +> This is no longer necessary and is a no-op, but is supported for backwards compatibility. -[![Documentation Status](https://readthedocs.org/projects/docker-py/badge/?version=latest)](https://readthedocs.org/projects/docker-py/?badge=latest) +## Usage -Full documentation is hosted on [ReadTheDocs](http://docker-py.readthedocs.org/en/latest/). -Sources are available in the `docs/` directory. +Connect to Docker using the default socket or the configuration in your environment: +```python +import docker +client = docker.from_env() +``` -License ------- -Docker is licensed under the Apache License, Version 2.0. See LICENSE for full license text +You can run containers: + +```python +>>> client.containers.run("ubuntu:latest", "echo hello world") +'hello world\n' +``` + +You can run containers in the background: + +```python +>>> client.containers.run("bfirsh/reticulate-splines", detach=True) +<Container '45e6d2de7c54'> +``` + +You can manage containers: + +```python +>>> client.containers.list() +[<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...] + +>>> container = client.containers.get('45e6d2de7c54') + +>>> container.attrs['Config']['Image'] +"bfirsh/reticulate-splines" + +>>> container.logs() +"Reticulating spline 1...\n" + +>>> container.stop() +``` + +You can stream logs: + +```python +>>> for line in container.logs(stream=True): +... print(line.strip()) +Reticulating spline 2... +Reticulating spline 3... +... +``` + +You can manage images: + +```python +>>> client.images.pull('nginx') +<Image 'nginx'> + +>>> client.images.list() +[<Image 'ubuntu'>, <Image 'nginx'>, ...] +``` + +[Read the full documentation](https://docker-py.readthedocs.io) to see everything you can do. diff --git a/docker/__init__.py b/docker/__init__.py index 3844c81ac8..fb7a5e921a 100644 --- a/docker/__init__.py +++ b/docker/__init__.py @@ -1,20 +1,7 @@ -# Copyright 2013 dotCloud inc.
+from .api import APIClient +from .client import DockerClient, from_env +from .context import Context, ContextAPI +from .tls import TLSConfig +from .version import __version__ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from .version import version, version_info - -__version__ = version -__title__ = 'docker-py' - -from .client import Client, AutoVersionClient # flake8: noqa +__title__ = 'docker' diff --git a/docker/api/__init__.py b/docker/api/__init__.py new file mode 100644 index 0000000000..7260e9537e --- /dev/null +++ b/docker/api/__init__.py @@ -0,0 +1 @@ +from .client import APIClient diff --git a/docker/api/build.py b/docker/api/build.py new file mode 100644 index 0000000000..47216a58fd --- /dev/null +++ b/docker/api/build.py @@ -0,0 +1,382 @@ +import json +import logging +import os +import random + +from .. import auth, constants, errors, utils + +log = logging.getLogger(__name__) + + +class BuildApiMixin: + def build(self, path=None, tag=None, quiet=False, fileobj=None, + nocache=False, rm=False, timeout=None, + custom_context=False, encoding=None, pull=False, + forcerm=False, dockerfile=None, container_limits=None, + decode=False, buildargs=None, gzip=False, shmsize=None, + labels=None, cache_from=None, target=None, network_mode=None, + squash=None, extra_hosts=None, platform=None, isolation=None, + use_config_proxy=True): + """ + Similar to the ``docker build`` command. Either ``path`` or ``fileobj`` + needs to be set. ``path`` can be a local path (to a directory + containing a Dockerfile) or a remote URL. ``fileobj`` must be a + readable file-like object to a Dockerfile. + + If you have a tar file for the Docker build context (including a + Dockerfile) already, pass a readable file-like object to ``fileobj`` + and also pass ``custom_context=True``. If the stream is compressed + also, set ``encoding`` to the correct value (e.g ``gzip``). + + Example: + >>> from io import BytesIO + >>> from docker import APIClient + >>> dockerfile = ''' + ... # Shared Volume + ... FROM busybox:buildroot-2014.02 + ... VOLUME /data + ... CMD ["/bin/sh"] + ... ''' + >>> f = BytesIO(dockerfile.encode('utf-8')) + >>> cli = APIClient(base_url='tcp://127.0.0.1:2375') + >>> response = [line for line in cli.build( + ... fileobj=f, rm=True, tag='yourname/volume' + ... )] + >>> response + ['{"stream":" ---\\u003e a9eb17255234\\n"}', + '{"stream":"Step 1 : VOLUME /data\\n"}', + '{"stream":" ---\\u003e Running in abdc1e6896c6\\n"}', + '{"stream":" ---\\u003e 713bca62012e\\n"}', + '{"stream":"Removing intermediate container abdc1e6896c6\\n"}', + '{"stream":"Step 2 : CMD [\\"/bin/sh\\"]\\n"}', + '{"stream":" ---\\u003e Running in dba30f2a1a7e\\n"}', + '{"stream":" ---\\u003e 032b8b2855fc\\n"}', + '{"stream":"Removing intermediate container dba30f2a1a7e\\n"}', + '{"stream":"Successfully built 032b8b2855fc\\n"}'] + + Args: + path (str): Path to the directory containing the Dockerfile + fileobj: A file object to use as the Dockerfile. 
(Or a file-like + object) + tag (str): A tag to add to the final image + quiet (bool): Whether to return the status + nocache (bool): Don't use the cache when set to ``True`` + rm (bool): Remove intermediate containers. The ``docker build`` + command now defaults to ``--rm=true``, but we have kept the old + default of `False` to preserve backward compatibility + timeout (int): HTTP timeout + custom_context (bool): Optional if using ``fileobj`` + encoding (str): The encoding for a stream. Set to ``gzip`` for + compressing + pull (bool): Downloads any updates to the FROM image in Dockerfiles + forcerm (bool): Always remove intermediate containers, even after + unsuccessful builds + dockerfile (str): path within the build context to the Dockerfile + gzip (bool): If set to ``True``, gzip compression/encoding is used + buildargs (dict): A dictionary of build arguments + container_limits (dict): A dictionary of limits applied to each + container created by the build process. Valid keys: + + - memory (int): set memory limit for build + - memswap (int): Total memory (memory + swap), -1 to disable + swap + - cpushares (int): CPU shares (relative weight) + - cpusetcpus (str): CPUs in which to allow execution, e.g., + ``"0-3"``, ``"0,1"`` + decode (bool): If set to ``True``, the returned stream will be + decoded into dicts on the fly. Default ``False`` + shmsize (int): Size of `/dev/shm` in bytes. The size must be + greater than 0. If omitted the system uses 64MB + labels (dict): A dictionary of labels to set on the image + cache_from (:py:class:`list`): A list of images used for build + cache resolution + target (str): Name of the build-stage to build in a multi-stage + Dockerfile + network_mode (str): networking mode for the run commands during + build + squash (bool): Squash the resulting images layers into a + single layer. + extra_hosts (dict): Extra hosts to add to /etc/hosts in building + containers, as a mapping of hostname to IP address. + platform (str): Platform in the format ``os[/arch[/variant]]`` + isolation (str): Isolation technology used during build. + Default: `None`. + use_config_proxy (bool): If ``True``, and if the docker client + configuration file (``~/.docker/config.json`` by default) + contains a proxy configuration, the corresponding environment + variables will be set in the container being built. + + Returns: + A generator for the build output. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + ``TypeError`` + If neither ``path`` nor ``fileobj`` is specified. 
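Complementing the parameter list above, here is a minimal hedged sketch of a path-based build; the context directory and tag are illustrative, and it assumes `./app` contains a Dockerfile. With `decode=True` the generator yields dicts rather than raw JSON lines:

```python
import docker

client = docker.APIClient(base_url='unix://var/run/docker.sock')
# Stream decoded status dicts as the daemon reports build progress.
for chunk in client.build(path='./app', tag='myorg/app:latest',
                          rm=True, decode=True):
    if 'stream' in chunk:
        print(chunk['stream'], end='')
    elif 'error' in chunk:
        raise RuntimeError(chunk['error'])
```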
+ """ + remote = context = None + headers = {} + container_limits = container_limits or {} + buildargs = buildargs or {} + if path is None and fileobj is None: + raise TypeError("Either path or fileobj needs to be provided.") + if gzip and encoding is not None: + raise errors.DockerException( + 'Can not use custom encoding if gzip is enabled' + ) + if tag is not None: + if not utils.match_tag(tag): + raise errors.DockerException( + f"invalid tag '{tag}': invalid reference format" + ) + for key in container_limits.keys(): + if key not in constants.CONTAINER_LIMITS_KEYS: + raise errors.DockerException( + f"invalid tag '{tag}': invalid reference format" + ) + if custom_context: + if not fileobj: + raise TypeError("You must specify fileobj with custom_context") + context = fileobj + elif fileobj is not None: + context = utils.mkbuildcontext(fileobj) + elif path.startswith(('http://', 'https://', + 'git://', 'github.com/', 'git@')): + remote = path + elif not os.path.isdir(path): + raise TypeError("You must specify a directory to build in path") + else: + dockerignore = os.path.join(path, '.dockerignore') + exclude = None + if os.path.exists(dockerignore): + with open(dockerignore) as f: + exclude = list(filter( + lambda x: x != '' and x[0] != '#', + [line.strip() for line in f.read().splitlines()] + )) + dockerfile = process_dockerfile(dockerfile, path) + context = utils.tar( + path, exclude=exclude, dockerfile=dockerfile, gzip=gzip + ) + encoding = 'gzip' if gzip else encoding + + u = self._url('/build') + params = { + 't': tag, + 'remote': remote, + 'q': quiet, + 'nocache': nocache, + 'rm': rm, + 'forcerm': forcerm, + 'pull': pull, + 'dockerfile': dockerfile, + } + params.update(container_limits) + + if use_config_proxy: + proxy_args = self._proxy_configs.get_environment() + for k, v in proxy_args.items(): + buildargs.setdefault(k, v) + if buildargs: + params.update({'buildargs': json.dumps(buildargs)}) + + if shmsize: + if utils.version_gte(self._version, '1.22'): + params.update({'shmsize': shmsize}) + else: + raise errors.InvalidVersion( + 'shmsize was only introduced in API version 1.22' + ) + + if labels: + if utils.version_gte(self._version, '1.23'): + params.update({'labels': json.dumps(labels)}) + else: + raise errors.InvalidVersion( + 'labels was only introduced in API version 1.23' + ) + + if cache_from: + if utils.version_gte(self._version, '1.25'): + params.update({'cachefrom': json.dumps(cache_from)}) + else: + raise errors.InvalidVersion( + 'cache_from was only introduced in API version 1.25' + ) + + if target: + if utils.version_gte(self._version, '1.29'): + params.update({'target': target}) + else: + raise errors.InvalidVersion( + 'target was only introduced in API version 1.29' + ) + + if network_mode: + if utils.version_gte(self._version, '1.25'): + params.update({'networkmode': network_mode}) + else: + raise errors.InvalidVersion( + 'network_mode was only introduced in API version 1.25' + ) + + if squash: + if utils.version_gte(self._version, '1.25'): + params.update({'squash': squash}) + else: + raise errors.InvalidVersion( + 'squash was only introduced in API version 1.25' + ) + + if extra_hosts is not None: + if utils.version_lt(self._version, '1.27'): + raise errors.InvalidVersion( + 'extra_hosts was only introduced in API version 1.27' + ) + + if isinstance(extra_hosts, dict): + extra_hosts = utils.format_extra_hosts(extra_hosts) + params.update({'extrahosts': extra_hosts}) + + if platform is not None: + if utils.version_lt(self._version, '1.32'): + 
raise errors.InvalidVersion( + 'platform was only introduced in API version 1.32' + ) + params['platform'] = platform + + if isolation is not None: + if utils.version_lt(self._version, '1.24'): + raise errors.InvalidVersion( + 'isolation was only introduced in API version 1.24' + ) + params['isolation'] = isolation + + if context is not None: + headers = {'Content-Type': 'application/tar'} + if encoding: + headers['Content-Encoding'] = encoding + + self._set_auth_headers(headers) + + response = self._post( + u, + data=context, + params=params, + headers=headers, + stream=True, + timeout=timeout, + ) + + if context is not None and not custom_context: + context.close() + + return self._stream_helper(response, decode=decode) + + @utils.minimum_version('1.31') + def prune_builds(self, filters=None, keep_storage=None, all=None): + """ + Delete the builder cache + + Args: + filters (dict): Filters to process on the prune list. + Needs Docker API v1.39+ + Available filters: + - dangling (bool): When set to true (or 1), prune only + unused and untagged images. + - until (str): Can be Unix timestamps, date formatted + timestamps, or Go duration strings (e.g. 10m, 1h30m) computed + relative to the daemon's local time. + keep_storage (int): Amount of disk space in bytes to keep for cache. + Needs Docker API v1.39+ + all (bool): Remove all types of build cache. + Needs Docker API v1.39+ + + Returns: + (dict): A dictionary containing information about the operation's + result. The ``SpaceReclaimed`` key indicates the amount of + bytes of disk space reclaimed. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + url = self._url("/build/prune") + if (filters, keep_storage, all) != (None, None, None) \ + and utils.version_lt(self._version, '1.39'): + raise errors.InvalidVersion( + '`filters`, `keep_storage`, and `all` args are only available ' + 'for API version > 1.38' + ) + params = {} + if filters is not None: + params['filters'] = utils.convert_filters(filters) + if keep_storage is not None: + params['keep-storage'] = keep_storage + if all is not None: + params['all'] = all + return self._result(self._post(url, params=params), True) + + def _set_auth_headers(self, headers): + log.debug('Looking for auth config') + + # If we don't have any auth data so far, try reloading the config + # file one more time in case anything showed up in there. + if not self._auth_configs or self._auth_configs.is_empty: + log.debug("No auth config in memory - loading from filesystem") + self._auth_configs = auth.load_config( + credstore_env=self.credstore_env + ) + + # Send the full auth configuration (if any exists), since the build + # could use any (or all) of the registries. 
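As a usage note for `prune_builds` above, a minimal hedged sketch; the `until` filter value is illustrative, and `client` is assumed to be an `APIClient` connected to a daemon with API 1.39 or later:

```python
# Reclaim build cache entries older than 24 hours.
result = client.prune_builds(filters={'until': '24h'})
print(f"reclaimed {result.get('SpaceReclaimed', 0)} bytes")
```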
+ if self._auth_configs: + auth_data = self._auth_configs.get_all_credentials() + + # See https://github.com/docker/docker-py/issues/1683 + if (auth.INDEX_URL not in auth_data and + auth.INDEX_NAME in auth_data): + auth_data[auth.INDEX_URL] = auth_data.get(auth.INDEX_NAME, {}) + + log.debug( + "Sending auth config (%s)", + ', '.join(repr(k) for k in auth_data), + ) + + if auth_data: + headers['X-Registry-Config'] = auth.encode_header( + auth_data + ) + else: + log.debug('No auth config found') + + +def process_dockerfile(dockerfile, path): + if not dockerfile: + return (None, None) + + abs_dockerfile = dockerfile + if not os.path.isabs(dockerfile): + abs_dockerfile = os.path.join(path, dockerfile) + if constants.IS_WINDOWS_PLATFORM and path.startswith( + constants.WINDOWS_LONGPATH_PREFIX): + normpath = os.path.normpath( + abs_dockerfile[len(constants.WINDOWS_LONGPATH_PREFIX):]) + abs_dockerfile = f'{constants.WINDOWS_LONGPATH_PREFIX}{normpath}' + if (os.path.splitdrive(path)[0] != os.path.splitdrive(abs_dockerfile)[0] or + os.path.relpath(abs_dockerfile, path).startswith('..')): + # Dockerfile not in context - read data to insert into tar later + with open(abs_dockerfile) as df: + return ( + f'.dockerfile.{random.getrandbits(160):x}', + df.read() + ) + + # Dockerfile is inside the context - return path relative to context root + if dockerfile == abs_dockerfile: + # Only calculate relpath if necessary to avoid errors + # on Windows client -> Linux Docker + # see https://github.com/docker/compose/issues/5969 + dockerfile = os.path.relpath(abs_dockerfile, path) + return (dockerfile, None) diff --git a/docker/api/client.py b/docker/api/client.py new file mode 100644 index 0000000000..394ceb1f56 --- /dev/null +++ b/docker/api/client.py @@ -0,0 +1,532 @@ +import json +import struct +import urllib +from functools import partial + +import requests +import requests.adapters +import requests.exceptions + +from .. import auth +from ..constants import ( + DEFAULT_MAX_POOL_SIZE, + DEFAULT_NUM_POOLS, + DEFAULT_NUM_POOLS_SSH, + DEFAULT_TIMEOUT_SECONDS, + DEFAULT_USER_AGENT, + IS_WINDOWS_PLATFORM, + MINIMUM_DOCKER_API_VERSION, + STREAM_HEADER_SIZE_BYTES, +) +from ..errors import ( + DockerException, + InvalidVersion, + TLSParameterError, + create_api_error_from_http_exception, +) +from ..tls import TLSConfig +from ..transport import UnixHTTPAdapter +from ..utils import check_resource, config, update_headers, utils +from ..utils.json_stream import json_stream +from ..utils.proxy import ProxyConfig +from ..utils.socket import consume_socket_output, demux_adaptor, frames_iter +from .build import BuildApiMixin +from .config import ConfigApiMixin +from .container import ContainerApiMixin +from .daemon import DaemonApiMixin +from .exec_api import ExecApiMixin +from .image import ImageApiMixin +from .network import NetworkApiMixin +from .plugin import PluginApiMixin +from .secret import SecretApiMixin +from .service import ServiceApiMixin +from .swarm import SwarmApiMixin +from .volume import VolumeApiMixin + +try: + from ..transport import NpipeHTTPAdapter +except ImportError: + pass + +try: + from ..transport import SSHHTTPAdapter +except ImportError: + pass + + +class APIClient( + requests.Session, + BuildApiMixin, + ConfigApiMixin, + ContainerApiMixin, + DaemonApiMixin, + ExecApiMixin, + ImageApiMixin, + NetworkApiMixin, + PluginApiMixin, + SecretApiMixin, + ServiceApiMixin, + SwarmApiMixin, + VolumeApiMixin): + """ + A low-level client for the Docker Engine API. 
+ + Example: + + >>> import docker + >>> client = docker.APIClient(base_url='unix://var/run/docker.sock') + >>> client.version() + {u'ApiVersion': u'1.33', + u'Arch': u'amd64', + u'BuildTime': u'2017-11-19T18:46:37.000000000+00:00', + u'GitCommit': u'f4ffd2511c', + u'GoVersion': u'go1.9.2', + u'KernelVersion': u'4.14.3-1-ARCH', + u'MinAPIVersion': u'1.12', + u'Os': u'linux', + u'Version': u'17.10.0-ce'} + + Args: + base_url (str): URL to the Docker server. For example, + ``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``. + version (str): The version of the API to use. Set to ``auto`` to + automatically detect the server's version. Default: ``1.35`` + timeout (int): Default timeout for API calls, in seconds. + tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass + ``True`` to enable it with default options, or pass a + :py:class:`~docker.tls.TLSConfig` object to use custom + configuration. + user_agent (str): Set a custom user agent for requests to the server. + credstore_env (dict): Override environment variables when calling the + credential store process. + use_ssh_client (bool): If set to `True`, an ssh connection is made + via shelling out to the ssh client. Ensure the ssh client is + installed and configured on the host. + max_pool_size (int): The maximum number of connections + to save in the pool. + """ + + __attrs__ = requests.Session.__attrs__ + ['_auth_configs', + '_general_configs', + '_version', + 'base_url', + 'timeout'] + + def __init__(self, base_url=None, version=None, + timeout=DEFAULT_TIMEOUT_SECONDS, tls=False, + user_agent=DEFAULT_USER_AGENT, num_pools=None, + credstore_env=None, use_ssh_client=False, + max_pool_size=DEFAULT_MAX_POOL_SIZE): + super().__init__() + + if tls and not base_url: + raise TLSParameterError( + 'If using TLS, the base_url argument must be provided.' 
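As an illustration of the `tls` and `version` parameters described above, a hedged sketch of a mutual-TLS connection; the certificate paths and address are illustrative:

```python
import docker
from docker.tls import TLSConfig

# Client certificate pair plus the CA used to verify the daemon.
tls_config = TLSConfig(
    client_cert=('/certs/cert.pem', '/certs/key.pem'),
    ca_cert='/certs/ca.pem',
    verify=True,
)
client = docker.APIClient(
    base_url='tcp://127.0.0.1:2376',
    tls=tls_config,
    version='auto',  # negotiate the API version with the daemon
)
```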
+ ) + + self.base_url = base_url + self.timeout = timeout + self.headers['User-Agent'] = user_agent + + self._general_configs = config.load_general_config() + + proxy_config = self._general_configs.get('proxies', {}) + try: + proxies = proxy_config[base_url] + except KeyError: + proxies = proxy_config.get('default', {}) + + self._proxy_configs = ProxyConfig.from_dict(proxies) + + self._auth_configs = auth.load_config( + config_dict=self._general_configs, credstore_env=credstore_env, + ) + self.credstore_env = credstore_env + + base_url = utils.parse_host( + base_url, IS_WINDOWS_PLATFORM, tls=bool(tls) + ) + # SSH has a different default for num_pools to all other adapters + num_pools = num_pools or DEFAULT_NUM_POOLS_SSH if \ + base_url.startswith('ssh://') else DEFAULT_NUM_POOLS + + if base_url.startswith('http+unix://'): + self._custom_adapter = UnixHTTPAdapter( + base_url, timeout, pool_connections=num_pools, + max_pool_size=max_pool_size + ) + self.mount('http+docker://', self._custom_adapter) + self._unmount('http://', 'https://') + # host part of URL should be unused, but is resolved by requests + # module in proxy_bypass_macosx_sysconf() + self.base_url = 'http+docker://localhost' + elif base_url.startswith('npipe://'): + if not IS_WINDOWS_PLATFORM: + raise DockerException( + 'The npipe:// protocol is only supported on Windows' + ) + try: + self._custom_adapter = NpipeHTTPAdapter( + base_url, timeout, pool_connections=num_pools, + max_pool_size=max_pool_size + ) + except NameError as err: + raise DockerException( + 'Install pypiwin32 package to enable npipe:// support' + ) from err + self.mount('http+docker://', self._custom_adapter) + self.base_url = 'http+docker://localnpipe' + elif base_url.startswith('ssh://'): + try: + self._custom_adapter = SSHHTTPAdapter( + base_url, timeout, pool_connections=num_pools, + max_pool_size=max_pool_size, shell_out=use_ssh_client + ) + except NameError as err: + raise DockerException( + 'Install paramiko package to enable ssh:// support' + ) from err + self.mount('http+docker://ssh', self._custom_adapter) + self._unmount('http://', 'https://') + self.base_url = 'http+docker://ssh' + else: + # Use SSLAdapter for the ability to specify SSL version + if isinstance(tls, TLSConfig): + tls.configure_client(self) + elif tls: + self._custom_adapter = requests.adapters.HTTPAdapter( + pool_connections=num_pools) + self.mount('https://', self._custom_adapter) + self.base_url = base_url + + # version detection needs to be after unix adapter mounting + if version is None or (isinstance( + version, + str + ) and version.lower() == 'auto'): + self._version = self._retrieve_server_version() + else: + self._version = version + if not isinstance(self._version, str): + raise DockerException( + 'Version parameter must be a string or None. ' + f'Found {type(version).__name__}' + ) + if utils.version_lt(self._version, MINIMUM_DOCKER_API_VERSION): + raise InvalidVersion( + f'API versions below {MINIMUM_DOCKER_API_VERSION} are ' + f'no longer supported by this library.' + ) + + def _retrieve_server_version(self): + try: + return self.version(api_version=False)["ApiVersion"] + except KeyError as ke: + raise DockerException( + 'Invalid response from docker daemon: key "ApiVersion"' + ' is missing.' 
+ ) from ke + except Exception as e: + raise DockerException( + f'Error while fetching server API version: {e}' + ) from e + + def _set_request_timeout(self, kwargs): + """Prepare the kwargs for an HTTP request by inserting the timeout + parameter, if not already present.""" + kwargs.setdefault('timeout', self.timeout) + return kwargs + + @update_headers + def _post(self, url, **kwargs): + return self.post(url, **self._set_request_timeout(kwargs)) + + @update_headers + def _get(self, url, **kwargs): + return self.get(url, **self._set_request_timeout(kwargs)) + + @update_headers + def _put(self, url, **kwargs): + return self.put(url, **self._set_request_timeout(kwargs)) + + @update_headers + def _delete(self, url, **kwargs): + return self.delete(url, **self._set_request_timeout(kwargs)) + + def _url(self, pathfmt, *args, **kwargs): + for arg in args: + if not isinstance(arg, str): + raise ValueError( + f'Expected a string but found {arg} ({type(arg)}) instead' + ) + + quote_f = partial(urllib.parse.quote, safe="/:") + args = map(quote_f, args) + + formatted_path = pathfmt.format(*args) + if kwargs.get('versioned_api', True): + return f'{self.base_url}/v{self._version}{formatted_path}' + else: + return f'{self.base_url}{formatted_path}' + + def _raise_for_status(self, response): + """Raises stored :class:`APIError`, if one occurred.""" + try: + response.raise_for_status() + except requests.exceptions.HTTPError as e: + raise create_api_error_from_http_exception(e) from e + + def _result(self, response, json=False, binary=False): + assert not (json and binary) + self._raise_for_status(response) + + if json: + return response.json() + if binary: + return response.content + return response.text + + def _post_json(self, url, data, **kwargs): + # Go <1.1 can't unserialize null to a string + # so we do this disgusting thing here. + data2 = {} + if data is not None and isinstance(data, dict): + for k, v in iter(data.items()): + if v is not None: + data2[k] = v + elif data is not None: + data2 = data + + if 'headers' not in kwargs: + kwargs['headers'] = {} + kwargs['headers']['Content-Type'] = 'application/json' + return self._post(url, data=json.dumps(data2), **kwargs) + + def _attach_params(self, override=None): + return override or { + 'stdout': 1, + 'stderr': 1, + 'stream': 1 + } + + @check_resource('container') + def _attach_websocket(self, container, params=None): + url = self._url("/containers/{0}/attach/ws", container) + req = requests.Request("POST", url, params=self._attach_params(params)) + full_url = req.prepare().url + full_url = full_url.replace("http://", "ws://", 1) + full_url = full_url.replace("https://", "wss://", 1) + return self._create_websocket_connection(full_url) + + def _create_websocket_connection(self, url): + try: + import websocket + return websocket.create_connection(url) + except ImportError as ie: + raise DockerException( + 'The `websocket-client` library is required ' + 'for using websocket connections. ' + 'You can install the `docker` library ' + 'with the [websocket] extra to install it.' + ) from ie + + def _get_raw_response_socket(self, response): + self._raise_for_status(response) + if self.base_url == "http+docker://localnpipe": + sock = response.raw._fp.fp.raw.sock + elif self.base_url.startswith('http+docker://ssh'): + sock = response.raw._fp.fp.channel + else: + sock = response.raw._fp.fp.raw + if self.base_url.startswith("https://"): + sock = sock._sock + try: + # Keep a reference to the response to stop it being garbage + # collected. 
If the response is garbage collected, it will + # close TLS sockets. + sock._response = response + except AttributeError: + # UNIX sockets can't have attributes set on them, but that's + # fine because we won't be doing TLS over them + pass + + return sock + + def _stream_helper(self, response, decode=False): + """Generator for data coming from a chunked-encoded HTTP response.""" + + if response.raw._fp.chunked: + if decode: + yield from json_stream(self._stream_helper(response, False)) + else: + reader = response.raw + while not reader.closed: + # this read call will block until we get a chunk + data = reader.read(1) + if not data: + break + if reader._fp.chunk_left: + data += reader.read(reader._fp.chunk_left) + yield data + else: + # Response isn't chunked, meaning we probably + # encountered an error immediately + yield self._result(response, json=decode) + + def _multiplexed_buffer_helper(self, response): + """A generator of multiplexed data blocks read from a buffered + response.""" + buf = self._result(response, binary=True) + buf_length = len(buf) + walker = 0 + while True: + if buf_length - walker < STREAM_HEADER_SIZE_BYTES: + break + header = buf[walker:walker + STREAM_HEADER_SIZE_BYTES] + _, length = struct.unpack_from('>BxxxL', header) + start = walker + STREAM_HEADER_SIZE_BYTES + end = start + length + walker = end + yield buf[start:end] + + def _multiplexed_response_stream_helper(self, response): + """A generator of multiplexed data blocks coming from a response + stream.""" + + # Disable timeout on the underlying socket to prevent + # Read timed out(s) for long running processes + socket = self._get_raw_response_socket(response) + self._disable_socket_timeout(socket) + + while True: + header = response.raw.read(STREAM_HEADER_SIZE_BYTES) + if not header: + break + _, length = struct.unpack('>BxxxL', header) + if not length: + continue + data = response.raw.read(length) + if not data: + break + yield data + + def _stream_raw_result(self, response, chunk_size=1, decode=True): + ''' Stream result for TTY-enabled container and raw binary data''' + self._raise_for_status(response) + + # Disable timeout on the underlying socket to prevent + # Read timed out(s) for long running processes + socket = self._get_raw_response_socket(response) + self._disable_socket_timeout(socket) + + yield from response.iter_content(chunk_size, decode) + + def _read_from_socket(self, response, stream, tty=True, demux=False): + """Consume all data from the socket, close the response and return the + data. If stream=True, then a generator is returned instead and the + caller is responsible for closing the response. + """ + socket = self._get_raw_response_socket(response) + + gen = frames_iter(socket, tty) + + if demux: + # The generator will output tuples (stdout, stderr) + gen = (demux_adaptor(*frame) for frame in gen) + else: + # The generator will output strings + gen = (data for (_, data) in gen) + + if stream: + return gen + else: + try: + # Wait for all frames, concatenate them, and return the result + return consume_socket_output(gen, demux=demux) + finally: + response.close() + + def _disable_socket_timeout(self, socket): + """ Depending on the combination of python version and whether we're + connecting over http or https, we might need to access _sock, which + may or may not exist; or we may need to just settimeout on socket + itself, which also may or may not have settimeout on it. To avoid + missing the correct one, we try both. 
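For reference, the `>BxxxL` pattern unpacked by the multiplexed helpers above corresponds to the Engine's 8-byte stream frame header: one stream-type byte, three padding bytes, and a big-endian payload length. A self-contained sketch of that framing:

```python
import struct

# One stdout frame carrying b'hello' (stream type 1 = stdout).
frame = b'\x01\x00\x00\x00\x00\x00\x00\x05hello'
stream_type, length = struct.unpack('>BxxxL', frame[:8])
payload = frame[8:8 + length]
assert stream_type == 1 and payload == b'hello'
```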
+ + We also do not want to set the timeout if it is already disabled, as + you run the risk of changing a socket that was non-blocking to + blocking, for example when using gevent. + """ + sockets = [socket, getattr(socket, '_sock', None)] + + for s in sockets: + if not hasattr(s, 'settimeout'): + continue + + timeout = -1 + + if hasattr(s, 'gettimeout'): + timeout = s.gettimeout() + + # Don't change the timeout if it is already disabled. + if timeout is None or timeout == 0.0: + continue + + s.settimeout(None) + + @check_resource('container') + def _check_is_tty(self, container): + cont = self.inspect_container(container) + return cont['Config']['Tty'] + + def _get_result(self, container, stream, res): + return self._get_result_tty(stream, res, self._check_is_tty(container)) + + def _get_result_tty(self, stream, res, is_tty): + # We should also use raw streaming (without keep-alives) + # if we're dealing with a tty-enabled container. + if is_tty: + return self._stream_raw_result(res) if stream else \ + self._result(res, binary=True) + + self._raise_for_status(res) + sep = b'' + if stream: + return self._multiplexed_response_stream_helper(res) + else: + return sep.join( + list(self._multiplexed_buffer_helper(res)) + ) + + def _unmount(self, *args): + for proto in args: + self.adapters.pop(proto) + + def get_adapter(self, url): + try: + return super().get_adapter(url) + except requests.exceptions.InvalidSchema as e: + if self._custom_adapter: + return self._custom_adapter + else: + raise e + + @property + def api_version(self): + return self._version + + def reload_config(self, dockercfg_path=None): + """ + Force a reload of the auth configuration + + Args: + dockercfg_path (str): Use a custom path for the Docker config file + (default ``$HOME/.docker/config.json`` if present, + otherwise ``$HOME/.dockercfg``) + + Returns: + None + """ + self._auth_configs = auth.load_config( + dockercfg_path, credstore_env=self.credstore_env + ) diff --git a/docker/api/config.py b/docker/api/config.py new file mode 100644 index 0000000000..88c367ec34 --- /dev/null +++ b/docker/api/config.py @@ -0,0 +1,92 @@ +import base64 + +from .. 
import utils + + +class ConfigApiMixin: + @utils.minimum_version('1.30') + def create_config(self, name, data, labels=None, templating=None): + """ + Create a config + + Args: + name (string): Name of the config + data (bytes): Config data to be stored + labels (dict): A mapping of labels to assign to the config + templating (dict): dictionary containing the name of the + templating driver to be used expressed as + ``{ name: <templating_driver_name> }`` + + Returns (dict): ID of the newly created config + """ + if not isinstance(data, bytes): + data = data.encode('utf-8') + + data = base64.b64encode(data) + data = data.decode('ascii') + body = { + 'Data': data, + 'Name': name, + 'Labels': labels, + 'Templating': templating + } + + url = self._url('/configs/create') + return self._result( + self._post_json(url, data=body), True + ) + + @utils.minimum_version('1.30') + @utils.check_resource('id') + def inspect_config(self, id): + """ + Retrieve config metadata + + Args: + id (string): Full ID of the config to inspect + + Returns (dict): A dictionary of metadata + + Raises: + :py:class:`docker.errors.NotFound` + if no config with that ID exists + """ + url = self._url('/configs/{0}', id) + return self._result(self._get(url), True) + + @utils.minimum_version('1.30') + @utils.check_resource('id') + def remove_config(self, id): + """ + Remove a config + + Args: + id (string): Full ID of the config to remove + + Returns (boolean): True if successful + + Raises: + :py:class:`docker.errors.NotFound` + if no config with that ID exists + """ + url = self._url('/configs/{0}', id) + res = self._delete(url) + self._raise_for_status(res) + return True + + @utils.minimum_version('1.30') + def configs(self, filters=None): + """ + List configs + + Args: + filters (dict): A map of filters to process on the configs + list. Available filters: ``names`` + + Returns (list): A list of configs + """ + url = self._url('/configs') + params = {} + if filters: + params['filters'] = utils.convert_filters(filters) + return self._result(self._get(url, params=params), True) diff --git a/docker/api/container.py b/docker/api/container.py new file mode 100644 index 0000000000..d1b870f9c2 --- /dev/null +++ b/docker/api/container.py @@ -0,0 +1,1348 @@ +from datetime import datetime + +from .. import errors, utils +from ..constants import DEFAULT_DATA_CHUNK_SIZE +from ..types import ( + CancellableStream, + ContainerConfig, + EndpointConfig, + HostConfig, + NetworkingConfig, +) + + +class ContainerApiMixin: + @utils.check_resource('container') + def attach(self, container, stdout=True, stderr=True, + stream=False, logs=False, demux=False): + """ + Attach to a container. + + The ``.logs()`` function is a wrapper around this method, which you can + use instead if you want to fetch/stream container output without first + retrieving the entire backlog. + + Args: + container (str): The container to attach to. + stdout (bool): Include stdout. + stderr (bool): Include stderr. + stream (bool): Return container output progressively as an iterator + of strings, rather than a single string. + logs (bool): Include the container's previous output. + demux (bool): Keep stdout and stderr separate. + + Returns: + By default, the container's output as a single string (two if + ``demux=True``: one for stdout and one for stderr). + + If ``stream=True``, an iterator of output strings. If + ``demux=True``, two iterators are returned: one for stdout and one + for stderr. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error.
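A hedged usage sketch for the config API above; it requires a Swarm-mode daemon, the config name and payload are illustrative, and `client` is assumed to be an `APIClient`:

```python
# Create, inspect, list, and remove a config.
created = client.create_config(name='app-settings', data=b'debug=false')
print(client.inspect_config(created['ID'])['Spec']['Name'])
print([c['Spec']['Name']
       for c in client.configs(filters={'names': ['app-settings']})])
client.remove_config(created['ID'])
```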
+ """ + params = { + 'logs': logs and 1 or 0, + 'stdout': stdout and 1 or 0, + 'stderr': stderr and 1 or 0, + 'stream': stream and 1 or 0 + } + + headers = { + 'Connection': 'Upgrade', + 'Upgrade': 'tcp' + } + + u = self._url("/containers/{0}/attach", container) + response = self._post(u, headers=headers, params=params, stream=True) + + output = self._read_from_socket( + response, stream, self._check_is_tty(container), demux=demux) + + if stream: + return CancellableStream(output, response) + else: + return output + + @utils.check_resource('container') + def attach_socket(self, container, params=None, ws=False): + """ + Like ``attach``, but returns the underlying socket-like object for the + HTTP request. + + Args: + container (str): The container to attach to. + params (dict): Dictionary of request parameters (e.g. ``stdout``, + ``stderr``, ``stream``). + For ``detachKeys``, ~/.docker/config.json is used by default. + ws (bool): Use websockets instead of raw HTTP. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + if params is None: + params = { + 'stdout': 1, + 'stderr': 1, + 'stream': 1 + } + + if 'detachKeys' not in params \ + and 'detachKeys' in self._general_configs: + + params['detachKeys'] = self._general_configs['detachKeys'] + + if ws: + return self._attach_websocket(container, params) + + headers = { + 'Connection': 'Upgrade', + 'Upgrade': 'tcp' + } + + u = self._url("/containers/{0}/attach", container) + return self._get_raw_response_socket( + self.post( + u, None, params=self._attach_params(params), stream=True, + headers=headers + ) + ) + + @utils.check_resource('container') + def commit(self, container, repository=None, tag=None, message=None, + author=None, pause=True, changes=None, conf=None): + """ + Commit a container to an image. Similar to the ``docker commit`` + command. + + Args: + container (str): The image hash of the container + repository (str): The repository to push the image to + tag (str): The tag to push + message (str): A commit message + author (str): The name of the author + pause (bool): Whether to pause the container before committing + changes (str): Dockerfile instructions to apply while committing + conf (dict): The configuration for the container. See the + `Engine API documentation + `_ + for full details. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + params = { + 'container': container, + 'repo': repository, + 'tag': tag, + 'comment': message, + 'author': author, + 'pause': pause, + 'changes': changes + } + u = self._url("/commit") + return self._result( + self._post_json(u, data=conf, params=params), json=True + ) + + def containers(self, quiet=False, all=False, trunc=False, latest=False, + since=None, before=None, limit=-1, size=False, + filters=None): + """ + List containers. Similar to the ``docker ps`` command. + + Args: + quiet (bool): Only display numeric Ids + all (bool): Show all containers. Only running containers are shown + by default + trunc (bool): Truncate output + latest (bool): Show only the latest created container, include + non-running ones. + since (str): Show only containers created since Id or Name, include + non-running ones + before (str): Show only container created before Id or Name, + include non-running ones + limit (int): Show `limit` last created containers, include + non-running ones + size (bool): Display sizes + filters (dict): Filters to be processed on the image list. 
+ Available filters: + + - `exited` (int): Only containers with specified exit code + - `status` (str): One of ``restarting``, ``running``, + ``paused``, ``exited`` + - `label` (str|list): format either ``"key"``, ``"key=value"`` + or a list of such. + - `id` (str): The id of the container. + - `name` (str): The name of the container. + - `ancestor` (str): Filter by container ancestor. Format of + ``[:tag]``, ````, or + ````. + - `before` (str): Only containers created before a particular + container. Give the container name or id. + - `since` (str): Only containers created after a particular + container. Give container name or id. + + A comprehensive list can be found in the documentation for + `docker ps + `_. + + Returns: + A list of dicts, one per container + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + params = { + 'limit': 1 if latest else limit, + 'all': 1 if all else 0, + 'size': 1 if size else 0, + 'trunc_cmd': 1 if trunc else 0, + 'since': since, + 'before': before + } + if filters: + params['filters'] = utils.convert_filters(filters) + u = self._url("/containers/json") + res = self._result(self._get(u, params=params), True) + + if quiet: + return [{'Id': x['Id']} for x in res] + if trunc: + for x in res: + x['Id'] = x['Id'][:12] + return res + + def create_container(self, image, command=None, hostname=None, user=None, + detach=False, stdin_open=False, tty=False, ports=None, + environment=None, volumes=None, + network_disabled=False, name=None, entrypoint=None, + working_dir=None, domainname=None, host_config=None, + mac_address=None, labels=None, stop_signal=None, + networking_config=None, healthcheck=None, + stop_timeout=None, runtime=None, + use_config_proxy=True, platform=None): + """ + Creates a container. Parameters are similar to those for the ``docker + run`` command except it doesn't support the attach options (``-a``). + + The arguments that are passed directly to this function are + host-independent configuration options. Host-specific configuration + is passed with the `host_config` argument. You'll normally want to + use this method in combination with the :py:meth:`create_host_config` + method to generate ``host_config``. + + **Port bindings** + + Port binding is done in two parts: first, provide a list of ports to + open inside the container with the ``ports`` parameter, then declare + bindings with the ``host_config`` parameter. For example: + + .. code-block:: python + + container_id = client.api.create_container( + 'busybox', 'ls', ports=[1111, 2222], + host_config=client.api.create_host_config(port_bindings={ + 1111: 4567, + 2222: None + }) + ) + + + You can limit the host address on which the port will be exposed like + such: + + .. code-block:: python + + client.api.create_host_config( + port_bindings={1111: ('127.0.0.1', 4567)} + ) + + Or without host port assignment: + + .. code-block:: python + + client.api.create_host_config(port_bindings={1111: ('127.0.0.1',)}) + + If you wish to use UDP instead of TCP (default), you need to declare + ports as such in both the config and host config: + + .. code-block:: python + + container_id = client.api.create_container( + 'busybox', 'ls', ports=[(1111, 'udp'), 2222], + host_config=client.api.create_host_config(port_bindings={ + '1111/udp': 4567, 2222: None + }) + ) + + To bind multiple host ports to a single container port, use the + following syntax: + + .. 
code-block:: python
+
+            client.api.create_host_config(port_bindings={
+                1111: [1234, 4567]
+            })
+
+        You can also bind multiple IPs to a single container port:
+
+        .. code-block:: python
+
+            client.api.create_host_config(port_bindings={
+                1111: [
+                    ('192.168.0.100', 1234),
+                    ('192.168.0.101', 1234)
+                ]
+            })
+
+        **Using volumes**
+
+        Volume declaration is done in two parts. Provide a list of
+        paths to use as mountpoints inside the container with the
+        ``volumes`` parameter, and declare mappings from paths on the host
+        in the ``host_config`` section.
+
+        .. code-block:: python
+
+            container_id = client.api.create_container(
+                'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
+                host_config=client.api.create_host_config(binds={
+                    '/home/user1/': {
+                        'bind': '/mnt/vol2',
+                        'mode': 'rw',
+                    },
+                    '/var/www': {
+                        'bind': '/mnt/vol1',
+                        'mode': 'ro',
+                    },
+                    '/autofs/user1': {
+                        'bind': '/mnt/vol3',
+                        'mode': 'rw',
+                        'propagation': 'shared'
+                    }
+                })
+            )
+
+        You can alternatively specify binds as a list. This code is equivalent
+        to the example above:
+
+        .. code-block:: python
+
+            container_id = client.api.create_container(
+                'busybox', 'ls',
+                volumes=['/mnt/vol1', '/mnt/vol2', '/mnt/vol3'],
+                host_config=client.api.create_host_config(binds=[
+                    '/home/user1/:/mnt/vol2',
+                    '/var/www:/mnt/vol1:ro',
+                    '/autofs/user1:/mnt/vol3:rw,shared',
+                ])
+            )
+
+        **Networking**
+
+        You can specify networks to connect the container to by using the
+        ``networking_config`` parameter. At the time of creation, you can
+        only connect a container to a single network, but you
+        can create more connections by using
+        :py:meth:`~connect_container_to_network`.
+
+        For example:
+
+        .. code-block:: python
+
+            networking_config = client.api.create_networking_config({
+                'network1': client.api.create_endpoint_config(
+                    ipv4_address='172.28.0.124',
+                    aliases=['foo', 'bar'],
+                    links=['container2']
+                )
+            })
+
+            ctnr = client.api.create_container(
+                img, command, networking_config=networking_config
+            )
+
+        Args:
+            image (str): The image to run
+            command (str or list): The command to be run in the container
+            hostname (str): Optional hostname for the container
+            user (str or int): Username or UID
+            detach (bool): Detached mode: run container in the background and
+                return container ID
+            stdin_open (bool): Keep STDIN open even if not attached
+            tty (bool): Allocate a pseudo-TTY
+            ports (list of ints): A list of port numbers
+            environment (dict or list): A dictionary or a list of strings in
+                the following format ``["PASSWORD=xxx"]`` or
+                ``{"PASSWORD": "xxx"}``.
+            volumes (str or list): List of paths inside the container to use
+                as volumes.
+            network_disabled (bool): Disable networking
+            name (str): A name for the container
+            entrypoint (str or list): An entrypoint
+            working_dir (str): Path to the working directory
+            domainname (str): The domain name to use for the container
+            host_config (dict): A dictionary created with
+                :py:meth:`create_host_config`.
+            mac_address (str): The MAC address to assign to the container
+            labels (dict or list): A dictionary of name-value labels (e.g.
+                ``{"label1": "value1", "label2": "value2"}``) or a list of
+                names of labels to set with empty values (e.g.
+                ``["label1", "label2"]``)
+            stop_signal (str): The stop signal to use to stop the container
+                (e.g. ``SIGINT``).
+            stop_timeout (int): Timeout to stop the container, in seconds.
+                Default: 10
+            networking_config (dict): A networking configuration generated
+                by :py:meth:`create_networking_config`.
+ runtime (str): Runtime to use with this container. + healthcheck (dict): Specify a test to perform to check that the + container is healthy. + use_config_proxy (bool): If ``True``, and if the docker client + configuration file (``~/.docker/config.json`` by default) + contains a proxy configuration, the corresponding environment + variables will be set in the container being created. + platform (str): Platform in the format ``os[/arch[/variant]]``. + + Returns: + A dictionary with an image 'Id' key and a 'Warnings' key. + + Raises: + :py:class:`docker.errors.ImageNotFound` + If the specified image does not exist. + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + if isinstance(volumes, str): + volumes = [volumes, ] + + if isinstance(environment, dict): + environment = utils.utils.format_environment(environment) + + if use_config_proxy: + environment = self._proxy_configs.inject_proxy_environment( + environment + ) or None + + config = self.create_container_config( + image, command, hostname, user, detach, stdin_open, tty, + ports, environment, volumes, + network_disabled, entrypoint, working_dir, domainname, + host_config, mac_address, labels, + stop_signal, networking_config, healthcheck, + stop_timeout, runtime + ) + return self.create_container_from_config(config, name, platform) + + def create_container_config(self, *args, **kwargs): + return ContainerConfig(self._version, *args, **kwargs) + + def create_container_from_config(self, config, name=None, platform=None): + u = self._url("/containers/create") + params = { + 'name': name + } + if platform: + if utils.version_lt(self._version, '1.41'): + raise errors.InvalidVersion( + 'platform is not supported for API version < 1.41' + ) + params['platform'] = platform + res = self._post_json(u, data=config, params=params) + return self._result(res, True) + + def create_host_config(self, *args, **kwargs): + """ + Create a dictionary for the ``host_config`` argument to + :py:meth:`create_container`. + + Args: + auto_remove (bool): enable auto-removal of the container on daemon + side when the container's process exits. + binds (dict): Volumes to bind. See :py:meth:`create_container` + for more information. + blkio_weight_device: Block IO weight (relative device weight) in + the form of: ``[{"Path": "device_path", "Weight": weight}]``. + blkio_weight: Block IO weight (relative weight), accepts a weight + value between 10 and 1000. + cap_add (list of str): Add kernel capabilities. For example, + ``["SYS_ADMIN", "MKNOD"]``. + cap_drop (list of str): Drop kernel capabilities. + cpu_period (int): The length of a CPU period in microseconds. + cpu_quota (int): Microseconds of CPU time that the container can + get in a CPU period. + cpu_shares (int): CPU shares (relative weight). + cpuset_cpus (str): CPUs in which to allow execution (``0-3``, + ``0,1``). + cpuset_mems (str): Memory nodes (MEMs) in which to allow execution + (``0-3``, ``0,1``). Only effective on NUMA systems. + device_cgroup_rules (:py:class:`list`): A list of cgroup rules to + apply to the container. + device_read_bps: Limit read rate (bytes per second) from a device + in the form of: `[{"Path": "device_path", "Rate": rate}]` + device_read_iops: Limit read rate (IO per second) from a device. + device_write_bps: Limit write rate (bytes per second) from a + device. + device_write_iops: Limit write rate (IO per second) from a device. + devices (:py:class:`list`): Expose host devices to the container, + as a list of strings in the form + ``::``. 
+
+                For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
+                to have read-write access to the host's ``/dev/sda`` via a
+                node named ``/dev/xvda`` inside the container.
+            device_requests (:py:class:`list`): Expose host resources such as
+                GPUs to the container, as a list of
+                :py:class:`docker.types.DeviceRequest` instances.
+            dns (:py:class:`list`): Set custom DNS servers.
+            dns_opt (:py:class:`list`): Additional options to be added to the
+                container's ``resolv.conf`` file
+            dns_search (:py:class:`list`): DNS search domains.
+            extra_hosts (dict): Additional hostnames to resolve inside the
+                container, as a mapping of hostname to IP address.
+            group_add (:py:class:`list`): List of additional group names
+                and/or IDs that the container process will run as.
+            init (bool): Run an init inside the container that forwards
+                signals and reaps processes
+            ipc_mode (str): Set the IPC mode for the container.
+            isolation (str): Isolation technology to use. Default: ``None``.
+            links (dict): Mapping of links using the
+                ``{'container': 'alias'}`` format. The alias is optional.
+                Containers declared in this dict will be linked to the new
+                container using the provided alias. Default: ``None``.
+            log_config (LogConfig): Logging configuration
+            lxc_conf (dict): LXC config.
+            mem_limit (float or str): Memory limit. Accepts float values
+                (which represent the memory limit of the created container in
+                bytes) or a string with a units identification char
+                (``100000b``, ``1000k``, ``128m``, ``1g``). If a string is
+                specified without a units character, bytes are assumed as the
+                intended unit.
+            mem_reservation (float or str): Memory soft limit.
+            mem_swappiness (int): Tune a container's memory swappiness
+                behavior. Accepts number between 0 and 100.
+            memswap_limit (str or int): Maximum amount of memory + swap a
+                container is allowed to consume.
+            mounts (:py:class:`list`): Specification for mounts to be added to
+                the container. More powerful alternative to ``binds``. Each
+                item in the list is expected to be a
+                :py:class:`docker.types.Mount` object.
+            network_mode (str): One of:
+
+                - ``bridge`` Create a new network stack for the container on
+                  the bridge network.
+                - ``none`` No networking for this container.
+                - ``container:<name|id>`` Reuse another container's network
+                  stack.
+                - ``host`` Use the host network stack.
+                  This mode is incompatible with ``port_bindings``.
+
+            oom_kill_disable (bool): Whether to disable OOM killer.
+            oom_score_adj (int): An integer value containing the score given
+                to the container in order to tune OOM killer preferences.
+            pid_mode (str): If set to ``host``, use the host PID namespace
+                inside the container.
+            pids_limit (int): Tune a container's pids limit. Set ``-1`` for
+                unlimited.
+            port_bindings (dict): See :py:meth:`create_container`
+                for more information.
+                Incompatible with ``host`` in ``network_mode``.
+            privileged (bool): Give extended privileges to this container.
+            publish_all_ports (bool): Publish all ports to the host.
+            read_only (bool): Mount the container's root filesystem as read
+                only.
+            restart_policy (dict): Restart the container when it exits.
+                Configured as a dictionary with keys:
+
+                - ``Name`` One of ``on-failure`` or ``always``.
+                - ``MaximumRetryCount`` Number of times to restart the
+                  container on failure.
+            security_opt (:py:class:`list`): A list of string values to
+                customize labels for MLS systems, such as SELinux.
+            shm_size (str or int): Size of /dev/shm (e.g. ``1G``).
+ storage_opt (dict): Storage driver options per container as a + key-value mapping. + sysctls (dict): Kernel parameters to set in the container. + tmpfs (dict): Temporary filesystems to mount, as a dictionary + mapping a path inside the container to options for that path. + + For example: + + .. code-block:: python + + { + '/mnt/vol2': '', + '/mnt/vol1': 'size=3G,uid=1000' + } + + ulimits (:py:class:`list`): Ulimits to set inside the container, + as a list of :py:class:`docker.types.Ulimit` instances. + userns_mode (str): Sets the user namespace mode for the container + when user namespace remapping option is enabled. Supported + values are: ``host`` + uts_mode (str): Sets the UTS namespace mode for the container. + Supported values are: ``host`` + volumes_from (:py:class:`list`): List of container names or IDs to + get volumes from. + runtime (str): Runtime to use with this container. + + + Returns: + (dict) A dictionary which can be passed to the ``host_config`` + argument to :py:meth:`create_container`. + + Example: + + >>> client.api.create_host_config( + ... privileged=True, + ... cap_drop=['MKNOD'], + ... volumes_from=['nostalgic_newton'], + ... ) + {'CapDrop': ['MKNOD'], 'LxcConf': None, 'Privileged': True, + 'VolumesFrom': ['nostalgic_newton'], 'PublishAllPorts': False} + +""" + if not kwargs: + kwargs = {} + if 'version' in kwargs: + raise TypeError( + "create_host_config() got an unexpected " + "keyword argument 'version'" + ) + kwargs['version'] = self._version + return HostConfig(*args, **kwargs) + + def create_networking_config(self, *args, **kwargs): + """ + Create a networking config dictionary to be used as the + ``networking_config`` parameter in :py:meth:`create_container`. + + Args: + endpoints_config (dict): A dictionary mapping network names to + endpoint configurations generated by + :py:meth:`create_endpoint_config`. + + Returns: + (dict) A networking config. + + Example: + + >>> client.api.create_network('network1') + >>> networking_config = client.api.create_networking_config({ + 'network1': client.api.create_endpoint_config() + }) + >>> container = client.api.create_container( + img, command, networking_config=networking_config + ) + + """ + return NetworkingConfig(*args, **kwargs) + + def create_endpoint_config(self, *args, **kwargs): + """ + Create an endpoint config dictionary to be used with + :py:meth:`create_networking_config`. + + Args: + aliases (:py:class:`list`): A list of aliases for this endpoint. + Names in that list can be used within the network to reach the + container. Defaults to ``None``. + links (dict): Mapping of links for this endpoint using the + ``{'container': 'alias'}`` format. The alias is optional. + Containers declared in this dict will be linked to this + container using the provided alias. Defaults to ``None``. + ipv4_address (str): The IP address of this container on the + network, using the IPv4 protocol. Defaults to ``None``. + ipv6_address (str): The IP address of this container on the + network, using the IPv6 protocol. Defaults to ``None``. + link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6) + addresses. + driver_opt (dict): A dictionary of options to provide to the + network driver. Defaults to ``None``. + + Returns: + (dict) An endpoint config. 
+ + Example: + + >>> endpoint_config = client.api.create_endpoint_config( + aliases=['web', 'app'], + links={'app_db': 'db', 'another': None}, + ipv4_address='132.65.0.123' + ) + + """ + return EndpointConfig(self._version, *args, **kwargs) + + @utils.check_resource('container') + def diff(self, container): + """ + Inspect changes on a container's filesystem. + + Args: + container (str): The container to diff + + Returns: + (list) A list of dictionaries containing the attributes `Path` + and `Kind`. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + return self._result( + self._get(self._url("/containers/{0}/changes", container)), True + ) + + @utils.check_resource('container') + def export(self, container, chunk_size=DEFAULT_DATA_CHUNK_SIZE): + """ + Export the contents of a filesystem as a tar archive. + + Args: + container (str): The container to export + chunk_size (int): The number of bytes returned by each iteration + of the generator. If ``None``, data will be streamed as it is + received. Default: 2 MB + + Returns: + (generator): The archived filesystem data stream + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + res = self._get( + self._url("/containers/{0}/export", container), stream=True + ) + return self._stream_raw_result(res, chunk_size, False) + + @utils.check_resource('container') + def get_archive(self, container, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE, + encode_stream=False): + """ + Retrieve a file or folder from a container in the form of a tar + archive. + + Args: + container (str): The container where the file is located + path (str): Path to the file or folder to retrieve + chunk_size (int): The number of bytes returned by each iteration + of the generator. If ``None``, data will be streamed as it is + received. Default: 2 MB + encode_stream (bool): Determines if data should be encoded + (gzip-compressed) during transmission. Default: False + + Returns: + (tuple): First element is a raw tar data stream. Second element is + a dict containing ``stat`` information on the specified ``path``. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + + Example: + + >>> c = docker.APIClient() + >>> f = open('./sh_bin.tar', 'wb') + >>> bits, stat = c.api.get_archive(container, '/bin/sh') + >>> print(stat) + {'name': 'sh', 'size': 1075464, 'mode': 493, + 'mtime': '2018-10-01T15:37:48-07:00', 'linkTarget': ''} + >>> for chunk in bits: + ... f.write(chunk) + >>> f.close() + """ + params = { + 'path': path + } + headers = { + "Accept-Encoding": "gzip, deflate" + } if encode_stream else { + "Accept-Encoding": "identity" + } + url = self._url('/containers/{0}/archive', container) + res = self._get(url, params=params, stream=True, headers=headers) + self._raise_for_status(res) + encoded_stat = res.headers.get('x-docker-container-path-stat') + return ( + self._stream_raw_result(res, chunk_size, False), + utils.decode_json_header(encoded_stat) if encoded_stat else None + ) + + @utils.check_resource('container') + def inspect_container(self, container): + """ + Identical to the `docker inspect` command, but only for containers. + + Args: + container (str): The container to inspect + + Returns: + (dict): Similar to the output of `docker inspect`, but as a + single dict + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. 
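+
+        Example:
+
+            A minimal sketch; it assumes ``client`` was created with
+            ``docker.from_env()`` and ``container`` is a valid container
+            ID or name:
+
+            .. code-block:: python
+
+                info = client.api.inspect_container(container)
+                print(info['State']['Status'], info['Config']['Image'])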
+ """ + return self._result( + self._get(self._url("/containers/{0}/json", container)), True + ) + + @utils.check_resource('container') + def kill(self, container, signal=None): + """ + Kill a container or send a signal to a container. + + Args: + container (str): The container to kill + signal (str or int): The signal to send. Defaults to ``SIGKILL`` + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + url = self._url("/containers/{0}/kill", container) + params = {} + if signal is not None: + if not isinstance(signal, str): + signal = int(signal) + params['signal'] = signal + res = self._post(url, params=params) + + self._raise_for_status(res) + + @utils.check_resource('container') + def logs(self, container, stdout=True, stderr=True, stream=False, + timestamps=False, tail='all', since=None, follow=None, + until=None): + """ + Get logs from a container. Similar to the ``docker logs`` command. + + The ``stream`` parameter makes the ``logs`` function return a blocking + generator you can iterate over to retrieve log output as it happens. + + Args: + container (str): The container to get logs from + stdout (bool): Get ``STDOUT``. Default ``True`` + stderr (bool): Get ``STDERR``. Default ``True`` + stream (bool): Stream the response. Default ``False`` + timestamps (bool): Show timestamps. Default ``False`` + tail (str or int): Output specified number of lines at the end of + logs. Either an integer of number of lines or the string + ``all``. Default ``all`` + since (datetime, int, or float): Show logs since a given datetime, + integer epoch (in seconds) or float (in fractional seconds) + follow (bool): Follow log output. Default ``False`` + until (datetime, int, or float): Show logs that occurred before + the given datetime, integer epoch (in seconds), or + float (in fractional seconds) + + Returns: + (generator of bytes or bytes) + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + if follow is None: + follow = stream + params = {'stderr': stderr and 1 or 0, + 'stdout': stdout and 1 or 0, + 'timestamps': timestamps and 1 or 0, + 'follow': follow and 1 or 0, + } + if tail != 'all' and (not isinstance(tail, int) or tail < 0): + tail = 'all' + params['tail'] = tail + + if since is not None: + if isinstance(since, datetime): + params['since'] = utils.datetime_to_timestamp(since) + elif (isinstance(since, int) and since > 0): + params['since'] = since + elif (isinstance(since, float) and since > 0.0): + params['since'] = since + else: + raise errors.InvalidArgument( + 'since value should be datetime or positive int/float,' + f' not {type(since)}' + ) + + if until is not None: + if utils.version_lt(self._version, '1.35'): + raise errors.InvalidVersion( + 'until is not supported for API version < 1.35' + ) + if isinstance(until, datetime): + params['until'] = utils.datetime_to_timestamp(until) + elif (isinstance(until, int) and until > 0): + params['until'] = until + elif (isinstance(until, float) and until > 0.0): + params['until'] = until + else: + raise errors.InvalidArgument( + f'until value should be datetime or positive int/float, ' + f'not {type(until)}' + ) + + url = self._url("/containers/{0}/logs", container) + res = self._get(url, params=params, stream=stream) + output = self._get_result(container, stream, res) + + if stream: + return CancellableStream(output, res) + else: + return output + + @utils.check_resource('container') + def pause(self, container): + """ + Pauses all processes within a container. 
+ + Args: + container (str): The container to pause + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + url = self._url('/containers/{0}/pause', container) + res = self._post(url) + self._raise_for_status(res) + + @utils.check_resource('container') + def port(self, container, private_port): + """ + Lookup the public-facing port that is NAT-ed to ``private_port``. + Identical to the ``docker port`` command. + + Args: + container (str): The container to look up + private_port (int): The private port to inspect + + Returns: + (list of dict): The mapping for the host ports + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + + Example: + .. code-block:: bash + + $ docker run -d -p 80:80 ubuntu:14.04 /bin/sleep 30 + 7174d6347063a83f412fad6124c99cffd25ffe1a0807eb4b7f9cec76ac8cb43b + + .. code-block:: python + + >>> client.api.port('7174d6347063', 80) + [{'HostIp': '0.0.0.0', 'HostPort': '80'}] + """ + res = self._get(self._url("/containers/{0}/json", container)) + self._raise_for_status(res) + json_ = res.json() + private_port = str(private_port) + h_ports = None + + # Port settings is None when the container is running with + # network_mode=host. + port_settings = json_.get('NetworkSettings', {}).get('Ports') + if port_settings is None: + return None + + if '/' in private_port: + return port_settings.get(private_port) + + for protocol in ['tcp', 'udp', 'sctp']: + h_ports = port_settings.get(f"{private_port}/{protocol}") + if h_ports: + break + + return h_ports + + @utils.check_resource('container') + def put_archive(self, container, path, data): + """ + Insert a file or folder in an existing container using a tar archive as + source. + + Args: + container (str): The container where the file(s) will be extracted + path (str): Path inside the container where the file(s) will be + extracted. Must exist. + data (bytes or stream): tar data to be extracted + + Returns: + (bool): True if the call succeeds. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + params = {'path': path} + url = self._url('/containers/{0}/archive', container) + res = self._put(url, params=params, data=data) + self._raise_for_status(res) + return res.status_code == 200 + + @utils.minimum_version('1.25') + def prune_containers(self, filters=None): + """ + Delete stopped containers + + Args: + filters (dict): Filters to process on the prune list. + + Returns: + (dict): A dict containing a list of deleted container IDs and + the amount of disk space reclaimed in bytes. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + params = {} + if filters: + params['filters'] = utils.convert_filters(filters) + url = self._url('/containers/prune') + return self._result(self._post(url, params=params), True) + + @utils.check_resource('container') + def remove_container(self, container, v=False, link=False, force=False): + """ + Remove a container. Similar to the ``docker rm`` command. + + Args: + container (str): The container to remove + v (bool): Remove the volumes associated with the container + link (bool): Remove the specified link and not the underlying + container + force (bool): Force the removal of a running container (uses + ``SIGKILL``) + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. 
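+
+        Example:
+
+            A minimal sketch; it assumes ``client`` was created with
+            ``docker.from_env()`` and ``container`` is the ID of a
+            stopped container:
+
+            .. code-block:: python
+
+                # Remove the container together with its anonymous volumes.
+                client.api.remove_container(container, v=True)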
+        """
+        params = {'v': v, 'link': link, 'force': force}
+        res = self._delete(
+            self._url("/containers/{0}", container), params=params
+        )
+        self._raise_for_status(res)
+
+    @utils.check_resource('container')
+    def rename(self, container, name):
+        """
+        Rename a container. Similar to the ``docker rename`` command.
+
+        Args:
+            container (str): ID of the container to rename
+            name (str): New name for the container
+
+        Raises:
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
+        """
+        url = self._url("/containers/{0}/rename", container)
+        params = {'name': name}
+        res = self._post(url, params=params)
+        self._raise_for_status(res)
+
+    @utils.check_resource('container')
+    def resize(self, container, height, width):
+        """
+        Resize the tty session.
+
+        Args:
+            container (str or dict): The container to resize
+            height (int): Height of tty session
+            width (int): Width of tty session
+
+        Raises:
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
+        """
+        params = {'h': height, 'w': width}
+        url = self._url("/containers/{0}/resize", container)
+        res = self._post(url, params=params)
+        self._raise_for_status(res)
+
+    @utils.check_resource('container')
+    def restart(self, container, timeout=10):
+        """
+        Restart a container. Similar to the ``docker restart`` command.
+
+        Args:
+            container (str or dict): The container to restart. If a dict, the
+                ``Id`` key is used.
+            timeout (int): Number of seconds to try to stop for before killing
+                the container. Once killed it will then be restarted. Default
+                is 10 seconds.
+
+        Raises:
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
+        """
+        params = {'t': timeout}
+        url = self._url("/containers/{0}/restart", container)
+        conn_timeout = self.timeout
+        if conn_timeout is not None:
+            conn_timeout += timeout
+        res = self._post(url, params=params, timeout=conn_timeout)
+        self._raise_for_status(res)
+
+    @utils.check_resource('container')
+    def start(self, container, *args, **kwargs):
+        """
+        Start a container. Similar to the ``docker start`` command, but
+        doesn't support attach options.
+
+        **Deprecation warning:** Passing configuration options in ``start`` is
+        no longer supported. Users are expected to provide host config options
+        in the ``host_config`` parameter of
+        :py:meth:`~ContainerApiMixin.create_container`.
+
+        Args:
+            container (str): The container to start
+
+        Raises:
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
+            :py:class:`docker.errors.DeprecatedMethod`
+                If any argument besides ``container`` is provided.
+
+        Example:
+
+            >>> container = client.api.create_container(
+            ...     image='busybox:latest',
+            ...     command='/bin/sleep 30')
+            >>> client.api.start(container=container.get('Id'))
+        """
+        if args or kwargs:
+            raise errors.DeprecatedMethod(
+                'Providing configuration in the start() method is no longer '
+                'supported. Use the host_config param in create_container '
+                'instead.'
+            )
+        url = self._url("/containers/{0}/start", container)
+        res = self._post(url)
+        self._raise_for_status(res)
+
+    @utils.check_resource('container')
+    def stats(self, container, decode=None, stream=True, one_shot=None):
+        """
+        Stream statistics for a specific container. Similar to the
+        ``docker stats`` command.
+
+        Args:
+            container (str): The container to stream statistics from
+            decode (bool): If set to true, stream will be decoded into dicts
+                on the fly. Only applicable if ``stream`` is True.
+                False by default.
+ stream (bool): If set to false, only the current stats will be + returned instead of a stream. True by default. + one_shot (bool): If set to true, Only get a single stat instead of + waiting for 2 cycles. Must be used with stream=false. False by + default. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + + """ + url = self._url("/containers/{0}/stats", container) + params = { + 'stream': stream + } + if one_shot is not None: + if utils.version_lt(self._version, '1.41'): + raise errors.InvalidVersion( + 'one_shot is not supported for API version < 1.41' + ) + params['one-shot'] = one_shot + if stream: + if one_shot: + raise errors.InvalidArgument( + 'one_shot is only available in conjunction with ' + 'stream=False' + ) + return self._stream_helper( + self._get(url, stream=True, params=params), decode=decode + ) + else: + if decode: + raise errors.InvalidArgument( + "decode is only available in conjunction with stream=True" + ) + return self._result(self._get(url, params=params), json=True) + + @utils.check_resource('container') + def stop(self, container, timeout=None): + """ + Stops a container. Similar to the ``docker stop`` command. + + Args: + container (str): The container to stop + timeout (int): Timeout in seconds to wait for the container to + stop before sending a ``SIGKILL``. If None, then the + StopTimeout value of the container will be used. + Default: None + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + if timeout is None: + params = {} + timeout = 10 + else: + params = {'t': timeout} + url = self._url("/containers/{0}/stop", container) + conn_timeout = self.timeout + if conn_timeout is not None: + conn_timeout += timeout + res = self._post(url, params=params, timeout=conn_timeout) + self._raise_for_status(res) + + @utils.check_resource('container') + def top(self, container, ps_args=None): + """ + Display the running processes of a container. + + Args: + container (str): The container to inspect + ps_args (str): An optional arguments passed to ps (e.g. ``aux``) + + Returns: + (str): The output of the top + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + u = self._url("/containers/{0}/top", container) + params = {} + if ps_args is not None: + params['ps_args'] = ps_args + return self._result(self._get(u, params=params), True) + + @utils.check_resource('container') + def unpause(self, container): + """ + Unpause all processes within a container. + + Args: + container (str): The container to unpause + """ + url = self._url('/containers/{0}/unpause', container) + res = self._post(url) + self._raise_for_status(res) + + @utils.minimum_version('1.22') + @utils.check_resource('container') + def update_container( + self, container, blkio_weight=None, cpu_period=None, cpu_quota=None, + cpu_shares=None, cpuset_cpus=None, cpuset_mems=None, mem_limit=None, + mem_reservation=None, memswap_limit=None, kernel_memory=None, + restart_policy=None + ): + """ + Update resource configs of one or more containers. 
+ + Args: + container (str): The container to inspect + blkio_weight (int): Block IO (relative weight), between 10 and 1000 + cpu_period (int): Limit CPU CFS (Completely Fair Scheduler) period + cpu_quota (int): Limit CPU CFS (Completely Fair Scheduler) quota + cpu_shares (int): CPU shares (relative weight) + cpuset_cpus (str): CPUs in which to allow execution + cpuset_mems (str): MEMs in which to allow execution + mem_limit (float or str): Memory limit + mem_reservation (float or str): Memory soft limit + memswap_limit (int or str): Total memory (memory + swap), -1 to + disable swap + kernel_memory (int or str): Kernel memory limit + restart_policy (dict): Restart policy dictionary + + Returns: + (dict): Dictionary containing a ``Warnings`` key. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + url = self._url('/containers/{0}/update', container) + data = {} + if blkio_weight: + data['BlkioWeight'] = blkio_weight + if cpu_period: + data['CpuPeriod'] = cpu_period + if cpu_shares: + data['CpuShares'] = cpu_shares + if cpu_quota: + data['CpuQuota'] = cpu_quota + if cpuset_cpus: + data['CpusetCpus'] = cpuset_cpus + if cpuset_mems: + data['CpusetMems'] = cpuset_mems + if mem_limit: + data['Memory'] = utils.parse_bytes(mem_limit) + if mem_reservation: + data['MemoryReservation'] = utils.parse_bytes(mem_reservation) + if memswap_limit: + data['MemorySwap'] = utils.parse_bytes(memswap_limit) + if kernel_memory: + data['KernelMemory'] = utils.parse_bytes(kernel_memory) + if restart_policy: + if utils.version_lt(self._version, '1.23'): + raise errors.InvalidVersion( + 'restart policy update is not supported ' + 'for API version < 1.23' + ) + data['RestartPolicy'] = restart_policy + + res = self._post_json(url, data=data) + return self._result(res, True) + + @utils.check_resource('container') + def wait(self, container, timeout=None, condition=None): + """ + Block until a container stops, then return its exit code. Similar to + the ``docker wait`` command. + + Args: + container (str or dict): The container to wait on. If a dict, the + ``Id`` key is used. + timeout (int): Request timeout + condition (str): Wait until a container state reaches the given + condition, either ``not-running`` (default), ``next-exit``, + or ``removed`` + + Returns: + (dict): The API's response as a Python dictionary, including + the container's exit code under the ``StatusCode`` attribute. + + Raises: + :py:class:`requests.exceptions.ReadTimeout` + If the timeout is exceeded. + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + url = self._url("/containers/{0}/wait", container) + params = {} + if condition is not None: + if utils.version_lt(self._version, '1.30'): + raise errors.InvalidVersion( + 'wait condition is not supported for API version < 1.30' + ) + params['condition'] = condition + + res = self._post(url, timeout=timeout, params=params) + return self._result(res, True) diff --git a/docker/api/daemon.py b/docker/api/daemon.py new file mode 100644 index 0000000000..a857213265 --- /dev/null +++ b/docker/api/daemon.py @@ -0,0 +1,181 @@ +import os +from datetime import datetime + +from .. import auth, types, utils + + +class DaemonApiMixin: + @utils.minimum_version('1.25') + def df(self): + """ + Get data usage information. + + Returns: + (dict): A dictionary representing different resource categories + and their respective data usage. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. 
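+
+        Example:
+
+            A minimal sketch; it assumes ``client`` was created with
+            ``docker.from_env()``. Field names follow the engine's
+            ``/system/df`` response:
+
+            .. code-block:: python
+
+                usage = client.api.df()
+                print(usage['LayersSize'])
+                for image in usage.get('Images') or []:
+                    print(image['Id'], image['Size'])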
+        """
+        url = self._url('/system/df')
+        return self._result(self._get(url), True)
+
+    def events(self, since=None, until=None, filters=None, decode=None):
+        """
+        Get real-time events from the server. Similar to the ``docker events``
+        command.
+
+        Args:
+            since (UTC datetime or int): Get events from this point
+            until (UTC datetime or int): Get events until this point
+            filters (dict): Filter the events by event time, container or image
+            decode (bool): If set to true, stream will be decoded into dicts on
+                the fly. False by default.
+
+        Returns:
+            A :py:class:`docker.types.daemon.CancellableStream` generator
+
+        Raises:
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
+
+        Example:
+
+            >>> for event in client.events(decode=True):
+            ...     print(event)
+            {'from': 'image/with:tag',
+             'id': 'container-id',
+             'status': 'start',
+             'time': 1423339459}
+            ...
+
+            or
+
+            >>> events = client.events()
+            >>> for event in events:
+            ...     print(event)
+            >>> # and cancel from another thread
+            >>> events.close()
+        """
+
+        if isinstance(since, datetime):
+            since = utils.datetime_to_timestamp(since)
+
+        if isinstance(until, datetime):
+            until = utils.datetime_to_timestamp(until)
+
+        if filters:
+            filters = utils.convert_filters(filters)
+
+        params = {
+            'since': since,
+            'until': until,
+            'filters': filters
+        }
+        url = self._url('/events')
+
+        response = self._get(url, params=params, stream=True, timeout=None)
+        stream = self._stream_helper(response, decode=decode)
+
+        return types.CancellableStream(stream, response)
+
+    def info(self):
+        """
+        Display system-wide information. Identical to the ``docker info``
+        command.
+
+        Returns:
+            (dict): The info as a dict
+
+        Raises:
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
+        """
+        return self._result(self._get(self._url("/info")), True)
+
+    def login(self, username, password=None, email=None, registry=None,
+              reauth=False, dockercfg_path=None):
+        """
+        Authenticate with a registry. Similar to the ``docker login`` command.
+
+        Args:
+            username (str): The registry username
+            password (str): The plaintext password
+            email (str): The email for the registry account
+            registry (str): URL to the registry. E.g.
+                ``https://index.docker.io/v1/``
+            reauth (bool): Whether or not to refresh existing authentication on
+                the Docker server.
+            dockercfg_path (str): Use a custom path for the Docker config file
+                (default ``$HOME/.docker/config.json`` if present,
+                otherwise ``$HOME/.dockercfg``)
+
+        Returns:
+            (dict): The response from the login request
+
+        Raises:
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
+        """
+
+        # If we don't have any auth data so far, try reloading the config file
+        # one more time in case anything showed up in there.
+        # If dockercfg_path is passed, check to see if the config file exists;
+        # if so, load that config.
+        if dockercfg_path and os.path.exists(dockercfg_path):
+            self._auth_configs = auth.load_config(
+                dockercfg_path, credstore_env=self.credstore_env
+            )
+        elif not self._auth_configs or self._auth_configs.is_empty:
+            self._auth_configs = auth.load_config(
+                credstore_env=self.credstore_env
+            )
+
+        authcfg = self._auth_configs.resolve_authconfig(registry)
+        # If we found an existing auth config for this registry and username
+        # combination, we can return it immediately unless reauth is requested.
+ if authcfg and authcfg.get('username', None) == username \ + and not reauth: + return authcfg + + req_data = { + 'username': username, + 'password': password, + 'email': email, + 'serveraddress': registry, + } + + response = self._post_json(self._url('/auth'), data=req_data) + if response.status_code == 200: + self._auth_configs.add_auth(registry or auth.INDEX_NAME, req_data) + return self._result(response, json=True) + + def ping(self): + """ + Checks the server is responsive. An exception will be raised if it + isn't responding. + + Returns: + (bool) The response from the server. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + return self._result(self._get(self._url('/_ping'))) == 'OK' + + def version(self, api_version=True): + """ + Returns version information from the server. Similar to the ``docker + version`` command. + + Returns: + (dict): The server version information + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + url = self._url("/version", versioned_api=api_version) + return self._result(self._get(url), json=True) diff --git a/docker/api/exec_api.py b/docker/api/exec_api.py new file mode 100644 index 0000000000..d8fc50dd3d --- /dev/null +++ b/docker/api/exec_api.py @@ -0,0 +1,176 @@ +from .. import errors, utils +from ..types import CancellableStream + + +class ExecApiMixin: + @utils.check_resource('container') + def exec_create(self, container, cmd, stdout=True, stderr=True, + stdin=False, tty=False, privileged=False, user='', + environment=None, workdir=None, detach_keys=None): + """ + Sets up an exec instance in a running container. + + Args: + container (str): Target container where exec instance will be + created + cmd (str or list): Command to be executed + stdout (bool): Attach to stdout. Default: ``True`` + stderr (bool): Attach to stderr. Default: ``True`` + stdin (bool): Attach to stdin. Default: ``False`` + tty (bool): Allocate a pseudo-TTY. Default: False + privileged (bool): Run as privileged. + user (str): User to execute command as. Default: root + environment (dict or list): A dictionary or a list of strings in + the following format ``["PASSWORD=xxx"]`` or + ``{"PASSWORD": "xxx"}``. + workdir (str): Path to working directory for this exec session + detach_keys (str): Override the key sequence for detaching + a container. Format is a single character `[a-Z]` + or `ctrl-` where `` is one of: + `a-z`, `@`, `^`, `[`, `,` or `_`. + ~/.docker/config.json is used by default. + + Returns: + (dict): A dictionary with an exec ``Id`` key. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. 
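+
+        Example:
+
+            A minimal sketch; it assumes ``client`` was created with
+            ``docker.from_env()`` and ``container`` is the ID of a
+            running container:
+
+            .. code-block:: python
+
+                exec_id = client.api.exec_create(container, cmd='ls -l /tmp')
+                print(exec_id['Id'])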
+ """ + + if environment is not None and utils.version_lt(self._version, '1.25'): + raise errors.InvalidVersion( + 'Setting environment for exec is not supported in API < 1.25' + ) + + if isinstance(cmd, str): + cmd = utils.split_command(cmd) + + if isinstance(environment, dict): + environment = utils.utils.format_environment(environment) + + data = { + 'Container': container, + 'User': user, + 'Privileged': privileged, + 'Tty': tty, + 'AttachStdin': stdin, + 'AttachStdout': stdout, + 'AttachStderr': stderr, + 'Cmd': cmd, + 'Env': environment, + } + + if workdir is not None: + if utils.version_lt(self._version, '1.35'): + raise errors.InvalidVersion( + 'workdir is not supported for API version < 1.35' + ) + data['WorkingDir'] = workdir + + if detach_keys: + data['detachKeys'] = detach_keys + elif 'detachKeys' in self._general_configs: + data['detachKeys'] = self._general_configs['detachKeys'] + + url = self._url('/containers/{0}/exec', container) + res = self._post_json(url, data=data) + return self._result(res, True) + + def exec_inspect(self, exec_id): + """ + Return low-level information about an exec command. + + Args: + exec_id (str): ID of the exec instance + + Returns: + (dict): Dictionary of values returned by the endpoint. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + if isinstance(exec_id, dict): + exec_id = exec_id.get('Id') + res = self._get(self._url("/exec/{0}/json", exec_id)) + return self._result(res, True) + + def exec_resize(self, exec_id, height=None, width=None): + """ + Resize the tty session used by the specified exec command. + + Args: + exec_id (str): ID of the exec instance + height (int): Height of tty session + width (int): Width of tty session + """ + + if isinstance(exec_id, dict): + exec_id = exec_id.get('Id') + + params = {'h': height, 'w': width} + url = self._url("/exec/{0}/resize", exec_id) + res = self._post(url, params=params) + self._raise_for_status(res) + + @utils.check_resource('exec_id') + def exec_start(self, exec_id, detach=False, tty=False, stream=False, + socket=False, demux=False): + """ + Start a previously set up exec instance. + + Args: + exec_id (str): ID of the exec instance + detach (bool): If true, detach from the exec command. + Default: False + tty (bool): Allocate a pseudo-TTY. Default: False + stream (bool): Return response data progressively as an iterator + of strings, rather than a single string. + socket (bool): Return the connection socket to allow custom + read/write operations. Must be closed by the caller when done. + demux (bool): Return stdout and stderr separately + + Returns: + + (generator or str or tuple): If ``stream=True``, a generator + yielding response chunks. If ``socket=True``, a socket object for + the connection. A string containing response data otherwise. If + ``demux=True``, a tuple with two elements of type byte: stdout and + stderr. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. 
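+
+        Example:
+
+            A minimal sketch, continuing from an instance returned by
+            :py:meth:`exec_create` (assumes ``client`` was created with
+            ``docker.from_env()``):
+
+            .. code-block:: python
+
+                for chunk in client.api.exec_start(exec_id, stream=True):
+                    print(chunk.decode('utf-8', errors='replace'), end='')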
+ """ + # we want opened socket if socket == True + + data = { + 'Tty': tty, + 'Detach': detach + } + + headers = {} if detach else { + 'Connection': 'Upgrade', + 'Upgrade': 'tcp' + } + + res = self._post_json( + self._url('/exec/{0}/start', exec_id), + headers=headers, + data=data, + stream=True + ) + if detach: + try: + return self._result(res) + finally: + res.close() + if socket: + return self._get_raw_response_socket(res) + + output = self._read_from_socket(res, stream, tty=tty, demux=demux) + if stream: + return CancellableStream(output, res) + else: + return output diff --git a/docker/api/image.py b/docker/api/image.py new file mode 100644 index 0000000000..85109473bc --- /dev/null +++ b/docker/api/image.py @@ -0,0 +1,601 @@ +import logging +import os + +from .. import auth, errors, utils +from ..constants import DEFAULT_DATA_CHUNK_SIZE + +log = logging.getLogger(__name__) + + +class ImageApiMixin: + + @utils.check_resource('image') + def get_image(self, image, chunk_size=DEFAULT_DATA_CHUNK_SIZE): + """ + Get a tarball of an image. Similar to the ``docker save`` command. + + Args: + image (str): Image name to get + chunk_size (int): The number of bytes returned by each iteration + of the generator. If ``None``, data will be streamed as it is + received. Default: 2 MB + + Returns: + (generator): A stream of raw archive data. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + + Example: + + >>> image = client.api.get_image("busybox:latest") + >>> f = open('/tmp/busybox-latest.tar', 'wb') + >>> for chunk in image: + >>> f.write(chunk) + >>> f.close() + """ + res = self._get(self._url("/images/{0}/get", image), stream=True) + return self._stream_raw_result(res, chunk_size, False) + + @utils.check_resource('image') + def history(self, image): + """ + Show the history of an image. + + Args: + image (str): The image to show history for + + Returns: + (list): The history of the image + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + res = self._get(self._url("/images/{0}/history", image)) + return self._result(res, True) + + def images(self, name=None, quiet=False, all=False, filters=None): + """ + List images. Similar to the ``docker images`` command. + + Args: + name (str): Only show images belonging to the repository ``name`` + quiet (bool): Only return numeric IDs as a list. + all (bool): Show intermediate image layers. By default, these are + filtered out. + filters (dict): Filters to be processed on the image list. + Available filters: + - ``dangling`` (bool) + - `label` (str|list): format either ``"key"``, ``"key=value"`` + or a list of such. + + Returns: + (dict or list): A list if ``quiet=True``, otherwise a dict. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + params = { + 'only_ids': 1 if quiet else 0, + 'all': 1 if all else 0, + } + if name: + if utils.version_lt(self._version, '1.25'): + # only use "filter" on API 1.24 and under, as it is deprecated + params['filter'] = name + else: + if filters: + filters['reference'] = name + else: + filters = {'reference': name} + if filters: + params['filters'] = utils.convert_filters(filters) + res = self._result(self._get(self._url("/images/json"), params=params), + True) + if quiet: + return [x['Id'] for x in res] + return res + + def import_image(self, src=None, repository=None, tag=None, image=None, + changes=None, stream_src=False): + """ + Import an image. Similar to the ``docker import`` command. 
+ + If ``src`` is a string or unicode string, it will first be treated as a + path to a tarball on the local system. If there is an error reading + from that file, ``src`` will be treated as a URL instead to fetch the + image from. You can also pass an open file handle as ``src``, in which + case the data will be read from that file. + + If ``src`` is unset but ``image`` is set, the ``image`` parameter will + be taken as the name of an existing image to import from. + + Args: + src (str or file): Path to tarfile, URL, or file-like object + repository (str): The repository to create + tag (str): The tag to apply + image (str): Use another image like the ``FROM`` Dockerfile + parameter + """ + if not (src or image): + raise errors.DockerException( + 'Must specify src or image to import from' + ) + u = self._url('/images/create') + + params = _import_image_params( + repository, tag, image, + src=(src if isinstance(src, str) else None), + changes=changes + ) + headers = {'Content-Type': 'application/tar'} + + if image or params.get('fromSrc') != '-': # from image or URL + return self._result( + self._post(u, data=None, params=params) + ) + elif isinstance(src, str): # from file path + with open(src, 'rb') as f: + return self._result( + self._post( + u, data=f, params=params, headers=headers, timeout=None + ) + ) + else: # from raw data + if stream_src: + headers['Transfer-Encoding'] = 'chunked' + return self._result( + self._post(u, data=src, params=params, headers=headers) + ) + + def import_image_from_data(self, data, repository=None, tag=None, + changes=None): + """ + Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but + allows importing in-memory bytes data. + + Args: + data (bytes collection): Bytes collection containing valid tar data + repository (str): The repository to create + tag (str): The tag to apply + """ + + u = self._url('/images/create') + params = _import_image_params( + repository, tag, src='-', changes=changes + ) + headers = {'Content-Type': 'application/tar'} + return self._result( + self._post( + u, data=data, params=params, headers=headers, timeout=None + ) + ) + + def import_image_from_file(self, filename, repository=None, tag=None, + changes=None): + """ + Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only + supports importing from a tar file on disk. + + Args: + filename (str): Full path to a tar file. + repository (str): The repository to create + tag (str): The tag to apply + + Raises: + IOError: File does not exist. + """ + + return self.import_image( + src=filename, repository=repository, tag=tag, changes=changes + ) + + def import_image_from_stream(self, stream, repository=None, tag=None, + changes=None): + return self.import_image( + src=stream, stream_src=True, repository=repository, tag=tag, + changes=changes + ) + + def import_image_from_url(self, url, repository=None, tag=None, + changes=None): + """ + Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only + supports importing from a URL. + + Args: + url (str): A URL pointing to a tar file. + repository (str): The repository to create + tag (str): The tag to apply + """ + return self.import_image( + src=url, repository=repository, tag=tag, changes=changes + ) + + def import_image_from_image(self, image, repository=None, tag=None, + changes=None): + """ + Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only + supports importing from another image, like the ``FROM`` Dockerfile + parameter. 
+ + Args: + image (str): Image name to import from + repository (str): The repository to create + tag (str): The tag to apply + """ + return self.import_image( + image=image, repository=repository, tag=tag, changes=changes + ) + + @utils.check_resource('image') + def inspect_image(self, image): + """ + Get detailed information about an image. Similar to the ``docker + inspect`` command, but only for images. + + Args: + image (str): The image to inspect + + Returns: + (dict): Similar to the output of ``docker inspect``, but as a + single dict + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + return self._result( + self._get(self._url("/images/{0}/json", image)), True + ) + + @utils.minimum_version('1.30') + @utils.check_resource('image') + def inspect_distribution(self, image, auth_config=None): + """ + Get image digest and platform information by contacting the registry. + + Args: + image (str): The image name to inspect + auth_config (dict): Override the credentials that are found in the + config for this request. ``auth_config`` should contain the + ``username`` and ``password`` keys to be valid. + + Returns: + (dict): A dict containing distribution data + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + registry, _ = auth.resolve_repository_name(image) + + headers = {} + if auth_config is None: + header = auth.get_config_header(self, registry) + if header: + headers['X-Registry-Auth'] = header + else: + log.debug('Sending supplied auth config') + headers['X-Registry-Auth'] = auth.encode_header(auth_config) + + url = self._url("/distribution/{0}/json", image) + + return self._result( + self._get(url, headers=headers), True + ) + + def load_image(self, data, quiet=None): + """ + Load an image that was previously saved using + :py:meth:`~docker.api.image.ImageApiMixin.get_image` (or ``docker + save``). Similar to ``docker load``. + + Args: + data (binary): Image data to be loaded. + quiet (boolean): Suppress progress details in response. + + Returns: + (generator): Progress output as JSON objects. Only available for + API version >= 1.23 + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + params = {} + + if quiet is not None: + if utils.version_lt(self._version, '1.23'): + raise errors.InvalidVersion( + 'quiet is not supported in API version < 1.23' + ) + params['quiet'] = quiet + + res = self._post( + self._url("/images/load"), data=data, params=params, stream=True + ) + if utils.version_gte(self._version, '1.23'): + return self._stream_helper(res, decode=True) + + self._raise_for_status(res) + + @utils.minimum_version('1.25') + def prune_images(self, filters=None): + """ + Delete unused images + + Args: + filters (dict): Filters to process on the prune list. + Available filters: + - dangling (bool): When set to true (or 1), prune only + unused and untagged images. + + Returns: + (dict): A dict containing a list of deleted image IDs and + the amount of disk space reclaimed in bytes. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + url = self._url("/images/prune") + params = {} + if filters is not None: + params['filters'] = utils.convert_filters(filters) + return self._result(self._post(url, params=params), True) + + def pull(self, repository, tag=None, stream=False, auth_config=None, + decode=False, platform=None, all_tags=False): + """ + Pulls an image. Similar to the ``docker pull`` command. 
+ + Args: + repository (str): The repository to pull + tag (str): The tag to pull. If ``tag`` is ``None`` or empty, it + is set to ``latest``. + stream (bool): Stream the output as a generator. Make sure to + consume the generator, otherwise pull might get cancelled. + auth_config (dict): Override the credentials that are found in the + config for this request. ``auth_config`` should contain the + ``username`` and ``password`` keys to be valid. + decode (bool): Decode the JSON data from the server into dicts. + Only applies with ``stream=True`` + platform (str): Platform in the format ``os[/arch[/variant]]`` + all_tags (bool): Pull all image tags, the ``tag`` parameter is + ignored. + + Returns: + (generator or str): The output + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + + Example: + + >>> resp = client.api.pull('busybox', stream=True, decode=True) + ... for line in resp: + ... print(json.dumps(line, indent=4)) + { + "status": "Pulling image (latest) from busybox", + "progressDetail": {}, + "id": "e72ac664f4f0" + } + { + "status": "Pulling image (latest) from busybox, endpoint: ...", + "progressDetail": {}, + "id": "e72ac664f4f0" + } + + """ + repository, image_tag = utils.parse_repository_tag(repository) + tag = tag or image_tag or 'latest' + + if all_tags: + tag = None + + registry, repo_name = auth.resolve_repository_name(repository) + + params = { + 'tag': tag, + 'fromImage': repository + } + headers = {} + + if auth_config is None: + header = auth.get_config_header(self, registry) + if header: + headers['X-Registry-Auth'] = header + else: + log.debug('Sending supplied auth config') + headers['X-Registry-Auth'] = auth.encode_header(auth_config) + + if platform is not None: + if utils.version_lt(self._version, '1.32'): + raise errors.InvalidVersion( + 'platform was only introduced in API version 1.32' + ) + params['platform'] = platform + + response = self._post( + self._url('/images/create'), params=params, headers=headers, + stream=stream, timeout=None + ) + + self._raise_for_status(response) + + if stream: + return self._stream_helper(response, decode=decode) + + return self._result(response) + + def push(self, repository, tag=None, stream=False, auth_config=None, + decode=False): + """ + Push an image or a repository to the registry. Similar to the ``docker + push`` command. + + Args: + repository (str): The repository to push to + tag (str): An optional tag to push + stream (bool): Stream the output as a blocking generator + auth_config (dict): Override the credentials that are found in the + config for this request. ``auth_config`` should contain the + ``username`` and ``password`` keys to be valid. + decode (bool): Decode the JSON data from the server into dicts. + Only applies with ``stream=True`` + + Returns: + (generator or str): The output from the server. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + + Example: + >>> resp = client.api.push( + ... 'yourname/app', + ... stream=True, + ... decode=True, + ... ) + ... for line in resp: + ... print(line) + {'status': 'Pushing repository yourname/app (1 tags)'} + {'status': 'Pushing','progressDetail': {}, 'id': '511136ea3c5a'} + {'status': 'Image already pushed, skipping', 'progressDetail':{}, + 'id': '511136ea3c5a'} + ... 
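+
+            A failed push surfaces in the stream as a line carrying an
+            ``error`` key instead, for example
+            ``{'error': 'denied: requested access to the resource is
+            denied'}``.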
+ + """ + if not tag: + repository, tag = utils.parse_repository_tag(repository) + registry, repo_name = auth.resolve_repository_name(repository) + u = self._url("/images/{0}/push", repository) + params = { + 'tag': tag + } + headers = {} + + if auth_config is None: + header = auth.get_config_header(self, registry) + if header: + headers['X-Registry-Auth'] = header + else: + log.debug('Sending supplied auth config') + headers['X-Registry-Auth'] = auth.encode_header(auth_config) + + response = self._post_json( + u, None, headers=headers, stream=stream, params=params + ) + + self._raise_for_status(response) + + if stream: + return self._stream_helper(response, decode=decode) + + return self._result(response) + + @utils.check_resource('image') + def remove_image(self, image, force=False, noprune=False): + """ + Remove an image. Similar to the ``docker rmi`` command. + + Args: + image (str): The image to remove + force (bool): Force removal of the image + noprune (bool): Do not delete untagged parents + """ + params = {'force': force, 'noprune': noprune} + res = self._delete(self._url("/images/{0}", image), params=params) + return self._result(res, True) + + def search(self, term, limit=None): + """ + Search for images on Docker Hub. Similar to the ``docker search`` + command. + + Args: + term (str): A term to search for. + limit (int): The maximum number of results to return. + + Returns: + (list of dicts): The response of the search. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + params = {'term': term} + if limit is not None: + params['limit'] = limit + + return self._result( + self._get(self._url("/images/search"), params=params), + True + ) + + @utils.check_resource('image') + def tag(self, image, repository, tag=None, force=False): + """ + Tag an image into a repository. Similar to the ``docker tag`` command. + + Args: + image (str): The image to tag + repository (str): The repository to set for the tag + tag (str): The tag name + force (bool): Force + + Returns: + (bool): ``True`` if successful + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + + Example: + + >>> client.api.tag('ubuntu', 'localhost:5000/ubuntu', 'latest', + force=True) + """ + params = { + 'tag': tag, + 'repo': repository, + 'force': 1 if force else 0 + } + url = self._url("/images/{0}/tag", image) + res = self._post(url, params=params) + self._raise_for_status(res) + return res.status_code == 201 + + +def is_file(src): + try: + return ( + isinstance(src, str) and + os.path.isfile(src) + ) + except TypeError: # a data string will make isfile() raise a TypeError + return False + + +def _import_image_params(repo, tag, image=None, src=None, + changes=None): + params = { + 'repo': repo, + 'tag': tag, + } + if image: + params['fromImage'] = image + elif src and not is_file(src): + params['fromSrc'] = src + else: + params['fromSrc'] = '-' + + if changes: + params['changes'] = changes + + return params diff --git a/docker/api/network.py b/docker/api/network.py new file mode 100644 index 0000000000..2b1925710e --- /dev/null +++ b/docker/api/network.py @@ -0,0 +1,277 @@ +from .. import utils +from ..errors import InvalidVersion +from ..utils import check_resource, minimum_version, version_lt + + +class NetworkApiMixin: + def networks(self, names=None, ids=None, filters=None): + """ + List networks. Similar to the ``docker network ls`` command. 
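+
+        For example (illustrative), to list only user-defined networks::
+
+            >>> client.api.networks(filters={'type': 'custom'})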
+ + Args: + names (:py:class:`list`): List of names to filter by + ids (:py:class:`list`): List of ids to filter by + filters (dict): Filters to be processed on the network list. + Available filters: + - ``driver=[]`` Matches a network's driver. + - ``label=[]``, ``label=[=]`` or a list of + such. + - ``type=["custom"|"builtin"]`` Filters networks by type. + + Returns: + (dict): List of network objects. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + + if filters is None: + filters = {} + if names: + filters['name'] = names + if ids: + filters['id'] = ids + params = {'filters': utils.convert_filters(filters)} + url = self._url("/networks") + res = self._get(url, params=params) + return self._result(res, json=True) + + def create_network(self, name, driver=None, options=None, ipam=None, + check_duplicate=None, internal=False, labels=None, + enable_ipv6=False, attachable=None, scope=None, + ingress=None): + """ + Create a network. Similar to the ``docker network create``. + + Args: + name (str): Name of the network + driver (str): Name of the driver used to create the network + options (dict): Driver options as a key-value dictionary + ipam (IPAMConfig): Optional custom IP scheme for the network. + check_duplicate (bool): Request daemon to check for networks with + same name. Default: ``None``. + internal (bool): Restrict external access to the network. Default + ``False``. + labels (dict): Map of labels to set on the network. Default + ``None``. + enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``. + attachable (bool): If enabled, and the network is in the global + scope, non-service containers on worker nodes will be able to + connect to the network. + scope (str): Specify the network's scope (``local``, ``global`` or + ``swarm``) + ingress (bool): If set, create an ingress network which provides + the routing-mesh in swarm mode. + + Returns: + (dict): The created network reference object + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + + Example: + A network using the bridge driver: + + >>> client.api.create_network("network1", driver="bridge") + + You can also create more advanced networks with custom IPAM + configurations. For example, setting the subnet to + ``192.168.52.0/24`` and gateway address to ``192.168.52.254``. + + .. 
code-block:: python + + >>> ipam_pool = docker.types.IPAMPool( + subnet='192.168.52.0/24', + gateway='192.168.52.254' + ) + >>> ipam_config = docker.types.IPAMConfig( + pool_configs=[ipam_pool] + ) + >>> client.api.create_network("network1", driver="bridge", + ipam=ipam_config) + """ + if options is not None and not isinstance(options, dict): + raise TypeError('options must be a dictionary') + + data = { + 'Name': name, + 'Driver': driver, + 'Options': options, + 'IPAM': ipam, + 'CheckDuplicate': check_duplicate, + } + + if labels is not None: + if version_lt(self._version, '1.23'): + raise InvalidVersion( + 'network labels were introduced in API 1.23' + ) + if not isinstance(labels, dict): + raise TypeError('labels must be a dictionary') + data["Labels"] = labels + + if enable_ipv6: + if version_lt(self._version, '1.23'): + raise InvalidVersion( + 'enable_ipv6 was introduced in API 1.23' + ) + data['EnableIPv6'] = True + + if internal: + if version_lt(self._version, '1.22'): + raise InvalidVersion('Internal networks are not ' + 'supported in API version < 1.22') + data['Internal'] = True + + if attachable is not None: + if version_lt(self._version, '1.24'): + raise InvalidVersion( + 'attachable is not supported in API version < 1.24' + ) + data['Attachable'] = attachable + + if ingress is not None: + if version_lt(self._version, '1.29'): + raise InvalidVersion( + 'ingress is not supported in API version < 1.29' + ) + + data['Ingress'] = ingress + + if scope is not None: + if version_lt(self._version, '1.30'): + raise InvalidVersion( + 'scope is not supported in API version < 1.30' + ) + data['Scope'] = scope + + url = self._url("/networks/create") + res = self._post_json(url, data=data) + return self._result(res, json=True) + + @minimum_version('1.25') + def prune_networks(self, filters=None): + """ + Delete unused networks + + Args: + filters (dict): Filters to process on the prune list. + + Returns: + (dict): A dict containing a list of deleted network names and + the amount of disk space reclaimed in bytes. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + params = {} + if filters: + params['filters'] = utils.convert_filters(filters) + url = self._url('/networks/prune') + return self._result(self._post(url, params=params), True) + + @check_resource('net_id') + def remove_network(self, net_id): + """ + Remove a network. Similar to the ``docker network rm`` command. + + Args: + net_id (str): The network's id + """ + url = self._url("/networks/{0}", net_id) + res = self._delete(url) + self._raise_for_status(res) + + @check_resource('net_id') + def inspect_network(self, net_id, verbose=None, scope=None): + """ + Get detailed information about a network. + + Args: + net_id (str): ID of network + verbose (bool): Show the service details across the cluster in + swarm mode. + scope (str): Filter the network by scope (``swarm``, ``global`` + or ``local``). 
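+
+        Returns:
+            (dict): Network information dictionary.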
+ """ + params = {} + if verbose is not None: + if version_lt(self._version, '1.28'): + raise InvalidVersion('verbose was introduced in API 1.28') + params['verbose'] = verbose + if scope is not None: + if version_lt(self._version, '1.31'): + raise InvalidVersion('scope was introduced in API 1.31') + params['scope'] = scope + + url = self._url("/networks/{0}", net_id) + res = self._get(url, params=params) + return self._result(res, json=True) + + @check_resource('container') + def connect_container_to_network(self, container, net_id, + ipv4_address=None, ipv6_address=None, + aliases=None, links=None, + link_local_ips=None, driver_opt=None, + mac_address=None): + """ + Connect a container to a network. + + Args: + container (str): container-id/name to be connected to the network + net_id (str): network id + aliases (:py:class:`list`): A list of aliases for this endpoint. + Names in that list can be used within the network to reach the + container. Defaults to ``None``. + links (:py:class:`list`): A list of links for this endpoint. + Containers declared in this list will be linked to this + container. Defaults to ``None``. + ipv4_address (str): The IP address of this container on the + network, using the IPv4 protocol. Defaults to ``None``. + ipv6_address (str): The IP address of this container on the + network, using the IPv6 protocol. Defaults to ``None``. + link_local_ips (:py:class:`list`): A list of link-local + (IPv4/IPv6) addresses. + mac_address (str): The MAC address of this container on the + network. Defaults to ``None``. + """ + data = { + "Container": container, + "EndpointConfig": self.create_endpoint_config( + aliases=aliases, links=links, ipv4_address=ipv4_address, + ipv6_address=ipv6_address, link_local_ips=link_local_ips, + driver_opt=driver_opt, + mac_address=mac_address + ), + } + + url = self._url("/networks/{0}/connect", net_id) + res = self._post_json(url, data=data) + self._raise_for_status(res) + + @check_resource('container') + def disconnect_container_from_network(self, container, net_id, + force=False): + """ + Disconnect a container from a network. + + Args: + container (str): container ID or name to be disconnected from the + network + net_id (str): network ID + force (bool): Force the container to disconnect from a network. + Default: ``False`` + """ + data = {"Container": container} + if force: + if version_lt(self._version, '1.22'): + raise InvalidVersion( + 'Forced disconnect was introduced in API 1.22' + ) + data['Force'] = force + url = self._url("/networks/{0}/disconnect", net_id) + res = self._post_json(url, data=data) + self._raise_for_status(res) diff --git a/docker/api/plugin.py b/docker/api/plugin.py new file mode 100644 index 0000000000..10210c1a23 --- /dev/null +++ b/docker/api/plugin.py @@ -0,0 +1,261 @@ +from .. import auth, utils + + +class PluginApiMixin: + @utils.minimum_version('1.25') + @utils.check_resource('name') + def configure_plugin(self, name, options): + """ + Configure a plugin. + + Args: + name (string): The name of the plugin. The ``:latest`` tag is + optional, and is the default if omitted. + options (dict): A key-value mapping of options + + Returns: + ``True`` if successful + """ + url = self._url('/plugins/{0}/set', name) + data = options + if isinstance(data, dict): + data = [f'{k}={v}' for k, v in data.items()] + res = self._post_json(url, data=data) + self._raise_for_status(res) + return True + + @utils.minimum_version('1.25') + def create_plugin(self, name, plugin_data_dir, gzip=False): + """ + Create a new plugin. 
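+
+        For example (an illustrative sketch; the plugin name and path
+        are hypothetical)::
+
+            >>> client.api.create_plugin(
+            ...     'myorg/my-plugin', '/path/to/plugin/data'
+            ... )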
+ + Args: + name (string): The name of the plugin. The ``:latest`` tag is + optional, and is the default if omitted. + plugin_data_dir (string): Path to the plugin data directory. + Plugin data directory must contain the ``config.json`` + manifest file and the ``rootfs`` directory. + gzip (bool): Compress the context using gzip. Default: False + + Returns: + ``True`` if successful + """ + url = self._url('/plugins/create') + + with utils.create_archive( + root=plugin_data_dir, gzip=gzip, + files=set(utils.build.walk(plugin_data_dir, [])) + ) as archv: + res = self._post(url, params={'name': name}, data=archv) + self._raise_for_status(res) + return True + + @utils.minimum_version('1.25') + def disable_plugin(self, name, force=False): + """ + Disable an installed plugin. + + Args: + name (string): The name of the plugin. The ``:latest`` tag is + optional, and is the default if omitted. + force (bool): To enable the force query parameter. + + Returns: + ``True`` if successful + """ + url = self._url('/plugins/{0}/disable', name) + res = self._post(url, params={'force': force}) + self._raise_for_status(res) + return True + + @utils.minimum_version('1.25') + def enable_plugin(self, name, timeout=0): + """ + Enable an installed plugin. + + Args: + name (string): The name of the plugin. The ``:latest`` tag is + optional, and is the default if omitted. + timeout (int): Operation timeout (in seconds). Default: 0 + + Returns: + ``True`` if successful + """ + url = self._url('/plugins/{0}/enable', name) + params = {'timeout': timeout} + res = self._post(url, params=params) + self._raise_for_status(res) + return True + + @utils.minimum_version('1.25') + def inspect_plugin(self, name): + """ + Retrieve plugin metadata. + + Args: + name (string): The name of the plugin. The ``:latest`` tag is + optional, and is the default if omitted. + + Returns: + A dict containing plugin info + """ + url = self._url('/plugins/{0}/json', name) + return self._result(self._get(url), True) + + @utils.minimum_version('1.25') + def pull_plugin(self, remote, privileges, name=None): + """ + Pull and install a plugin. After the plugin is installed, it can be + enabled using :py:meth:`~enable_plugin`. + + Args: + remote (string): Remote reference for the plugin to install. + The ``:latest`` tag is optional, and is the default if + omitted. + privileges (:py:class:`list`): A list of privileges the user + consents to grant to the plugin. Can be retrieved using + :py:meth:`~plugin_privileges`. + name (string): Local name for the pulled plugin. The + ``:latest`` tag is optional, and is the default if omitted. + + Returns: + An iterable object streaming the decoded API logs + """ + url = self._url('/plugins/pull') + params = { + 'remote': remote, + } + if name: + params['name'] = name + + headers = {} + registry, repo_name = auth.resolve_repository_name(remote) + header = auth.get_config_header(self, registry) + if header: + headers['X-Registry-Auth'] = header + response = self._post_json( + url, params=params, headers=headers, data=privileges, + stream=True + ) + self._raise_for_status(response) + return self._stream_helper(response, decode=True) + + @utils.minimum_version('1.25') + def plugins(self): + """ + Retrieve a list of installed plugins. + + Returns: + A list of dicts, one per plugin + """ + url = self._url('/plugins') + return self._result(self._get(url), True) + + @utils.minimum_version('1.25') + def plugin_privileges(self, name): + """ + Retrieve list of privileges to be granted to a plugin. 
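+
+        Typically used to gather the privileges to grant when installing
+        a plugin (illustrative; ``vieux/sshfs`` is just an example
+        plugin)::
+
+            >>> privileges = client.api.plugin_privileges('vieux/sshfs')
+            >>> for line in client.api.pull_plugin('vieux/sshfs',
+            ...                                    privileges):
+            ...     print(line)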
+
+        Args:
+            name (string): Name of the remote plugin to examine. The
+                ``:latest`` tag is optional, and is the default if omitted.
+
+        Returns:
+            A list of dictionaries representing the plugin's
+            permissions
+
+        """
+        params = {
+            'remote': name,
+        }
+
+        headers = {}
+        registry, repo_name = auth.resolve_repository_name(name)
+        header = auth.get_config_header(self, registry)
+        if header:
+            headers['X-Registry-Auth'] = header
+
+        url = self._url('/plugins/privileges')
+        return self._result(
+            self._get(url, params=params, headers=headers), True
+        )
+
+    @utils.minimum_version('1.25')
+    @utils.check_resource('name')
+    def push_plugin(self, name):
+        """
+        Push a plugin to the registry.
+
+        Args:
+            name (string): Name of the plugin to upload. The ``:latest``
+                tag is optional, and is the default if omitted.
+
+        Returns:
+            An iterable object streaming the decoded API logs
+        """
+        url = self._url('/plugins/{0}/push', name)
+
+        headers = {}
+        registry, repo_name = auth.resolve_repository_name(name)
+        header = auth.get_config_header(self, registry)
+        if header:
+            headers['X-Registry-Auth'] = header
+        res = self._post(url, headers=headers)
+        self._raise_for_status(res)
+        return self._stream_helper(res, decode=True)
+
+    @utils.minimum_version('1.25')
+    @utils.check_resource('name')
+    def remove_plugin(self, name, force=False):
+        """
+        Remove an installed plugin.
+
+        Args:
+            name (string): Name of the plugin to remove. The ``:latest``
+                tag is optional, and is the default if omitted.
+            force (bool): Disable the plugin before removing. This may
+                result in issues if the plugin is in use by a container.
+
+        Returns:
+            ``True`` if successful
+        """
+        url = self._url('/plugins/{0}', name)
+        res = self._delete(url, params={'force': force})
+        self._raise_for_status(res)
+        return True
+
+    @utils.minimum_version('1.26')
+    @utils.check_resource('name')
+    def upgrade_plugin(self, name, remote, privileges):
+        """
+        Upgrade an installed plugin.
+
+        Args:
+            name (string): Name of the plugin to upgrade. The ``:latest``
+                tag is optional and is the default if omitted.
+            remote (string): Remote reference to upgrade to. The
+                ``:latest`` tag is optional and is the default if omitted.
+            privileges (:py:class:`list`): A list of privileges the user
+                consents to grant to the plugin. Can be retrieved using
+                :py:meth:`~plugin_privileges`.
+
+        Returns:
+            An iterable object streaming the decoded API logs
+        """
+
+        url = self._url('/plugins/{0}/upgrade', name)
+        params = {
+            'remote': remote,
+        }
+
+        headers = {}
+        registry, repo_name = auth.resolve_repository_name(remote)
+        header = auth.get_config_header(self, registry)
+        if header:
+            headers['X-Registry-Auth'] = header
+        response = self._post_json(
+            url, params=params, headers=headers, data=privileges,
+            stream=True
+        )
+        self._raise_for_status(response)
+        return self._stream_helper(response, decode=True)
diff --git a/docker/api/secret.py b/docker/api/secret.py
new file mode 100644
index 0000000000..db1701bdc0
--- /dev/null
+++ b/docker/api/secret.py
@@ -0,0 +1,98 @@
+import base64
+
+from .. import errors, utils
+
+
+class SecretApiMixin:
+    @utils.minimum_version('1.25')
+    def create_secret(self, name, data, labels=None, driver=None):
+        """
+        Create a secret
+
+        Args:
+            name (string): Name of the secret
+            data (bytes): Secret data to be stored
+            labels (dict): A mapping of labels to assign to the secret
+            driver (DriverConfig): A custom driver configuration.
If + unspecified, the default ``internal`` driver will be used + + Returns (dict): ID of the newly created secret + """ + if not isinstance(data, bytes): + data = data.encode('utf-8') + + data = base64.b64encode(data) + data = data.decode('ascii') + body = { + 'Data': data, + 'Name': name, + 'Labels': labels + } + + if driver is not None: + if utils.version_lt(self._version, '1.31'): + raise errors.InvalidVersion( + 'Secret driver is only available for API version > 1.31' + ) + + body['Driver'] = driver + + url = self._url('/secrets/create') + return self._result( + self._post_json(url, data=body), True + ) + + @utils.minimum_version('1.25') + @utils.check_resource('id') + def inspect_secret(self, id): + """ + Retrieve secret metadata + + Args: + id (string): Full ID of the secret to inspect + + Returns (dict): A dictionary of metadata + + Raises: + :py:class:`docker.errors.NotFound` + if no secret with that ID exists + """ + url = self._url('/secrets/{0}', id) + return self._result(self._get(url), True) + + @utils.minimum_version('1.25') + @utils.check_resource('id') + def remove_secret(self, id): + """ + Remove a secret + + Args: + id (string): Full ID of the secret to remove + + Returns (boolean): True if successful + + Raises: + :py:class:`docker.errors.NotFound` + if no secret with that ID exists + """ + url = self._url('/secrets/{0}', id) + res = self._delete(url) + self._raise_for_status(res) + return True + + @utils.minimum_version('1.25') + def secrets(self, filters=None): + """ + List secrets + + Args: + filters (dict): A map of filters to process on the secrets + list. Available filters: ``names`` + + Returns (list): A list of secrets + """ + url = self._url('/secrets') + params = {} + if filters: + params['filters'] = utils.convert_filters(filters) + return self._result(self._get(url, params=params), True) diff --git a/docker/api/service.py b/docker/api/service.py new file mode 100644 index 0000000000..3aed065175 --- /dev/null +++ b/docker/api/service.py @@ -0,0 +1,486 @@ +from .. 
import auth, errors, utils
+from ..types import ServiceMode
+
+
+def _check_api_features(version, task_template, update_config, endpoint_spec,
+                        rollback_config):
+
+    def raise_version_error(param, min_version):
+        raise errors.InvalidVersion(
+            f'{param} is not supported in API version < {min_version}'
+        )
+
+    if update_config is not None:
+        if utils.version_lt(version, '1.25'):
+            if 'MaxFailureRatio' in update_config:
+                raise_version_error('UpdateConfig.max_failure_ratio', '1.25')
+            if 'Monitor' in update_config:
+                raise_version_error('UpdateConfig.monitor', '1.25')
+
+        if utils.version_lt(version, '1.28'):
+            if update_config.get('FailureAction') == 'rollback':
+                raise_version_error(
+                    'UpdateConfig.failure_action rollback', '1.28'
+                )
+
+        if utils.version_lt(version, '1.29'):
+            if 'Order' in update_config:
+                raise_version_error('UpdateConfig.order', '1.29')
+
+    if rollback_config is not None:
+        if utils.version_lt(version, '1.28'):
+            raise_version_error('rollback_config', '1.28')
+
+        if utils.version_lt(version, '1.29'):
+            if 'Order' in rollback_config:
+                raise_version_error('RollbackConfig.order', '1.29')
+
+    if endpoint_spec is not None:
+        if utils.version_lt(version, '1.32') and 'Ports' in endpoint_spec:
+            if any(p.get('PublishMode') for p in endpoint_spec['Ports']):
+                raise_version_error('EndpointSpec.Ports[].mode', '1.32')
+
+    if task_template is not None:
+        if 'ForceUpdate' in task_template and utils.version_lt(
+                version, '1.25'):
+            raise_version_error('force_update', '1.25')
+
+        if task_template.get('Placement'):
+            if utils.version_lt(version, '1.30'):
+                if task_template['Placement'].get('Platforms'):
+                    raise_version_error('Placement.platforms', '1.30')
+            if utils.version_lt(version, '1.27'):
+                if task_template['Placement'].get('Preferences'):
+                    raise_version_error('Placement.preferences', '1.27')
+
+        if task_template.get('ContainerSpec'):
+            container_spec = task_template.get('ContainerSpec')
+
+            if utils.version_lt(version, '1.25'):
+                if container_spec.get('TTY'):
+                    raise_version_error('ContainerSpec.tty', '1.25')
+                if container_spec.get('Hostname') is not None:
+                    raise_version_error('ContainerSpec.hostname', '1.25')
+                if container_spec.get('Hosts') is not None:
+                    raise_version_error('ContainerSpec.hosts', '1.25')
+                if container_spec.get('Groups') is not None:
+                    raise_version_error('ContainerSpec.groups', '1.25')
+                if container_spec.get('DNSConfig') is not None:
+                    raise_version_error('ContainerSpec.dns_config', '1.25')
+                if container_spec.get('Healthcheck') is not None:
+                    raise_version_error('ContainerSpec.healthcheck', '1.25')
+
+            if utils.version_lt(version, '1.28'):
+                if container_spec.get('ReadOnly') is not None:
+                    raise_version_error('ContainerSpec.read_only', '1.28')
+                if container_spec.get('StopSignal') is not None:
+                    raise_version_error('ContainerSpec.stop_signal', '1.28')
+
+            if utils.version_lt(version, '1.30'):
+                if container_spec.get('Configs') is not None:
+                    raise_version_error('ContainerSpec.configs', '1.30')
+                if container_spec.get('Privileges') is not None:
+                    raise_version_error('ContainerSpec.privileges', '1.30')
+
+            if utils.version_lt(version, '1.35'):
+                if container_spec.get('Isolation') is not None:
+                    raise_version_error('ContainerSpec.isolation', '1.35')
+
+            if utils.version_lt(version, '1.38'):
+                if container_spec.get('Init') is not None:
+                    raise_version_error('ContainerSpec.init', '1.38')
+
+        if task_template.get('Resources'):
+            if utils.version_lt(version, '1.32'):
+                if task_template['Resources'].get('GenericResources'):
raise_version_error('Resources.generic_resources', '1.32') + + +def _merge_task_template(current, override): + merged = current.copy() + if override is not None: + for ts_key, ts_value in override.items(): + if ts_key == 'ContainerSpec': + if 'ContainerSpec' not in merged: + merged['ContainerSpec'] = {} + for cs_key, cs_value in override['ContainerSpec'].items(): + if cs_value is not None: + merged['ContainerSpec'][cs_key] = cs_value + elif ts_value is not None: + merged[ts_key] = ts_value + return merged + + +class ServiceApiMixin: + @utils.minimum_version('1.24') + def create_service( + self, task_template, name=None, labels=None, mode=None, + update_config=None, networks=None, endpoint_config=None, + endpoint_spec=None, rollback_config=None + ): + """ + Create a service. + + Args: + task_template (TaskTemplate): Specification of the task to start as + part of the new service. + name (string): User-defined name for the service. Optional. + labels (dict): A map of labels to associate with the service. + Optional. + mode (ServiceMode): Scheduling mode for the service (replicated + or global). Defaults to replicated. + update_config (UpdateConfig): Specification for the update strategy + of the service. Default: ``None`` + rollback_config (RollbackConfig): Specification for the rollback + strategy of the service. Default: ``None`` + networks (:py:class:`list`): List of network names or IDs or + :py:class:`~docker.types.NetworkAttachmentConfig` to attach the + service to. Default: ``None``. + endpoint_spec (EndpointSpec): Properties that can be configured to + access and load balance a service. Default: ``None``. + + Returns: + A dictionary containing an ``ID`` key for the newly created + service. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + + _check_api_features( + self._version, task_template, update_config, endpoint_spec, + rollback_config + ) + + url = self._url('/services/create') + headers = {} + image = task_template.get('ContainerSpec', {}).get('Image', None) + if image is None: + raise errors.DockerException( + 'Missing mandatory Image key in ContainerSpec' + ) + if mode and not isinstance(mode, dict): + mode = ServiceMode(mode) + + registry, repo_name = auth.resolve_repository_name(image) + auth_header = auth.get_config_header(self, registry) + if auth_header: + headers['X-Registry-Auth'] = auth_header + if utils.version_lt(self._version, '1.25'): + networks = networks or task_template.pop('Networks', None) + data = { + 'Name': name, + 'Labels': labels, + 'TaskTemplate': task_template, + 'Mode': mode, + 'Networks': utils.convert_service_networks(networks), + 'EndpointSpec': endpoint_spec + } + + if update_config is not None: + data['UpdateConfig'] = update_config + + if rollback_config is not None: + data['RollbackConfig'] = rollback_config + + return self._result( + self._post_json(url, data=data, headers=headers), True + ) + + @utils.minimum_version('1.24') + @utils.check_resource('service') + def inspect_service(self, service, insert_defaults=None): + """ + Return information about a service. + + Args: + service (str): Service name or ID. + insert_defaults (boolean): If true, default values will be merged + into the service inspect output. + + Returns: + (dict): A dictionary of the server-side representation of the + service, including all relevant properties. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. 
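+
+        Example (illustrative; the service name is hypothetical)::
+
+            >>> spec = client.api.inspect_service('my-service')['Spec']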
+ """ + url = self._url('/services/{0}', service) + params = {} + if insert_defaults is not None: + if utils.version_lt(self._version, '1.29'): + raise errors.InvalidVersion( + 'insert_defaults is not supported in API version < 1.29' + ) + params['insertDefaults'] = insert_defaults + + return self._result(self._get(url, params=params), True) + + @utils.minimum_version('1.24') + @utils.check_resource('task') + def inspect_task(self, task): + """ + Retrieve information about a task. + + Args: + task (str): Task ID + + Returns: + (dict): Information about the task. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + url = self._url('/tasks/{0}', task) + return self._result(self._get(url), True) + + @utils.minimum_version('1.24') + @utils.check_resource('service') + def remove_service(self, service): + """ + Stop and remove a service. + + Args: + service (str): Service name or ID + + Returns: + ``True`` if successful. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + + url = self._url('/services/{0}', service) + resp = self._delete(url) + self._raise_for_status(resp) + return True + + @utils.minimum_version('1.24') + def services(self, filters=None, status=None): + """ + List services. + + Args: + filters (dict): Filters to process on the nodes list. Valid + filters: ``id``, ``name`` , ``label`` and ``mode``. + Default: ``None``. + status (bool): Include the service task count of running and + desired tasks. Default: ``None``. + + Returns: + A list of dictionaries containing data about each service. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + params = { + 'filters': utils.convert_filters(filters) if filters else None + } + if status is not None: + if utils.version_lt(self._version, '1.41'): + raise errors.InvalidVersion( + 'status is not supported in API version < 1.41' + ) + params['status'] = status + url = self._url('/services') + return self._result(self._get(url, params=params), True) + + @utils.minimum_version('1.25') + @utils.check_resource('service') + def service_logs(self, service, details=False, follow=False, stdout=False, + stderr=False, since=0, timestamps=False, tail='all', + is_tty=None): + """ + Get log stream for a service. + Note: This endpoint works only for services with the ``json-file`` + or ``journald`` logging drivers. + + Args: + service (str): ID or name of the service + details (bool): Show extra details provided to logs. + Default: ``False`` + follow (bool): Keep connection open to read logs as they are + sent by the Engine. Default: ``False`` + stdout (bool): Return logs from ``stdout``. Default: ``False`` + stderr (bool): Return logs from ``stderr``. Default: ``False`` + since (int): UNIX timestamp for the logs staring point. + Default: 0 + timestamps (bool): Add timestamps to every log line. + tail (string or int): Number of log lines to be returned, + counting from the current end of the logs. Specify an + integer or ``'all'`` to output all log lines. + Default: ``all`` + is_tty (bool): Whether the service's :py:class:`ContainerSpec` + enables the TTY option. If omitted, the method will query + the Engine for the information, causing an additional + roundtrip. + + Returns (generator): Logs for the service. 
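+
+        Example (illustrative; the service name is hypothetical)::
+
+            >>> for line in client.api.service_logs(
+            ...     'my-service', stdout=True, stderr=True, follow=True
+            ... ):
+            ...     print(line)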
+ """ + params = { + 'details': details, + 'follow': follow, + 'stdout': stdout, + 'stderr': stderr, + 'since': since, + 'timestamps': timestamps, + 'tail': tail + } + + url = self._url('/services/{0}/logs', service) + res = self._get(url, params=params, stream=True) + if is_tty is None: + is_tty = self.inspect_service( + service + )['Spec']['TaskTemplate']['ContainerSpec'].get('TTY', False) + return self._get_result_tty(True, res, is_tty) + + @utils.minimum_version('1.24') + def tasks(self, filters=None): + """ + Retrieve a list of tasks. + + Args: + filters (dict): A map of filters to process on the tasks list. + Valid filters: ``id``, ``name``, ``service``, ``node``, + ``label`` and ``desired-state``. + + Returns: + (:py:class:`list`): List of task dictionaries. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + + params = { + 'filters': utils.convert_filters(filters) if filters else None + } + url = self._url('/tasks') + return self._result(self._get(url, params=params), True) + + @utils.minimum_version('1.24') + @utils.check_resource('service') + def update_service(self, service, version, task_template=None, name=None, + labels=None, mode=None, update_config=None, + networks=None, endpoint_config=None, + endpoint_spec=None, fetch_current_spec=False, + rollback_config=None): + """ + Update a service. + + Args: + service (string): A service identifier (either its name or service + ID). + version (int): The version number of the service object being + updated. This is required to avoid conflicting writes. + task_template (TaskTemplate): Specification of the updated task to + start as part of the service. + name (string): New name for the service. Optional. + labels (dict): A map of labels to associate with the service. + Optional. + mode (ServiceMode): Scheduling mode for the service (replicated + or global). Defaults to replicated. + update_config (UpdateConfig): Specification for the update strategy + of the service. Default: ``None``. + rollback_config (RollbackConfig): Specification for the rollback + strategy of the service. Default: ``None`` + networks (:py:class:`list`): List of network names or IDs or + :py:class:`~docker.types.NetworkAttachmentConfig` to attach the + service to. Default: ``None``. + endpoint_spec (EndpointSpec): Properties that can be configured to + access and load balance a service. Default: ``None``. + fetch_current_spec (boolean): Use the undefined settings from the + current specification of the service. Default: ``False`` + + Returns: + A dictionary containing a ``Warnings`` key. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. 
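+
+        Example (an illustrative sketch; the service name and label are
+        hypothetical)::
+
+            >>> svc = client.api.inspect_service('my-service')
+            >>> client.api.update_service(
+            ...     'my-service', svc['Version']['Index'],
+            ...     labels={'tier': 'web'}, fetch_current_spec=True
+            ... )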
+ """ + + _check_api_features( + self._version, task_template, update_config, endpoint_spec, + rollback_config + ) + + if fetch_current_spec: + inspect_defaults = True + if utils.version_lt(self._version, '1.29'): + inspect_defaults = None + current = self.inspect_service( + service, insert_defaults=inspect_defaults + )['Spec'] + + else: + current = {} + + url = self._url('/services/{0}/update', service) + data = {} + headers = {} + + data['Name'] = current.get('Name') if name is None else name + + data['Labels'] = current.get('Labels') if labels is None else labels + + if mode is not None: + if not isinstance(mode, dict): + mode = ServiceMode(mode) + data['Mode'] = mode + else: + data['Mode'] = current.get('Mode') + + data['TaskTemplate'] = _merge_task_template( + current.get('TaskTemplate', {}), task_template + ) + + container_spec = data['TaskTemplate'].get('ContainerSpec', {}) + image = container_spec.get('Image', None) + if image is not None: + registry, repo_name = auth.resolve_repository_name(image) + auth_header = auth.get_config_header(self, registry) + if auth_header: + headers['X-Registry-Auth'] = auth_header + + if update_config is not None: + data['UpdateConfig'] = update_config + else: + data['UpdateConfig'] = current.get('UpdateConfig') + + if rollback_config is not None: + data['RollbackConfig'] = rollback_config + else: + data['RollbackConfig'] = current.get('RollbackConfig') + + if networks is not None: + converted_networks = utils.convert_service_networks(networks) + if utils.version_lt(self._version, '1.25'): + data['Networks'] = converted_networks + else: + data['TaskTemplate']['Networks'] = converted_networks + elif utils.version_lt(self._version, '1.25'): + data['Networks'] = current.get('Networks') + elif data['TaskTemplate'].get('Networks') is None: + current_task_template = current.get('TaskTemplate', {}) + current_networks = current_task_template.get('Networks') + if current_networks is None: + current_networks = current.get('Networks') + if current_networks is not None: + data['TaskTemplate']['Networks'] = current_networks + + if endpoint_spec is not None: + data['EndpointSpec'] = endpoint_spec + else: + data['EndpointSpec'] = current.get('EndpointSpec') + + resp = self._post_json( + url, data=data, params={'version': version}, headers=headers + ) + return self._result(resp, json=True) diff --git a/docker/api/swarm.py b/docker/api/swarm.py new file mode 100644 index 0000000000..d60d18b619 --- /dev/null +++ b/docker/api/swarm.py @@ -0,0 +1,462 @@ +import http.client as http_client +import logging + +from .. import errors, types, utils +from ..constants import DEFAULT_SWARM_ADDR_POOL, DEFAULT_SWARM_SUBNET_SIZE + +log = logging.getLogger(__name__) + + +class SwarmApiMixin: + + def create_swarm_spec(self, *args, **kwargs): + """ + Create a :py:class:`docker.types.SwarmSpec` instance that can be used + as the ``swarm_spec`` argument in + :py:meth:`~docker.api.swarm.SwarmApiMixin.init_swarm`. + + Args: + task_history_retention_limit (int): Maximum number of tasks + history stored. + snapshot_interval (int): Number of logs entries between snapshot. + keep_old_snapshots (int): Number of snapshots to keep beyond the + current snapshot. + log_entries_for_slow_followers (int): Number of log entries to + keep around to sync up slow followers after a snapshot is + created. + heartbeat_tick (int): Amount of ticks (in seconds) between each + heartbeat. + election_tick (int): Amount of ticks (in seconds) needed without a + leader to trigger a new election. 
+ dispatcher_heartbeat_period (int): The delay for an agent to send + a heartbeat to the dispatcher. + node_cert_expiry (int): Automatic expiry for nodes certificates. + external_cas (:py:class:`list`): Configuration for forwarding + signing requests to an external certificate authority. Use + a list of :py:class:`docker.types.SwarmExternalCA`. + name (string): Swarm's name + labels (dict): User-defined key/value metadata. + signing_ca_cert (str): The desired signing CA certificate for all + swarm node TLS leaf certificates, in PEM format. + signing_ca_key (str): The desired signing CA key for all swarm + node TLS leaf certificates, in PEM format. + ca_force_rotate (int): An integer whose purpose is to force swarm + to generate a new signing CA certificate and key, if none have + been specified. + autolock_managers (boolean): If set, generate a key and use it to + lock data stored on the managers. + log_driver (DriverConfig): The default log driver to use for tasks + created in the orchestrator. + + Returns: + :py:class:`docker.types.SwarmSpec` + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + + Example: + + >>> spec = client.api.create_swarm_spec( + snapshot_interval=5000, log_entries_for_slow_followers=1200 + ) + >>> client.api.init_swarm( + advertise_addr='eth0', listen_addr='0.0.0.0:5000', + force_new_cluster=False, swarm_spec=spec + ) + """ + ext_ca = kwargs.pop('external_ca', None) + if ext_ca: + kwargs['external_cas'] = [ext_ca] + return types.SwarmSpec(self._version, *args, **kwargs) + + @utils.minimum_version('1.24') + def get_unlock_key(self): + """ + Get the unlock key for this Swarm manager. + + Returns: + A ``dict`` containing an ``UnlockKey`` member + """ + return self._result(self._get(self._url('/swarm/unlockkey')), True) + + @utils.minimum_version('1.24') + def init_swarm(self, advertise_addr=None, listen_addr='0.0.0.0:2377', + force_new_cluster=False, swarm_spec=None, + default_addr_pool=None, subnet_size=None, + data_path_addr=None, data_path_port=None): + """ + Initialize a new Swarm using the current connected engine as the first + node. + + Args: + advertise_addr (string): Externally reachable address advertised + to other nodes. This can either be an address/port combination + in the form ``192.168.1.1:4567``, or an interface followed by a + port number, like ``eth0:4567``. If the port number is omitted, + the port number from the listen address is used. If + ``advertise_addr`` is not specified, it will be automatically + detected when possible. Default: None + listen_addr (string): Listen address used for inter-manager + communication, as well as determining the networking interface + used for the VXLAN Tunnel Endpoint (VTEP). This can either be + an address/port combination in the form ``192.168.1.1:4567``, + or an interface followed by a port number, like ``eth0:4567``. + If the port number is omitted, the default swarm listening port + is used. Default: '0.0.0.0:2377' + force_new_cluster (bool): Force creating a new Swarm, even if + already part of one. Default: False + swarm_spec (dict): Configuration settings of the new Swarm. Use + ``APIClient.create_swarm_spec`` to generate a valid + configuration. Default: None + default_addr_pool (list of strings): Default Address Pool specifies + default subnet pools for global scope networks. Each pool + should be specified as a CIDR block, like '10.0.0.0/8'. + Default: None + subnet_size (int): SubnetSize specifies the subnet size of the + networks created from the default subnet pool. 
Default: None + data_path_addr (string): Address or interface to use for data path + traffic. For example, 192.168.1.1, or an interface, like eth0. + data_path_port (int): Port number to use for data path traffic. + Acceptable port range is 1024 to 49151. If set to ``None`` or + 0, the default port 4789 will be used. Default: None + + Returns: + (str): The ID of the created node. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + + url = self._url('/swarm/init') + if swarm_spec is not None and not isinstance(swarm_spec, dict): + raise TypeError('swarm_spec must be a dictionary') + + if default_addr_pool is not None: + if utils.version_lt(self._version, '1.39'): + raise errors.InvalidVersion( + 'Address pool is only available for API version >= 1.39' + ) + # subnet_size becomes 0 if not set with default_addr_pool + if subnet_size is None: + subnet_size = DEFAULT_SWARM_SUBNET_SIZE + + if subnet_size is not None: + if utils.version_lt(self._version, '1.39'): + raise errors.InvalidVersion( + 'Subnet size is only available for API version >= 1.39' + ) + # subnet_size is ignored if set without default_addr_pool + if default_addr_pool is None: + default_addr_pool = DEFAULT_SWARM_ADDR_POOL + + data = { + 'AdvertiseAddr': advertise_addr, + 'ListenAddr': listen_addr, + 'DefaultAddrPool': default_addr_pool, + 'SubnetSize': subnet_size, + 'ForceNewCluster': force_new_cluster, + 'Spec': swarm_spec, + } + + if data_path_addr is not None: + if utils.version_lt(self._version, '1.30'): + raise errors.InvalidVersion( + 'Data address path is only available for ' + 'API version >= 1.30' + ) + data['DataPathAddr'] = data_path_addr + + if data_path_port is not None: + if utils.version_lt(self._version, '1.40'): + raise errors.InvalidVersion( + 'Data path port is only available for ' + 'API version >= 1.40' + ) + data['DataPathPort'] = data_path_port + + response = self._post_json(url, data=data) + return self._result(response, json=True) + + @utils.minimum_version('1.24') + def inspect_swarm(self): + """ + Retrieve low-level information about the current swarm. + + Returns: + A dictionary containing data about the swarm. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + url = self._url('/swarm') + return self._result(self._get(url), True) + + @utils.check_resource('node_id') + @utils.minimum_version('1.24') + def inspect_node(self, node_id): + """ + Retrieve low-level information about a swarm node + + Args: + node_id (string): ID of the node to be inspected. + + Returns: + A dictionary containing data about this node. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + url = self._url('/nodes/{0}', node_id) + return self._result(self._get(url), True) + + @utils.minimum_version('1.24') + def join_swarm(self, remote_addrs, join_token, listen_addr='0.0.0.0:2377', + advertise_addr=None, data_path_addr=None): + """ + Make this Engine join a swarm that has already been created. + + Args: + remote_addrs (:py:class:`list`): Addresses of one or more manager + nodes already participating in the Swarm to join. + join_token (string): Secret token for joining this Swarm. + listen_addr (string): Listen address used for inter-manager + communication if the node gets promoted to manager, as well as + determining the networking interface used for the VXLAN Tunnel + Endpoint (VTEP). Default: ``'0.0.0.0:2377`` + advertise_addr (string): Externally reachable address advertised + to other nodes. 
This can either be an address/port combination + in the form ``192.168.1.1:4567``, or an interface followed by a + port number, like ``eth0:4567``. If the port number is omitted, + the port number from the listen address is used. If + AdvertiseAddr is not specified, it will be automatically + detected when possible. Default: ``None`` + data_path_addr (string): Address or interface to use for data path + traffic. For example, 192.168.1.1, or an interface, like eth0. + + Returns: + ``True`` if the request went through. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + data = { + 'RemoteAddrs': remote_addrs, + 'ListenAddr': listen_addr, + 'JoinToken': join_token, + 'AdvertiseAddr': advertise_addr, + } + + if data_path_addr is not None: + if utils.version_lt(self._version, '1.30'): + raise errors.InvalidVersion( + 'Data address path is only available for ' + 'API version >= 1.30' + ) + data['DataPathAddr'] = data_path_addr + + url = self._url('/swarm/join') + response = self._post_json(url, data=data) + self._raise_for_status(response) + return True + + @utils.minimum_version('1.24') + def leave_swarm(self, force=False): + """ + Leave a swarm. + + Args: + force (bool): Leave the swarm even if this node is a manager. + Default: ``False`` + + Returns: + ``True`` if the request went through. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + url = self._url('/swarm/leave') + response = self._post(url, params={'force': force}) + # Ignore "this node is not part of a swarm" error + if force and response.status_code == http_client.NOT_ACCEPTABLE: + return True + # FIXME: Temporary workaround for 1.13.0-rc bug + # https://github.com/docker/docker/issues/29192 + if force and response.status_code == http_client.SERVICE_UNAVAILABLE: + return True + self._raise_for_status(response) + return True + + @utils.minimum_version('1.24') + def nodes(self, filters=None): + """ + List swarm nodes. + + Args: + filters (dict): Filters to process on the nodes list. Valid + filters: ``id``, ``name``, ``membership`` and ``role``. + Default: ``None`` + + Returns: + A list of dictionaries containing data about each swarm node. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + url = self._url('/nodes') + params = {} + if filters: + params['filters'] = utils.convert_filters(filters) + + return self._result(self._get(url, params=params), True) + + @utils.check_resource('node_id') + @utils.minimum_version('1.24') + def remove_node(self, node_id, force=False): + """ + Remove a node from the swarm. + + Args: + node_id (string): ID of the node to be removed. + force (bool): Force remove an active node. Default: `False` + + Raises: + :py:class:`docker.errors.NotFound` + If the node referenced doesn't exist in the swarm. + + :py:class:`docker.errors.APIError` + If the server returns an error. + Returns: + `True` if the request was successful. + """ + url = self._url('/nodes/{0}', node_id) + params = { + 'force': force + } + res = self._delete(url, params=params) + self._raise_for_status(res) + return True + + @utils.minimum_version('1.24') + def unlock_swarm(self, key): + """ + Unlock a locked swarm. + + Args: + key (string): The unlock key as provided by + :py:meth:`get_unlock_key` + + Raises: + :py:class:`docker.errors.InvalidArgument` + If the key argument is in an incompatible format + + :py:class:`docker.errors.APIError` + If the server returns an error. 
+ + Returns: + `True` if the request was successful. + + Example: + + >>> key = client.api.get_unlock_key() + >>> client.unlock_swarm(key) + + """ + if isinstance(key, dict): + if 'UnlockKey' not in key: + raise errors.InvalidArgument('Invalid unlock key format') + else: + key = {'UnlockKey': key} + + url = self._url('/swarm/unlock') + res = self._post_json(url, data=key) + self._raise_for_status(res) + return True + + @utils.minimum_version('1.24') + def update_node(self, node_id, version, node_spec=None): + """ + Update the node's configuration + + Args: + + node_id (string): ID of the node to be updated. + version (int): The version number of the node object being + updated. This is required to avoid conflicting writes. + node_spec (dict): Configuration settings to update. Any values + not provided will be removed. Default: ``None`` + + Returns: + `True` if the request went through. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + + Example: + + >>> node_spec = {'Availability': 'active', + 'Name': 'node-name', + 'Role': 'manager', + 'Labels': {'foo': 'bar'} + } + >>> client.api.update_node(node_id='24ifsmvkjbyhk', version=8, + node_spec=node_spec) + + """ + url = self._url('/nodes/{0}/update?version={1}', node_id, str(version)) + res = self._post_json(url, data=node_spec) + self._raise_for_status(res) + return True + + @utils.minimum_version('1.24') + def update_swarm(self, version, swarm_spec=None, + rotate_worker_token=False, + rotate_manager_token=False, + rotate_manager_unlock_key=False): + """ + Update the Swarm's configuration + + Args: + version (int): The version number of the swarm object being + updated. This is required to avoid conflicting writes. + swarm_spec (dict): Configuration settings to update. Use + :py:meth:`~docker.api.swarm.SwarmApiMixin.create_swarm_spec` to + generate a valid configuration. Default: ``None``. + rotate_worker_token (bool): Rotate the worker join token. Default: + ``False``. + rotate_manager_token (bool): Rotate the manager join token. + Default: ``False``. + rotate_manager_unlock_key (bool): Rotate the manager unlock key. + Default: ``False``. + + Returns: + ``True`` if the request went through. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + url = self._url('/swarm/update') + params = { + 'rotateWorkerToken': rotate_worker_token, + 'rotateManagerToken': rotate_manager_token, + 'version': version + } + if rotate_manager_unlock_key: + if utils.version_lt(self._version, '1.25'): + raise errors.InvalidVersion( + 'Rotate manager unlock key ' + 'is only available for API version >= 1.25' + ) + params['rotateManagerUnlockKey'] = rotate_manager_unlock_key + + response = self._post_json(url, data=swarm_spec, params=params) + self._raise_for_status(response) + return True diff --git a/docker/api/volume.py b/docker/api/volume.py new file mode 100644 index 0000000000..c6c036fad0 --- /dev/null +++ b/docker/api/volume.py @@ -0,0 +1,163 @@ +from .. import errors, utils + + +class VolumeApiMixin: + def volumes(self, filters=None): + """ + List volumes currently registered by the docker daemon. Similar to the + ``docker volume ls`` command. + + Args: + filters (dict): Server-side list filtering options. + + Returns: + (dict): Dictionary with list of volume objects as value of the + ``Volumes`` key. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. 
+ + Example: + + >>> client.api.volumes() + {u'Volumes': [{u'Driver': u'local', + u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data', + u'Name': u'foobar'}, + {u'Driver': u'local', + u'Mountpoint': u'/var/lib/docker/volumes/baz/_data', + u'Name': u'baz'}]} + """ + + params = { + 'filters': utils.convert_filters(filters) if filters else None + } + url = self._url('/volumes') + return self._result(self._get(url, params=params), True) + + def create_volume(self, name=None, driver=None, driver_opts=None, + labels=None): + """ + Create and register a named volume + + Args: + name (str): Name of the volume + driver (str): Name of the driver used to create the volume + driver_opts (dict): Driver options as a key-value dictionary + labels (dict): Labels to set on the volume + + Returns: + (dict): The created volume reference object + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + + Example: + + >>> volume = client.api.create_volume( + ... name='foobar', + ... driver='local', + ... driver_opts={'foo': 'bar', 'baz': 'false'}, + ... labels={"key": "value"}, + ... ) + ... print(volume) + {u'Driver': u'local', + u'Labels': {u'key': u'value'}, + u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data', + u'Name': u'foobar', + u'Scope': u'local'} + + """ + url = self._url('/volumes/create') + if driver_opts is not None and not isinstance(driver_opts, dict): + raise TypeError('driver_opts must be a dictionary') + + data = { + 'Name': name, + 'Driver': driver, + 'DriverOpts': driver_opts, + } + + if labels is not None: + if utils.compare_version('1.23', self._version) < 0: + raise errors.InvalidVersion( + 'volume labels were introduced in API 1.23' + ) + if not isinstance(labels, dict): + raise TypeError('labels must be a dictionary') + data["Labels"] = labels + + return self._result(self._post_json(url, data=data), True) + + def inspect_volume(self, name): + """ + Retrieve volume info by name. + + Args: + name (str): volume name + + Returns: + (dict): Volume information dictionary + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + + Example: + + >>> client.api.inspect_volume('foobar') + {u'Driver': u'local', + u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data', + u'Name': u'foobar'} + + """ + url = self._url('/volumes/{0}', name) + return self._result(self._get(url), True) + + @utils.minimum_version('1.25') + def prune_volumes(self, filters=None): + """ + Delete unused volumes + + Args: + filters (dict): Filters to process on the prune list. + + Returns: + (dict): A dict containing a list of deleted volume names and + the amount of disk space reclaimed in bytes. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + params = {} + if filters: + params['filters'] = utils.convert_filters(filters) + url = self._url('/volumes/prune') + return self._result(self._post(url, params=params), True) + + def remove_volume(self, name, force=False): + """ + Remove a volume. Similar to the ``docker volume rm`` command. + + Args: + name (str): The volume's name + force (bool): Force removal of volumes that were already removed + out of band by the volume driver plugin. + + Raises: + :py:class:`docker.errors.APIError` + If volume failed to remove. 
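+
+        Example (illustrative)::
+
+            >>> client.api.remove_volume('foobar', force=True)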
+        """
+        params = {}
+        if force:
+            if utils.version_lt(self._version, '1.25'):
+                raise errors.InvalidVersion(
+                    'force removal was introduced in API 1.25'
+                )
+            params = {'force': force}
+
+        url = self._url('/volumes/{0}', name, params=params)
+        resp = self._delete(url)
+        self._raise_for_status(resp)
diff --git a/docker/auth.py b/docker/auth.py
new file mode 100644
index 0000000000..96a6e3a656
--- /dev/null
+++ b/docker/auth.py
@@ -0,0 +1,378 @@
+import base64
+import json
+import logging
+
+from . import credentials, errors
+from .utils import config
+
+INDEX_NAME = 'docker.io'
+INDEX_URL = f'https://index.{INDEX_NAME}/v1/'
+TOKEN_USERNAME = '<token>'
+
+log = logging.getLogger(__name__)
+
+
+def resolve_repository_name(repo_name):
+    if '://' in repo_name:
+        raise errors.InvalidRepository(
+            f'Repository name cannot contain a scheme ({repo_name})'
+        )
+
+    index_name, remote_name = split_repo_name(repo_name)
+    if index_name[0] == '-' or index_name[-1] == '-':
+        raise errors.InvalidRepository(
+            f'Invalid index name ({index_name}). '
+            'Cannot begin or end with a hyphen.'
+        )
+    return resolve_index_name(index_name), remote_name
+
+
+def resolve_index_name(index_name):
+    index_name = convert_to_hostname(index_name)
+    if index_name == f"index.{INDEX_NAME}":
+        index_name = INDEX_NAME
+    return index_name
+
+
+def get_config_header(client, registry):
+    log.debug('Looking for auth config')
+    if not client._auth_configs or client._auth_configs.is_empty:
+        log.debug(
+            "No auth config in memory - loading from filesystem"
+        )
+        client._auth_configs = load_config(credstore_env=client.credstore_env)
+    authcfg = resolve_authconfig(
+        client._auth_configs, registry, credstore_env=client.credstore_env
+    )
+    # Do not fail here if no authentication exists for this
+    # specific registry as we can have a readonly pull. Just
+    # put the header if we can.
+    if authcfg:
+        log.debug('Found auth config')
+        # auth_config needs to be a dict in the format used by
+        # auth.py: username, password, serveraddress, email
+        return encode_header(authcfg)
+    log.debug('No auth config found')
+    return None
+
+
+def split_repo_name(repo_name):
+    parts = repo_name.split('/', 1)
+    if len(parts) == 1 or (
+        '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost'
+    ):
+        # This is a docker index repo (ex: username/foobar or ubuntu)
+        return INDEX_NAME, repo_name
+    return tuple(parts)
+
+
+def get_credential_store(authconfig, registry):
+    if not isinstance(authconfig, AuthConfig):
+        authconfig = AuthConfig(authconfig)
+    return authconfig.get_credential_store(registry)
+
+
+class AuthConfig(dict):
+    def __init__(self, dct, credstore_env=None):
+        if 'auths' not in dct:
+            dct['auths'] = {}
+        self.update(dct)
+        self._credstore_env = credstore_env
+        self._stores = {}
+
+    @classmethod
+    def parse_auth(cls, entries, raise_on_error=False):
+        """
+        Parses authentication entries
+
+        Args:
+            entries: Dict of authentication entries.
+            raise_on_error: If set to true, an invalid format will raise
+                InvalidConfigFile
+
+        Returns:
+            Authentication registry.
+        """
+
+        conf = {}
+        for registry, entry in entries.items():
+            if not isinstance(entry, dict):
+                log.debug(
+                    f'Config entry for key {registry} is not auth config'
+                )
+                # We sometimes fall back to parsing the whole config as if it
+                # was the auth config by itself, for legacy purposes. In that
+                # case, we fail silently and return an empty conf if any of the
+                # keys is not formatted properly.
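+                # (An "auth-only" file maps registry names directly to
+                # entries such as {'auth': '...', 'email': '...'}, with no
+                # surrounding 'auths' key.)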
+ if raise_on_error: + raise errors.InvalidConfigFile( + f'Invalid configuration for registry {registry}' + ) + return {} + if 'identitytoken' in entry: + log.debug(f'Found an IdentityToken entry for registry {registry}') + conf[registry] = { + 'IdentityToken': entry['identitytoken'] + } + continue # Other values are irrelevant if we have a token + + if 'auth' not in entry: + # Starting with engine v1.11 (API 1.23), an empty dictionary is + # a valid value in the auths config. + # https://github.com/docker/compose/issues/3265 + log.debug( + f'Auth data for {registry} is absent. ' + f'Client might be using a credentials store instead.' + ) + conf[registry] = {} + continue + + username, password = decode_auth(entry['auth']) + log.debug( + f'Found entry (registry={registry!r}, username={username!r})' + ) + + conf[registry] = { + 'username': username, + 'password': password, + 'email': entry.get('email'), + 'serveraddress': registry, + } + return conf + + @classmethod + def load_config(cls, config_path, config_dict, credstore_env=None): + """ + Loads authentication data from a Docker configuration file in the given + root directory or if config_path is passed use given path. + Lookup priority: + explicit config_path parameter > DOCKER_CONFIG environment + variable > ~/.docker/config.json > ~/.dockercfg + """ + + if not config_dict: + config_file = config.find_config_file(config_path) + + if not config_file: + return cls({}, credstore_env) + try: + with open(config_file) as f: + config_dict = json.load(f) + except (OSError, KeyError, ValueError) as e: + # Likely missing new Docker config file or it's in an + # unknown format, continue to attempt to read old location + # and format. + log.debug(e) + return cls(_load_legacy_config(config_file), credstore_env) + + res = {} + if config_dict.get('auths'): + log.debug("Found 'auths' section") + res.update({ + 'auths': cls.parse_auth( + config_dict.pop('auths'), raise_on_error=True + ) + }) + if config_dict.get('credsStore'): + log.debug("Found 'credsStore' section") + res.update({'credsStore': config_dict.pop('credsStore')}) + if config_dict.get('credHelpers'): + log.debug("Found 'credHelpers' section") + res.update({'credHelpers': config_dict.pop('credHelpers')}) + if res: + return cls(res, credstore_env) + + log.debug( + "Couldn't find auth-related section ; attempting to interpret " + "as auth-only file" + ) + return cls({'auths': cls.parse_auth(config_dict)}, credstore_env) + + @property + def auths(self): + return self.get('auths', {}) + + @property + def creds_store(self): + return self.get('credsStore', None) + + @property + def cred_helpers(self): + return self.get('credHelpers', {}) + + @property + def is_empty(self): + return ( + not self.auths and not self.creds_store and not self.cred_helpers + ) + + def resolve_authconfig(self, registry=None): + """ + Returns the authentication data from the given auth configuration for a + specific registry. As with the Docker client, legacy entries in the + config with full URLs are stripped down to hostnames before checking + for a match. Returns None if no match was found. 
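+
+        Example (a sketch; the pre-parsed ``auths`` entry is illustrative):
+
+            >>> cfg = AuthConfig({'auths': {'myregistry.example.com': {
+            ...     'username': 'alice', 'password': 'secret',
+            ...     'email': None,
+            ...     'serveraddress': 'myregistry.example.com'}}})
+            >>> cfg.resolve_authconfig('https://myregistry.example.com/v1/')
+            {'username': 'alice', 'password': 'secret', 'email': None, 'serveraddress': 'myregistry.example.com'}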
+ """ + + if self.creds_store or self.cred_helpers: + store_name = self.get_credential_store(registry) + if store_name is not None: + log.debug( + f'Using credentials store "{store_name}"' + ) + cfg = self._resolve_authconfig_credstore(registry, store_name) + if cfg is not None: + return cfg + log.debug('No entry in credstore - fetching from auth dict') + + # Default to the public index server + registry = resolve_index_name(registry) if registry else INDEX_NAME + log.debug(f"Looking for auth entry for {repr(registry)}") + + if registry in self.auths: + log.debug(f"Found {repr(registry)}") + return self.auths[registry] + + for key, conf in self.auths.items(): + if resolve_index_name(key) == registry: + log.debug(f"Found {repr(key)}") + return conf + + log.debug("No entry found") + return None + + def _resolve_authconfig_credstore(self, registry, credstore_name): + if not registry or registry == INDEX_NAME: + # The ecosystem is a little schizophrenic with index.docker.io VS + # docker.io - in that case, it seems the full URL is necessary. + registry = INDEX_URL + log.debug(f"Looking for auth entry for {repr(registry)}") + store = self._get_store_instance(credstore_name) + try: + data = store.get(registry) + res = { + 'ServerAddress': registry, + } + if data['Username'] == TOKEN_USERNAME: + res['IdentityToken'] = data['Secret'] + else: + res.update({ + 'Username': data['Username'], + 'Password': data['Secret'], + }) + return res + except credentials.CredentialsNotFound: + log.debug('No entry found') + return None + except credentials.StoreError as e: + raise errors.DockerException( + f'Credentials store error: {repr(e)}' + ) from e + + def _get_store_instance(self, name): + if name not in self._stores: + self._stores[name] = credentials.Store( + name, environment=self._credstore_env + ) + return self._stores[name] + + def get_credential_store(self, registry): + if not registry or registry == INDEX_NAME: + registry = INDEX_URL + + return self.cred_helpers.get(registry) or self.creds_store + + def get_all_credentials(self): + auth_data = self.auths.copy() + if self.creds_store: + # Retrieve all credentials from the default store + store = self._get_store_instance(self.creds_store) + for k in store.list().keys(): + auth_data[k] = self._resolve_authconfig_credstore( + k, self.creds_store + ) + auth_data[convert_to_hostname(k)] = auth_data[k] + + # credHelpers entries take priority over all others + for reg, store_name in self.cred_helpers.items(): + auth_data[reg] = self._resolve_authconfig_credstore( + reg, store_name + ) + auth_data[convert_to_hostname(reg)] = auth_data[reg] + + return auth_data + + def add_auth(self, reg, data): + self['auths'][reg] = data + + +def resolve_authconfig(authconfig, registry=None, credstore_env=None): + if not isinstance(authconfig, AuthConfig): + authconfig = AuthConfig(authconfig, credstore_env) + return authconfig.resolve_authconfig(registry) + + +def convert_to_hostname(url): + return url.replace('http://', '').replace('https://', '').split('/', 1)[0] + + +def decode_auth(auth): + if isinstance(auth, str): + auth = auth.encode('ascii') + s = base64.b64decode(auth) + login, pwd = s.split(b':', 1) + return login.decode('utf8'), pwd.decode('utf8') + + +def encode_header(auth): + auth_json = json.dumps(auth).encode('ascii') + return base64.urlsafe_b64encode(auth_json) + + +def parse_auth(entries, raise_on_error=False): + """ + Parses authentication entries + + Args: + entries: Dict of authentication entries. 
+ raise_on_error: If set to true, an invalid format will raise + InvalidConfigFile + + Returns: + Authentication registry. + """ + + return AuthConfig.parse_auth(entries, raise_on_error) + + +def load_config(config_path=None, config_dict=None, credstore_env=None): + return AuthConfig.load_config(config_path, config_dict, credstore_env) + + +def _load_legacy_config(config_file): + log.debug("Attempting to parse legacy auth file format") + try: + data = [] + with open(config_file) as f: + for line in f.readlines(): + data.append(line.strip().split(' = ')[1]) + if len(data) < 2: + # Not enough data + raise errors.InvalidConfigFile( + 'Invalid or empty configuration file!' + ) + + username, password = decode_auth(data[0]) + return {'auths': { + INDEX_NAME: { + 'username': username, + 'password': password, + 'email': data[1], + 'serveraddress': INDEX_URL, + } + }} + except Exception as e: + log.debug(e) + + log.debug("All parsing attempts failed - returning empty config") + return {} diff --git a/docker/auth/__init__.py b/docker/auth/__init__.py deleted file mode 100644 index d068b7fada..0000000000 --- a/docker/auth/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from .auth import ( - INDEX_URL, - encode_header, - load_config, - resolve_authconfig, - resolve_repository_name, -) # flake8: noqa \ No newline at end of file diff --git a/docker/auth/auth.py b/docker/auth/auth.py deleted file mode 100644 index 1c29615546..0000000000 --- a/docker/auth/auth.py +++ /dev/null @@ -1,199 +0,0 @@ -# Copyright 2013 dotCloud inc. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import base64 -import fileinput -import json -import os - -import six - -from ..utils import utils -from .. import errors - -INDEX_URL = 'https://index.docker.io/v1/' -DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json') -LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg' - - -def expand_registry_url(hostname, insecure=False): - if hostname.startswith('http:') or hostname.startswith('https:'): - return hostname - if utils.ping_registry('https://' + hostname): - return 'https://' + hostname - elif insecure: - return 'http://' + hostname - else: - raise errors.DockerException( - "HTTPS endpoint unresponsive and insecure mode isn't enabled." - ) - - -def resolve_repository_name(repo_name, insecure=False): - if '://' in repo_name: - raise errors.InvalidRepository( - 'Repository name cannot contain a scheme ({0})'.format(repo_name)) - parts = repo_name.split('/', 1) - if '.' 
not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost': - # This is a docker index repo (ex: foo/bar or ubuntu) - return INDEX_URL, repo_name - if len(parts) < 2: - raise errors.InvalidRepository( - 'Invalid repository name ({0})'.format(repo_name)) - - if 'index.docker.io' in parts[0]: - raise errors.InvalidRepository( - 'Invalid repository name, try "{0}" instead'.format(parts[1]) - ) - - return expand_registry_url(parts[0], insecure), parts[1] - - -def resolve_authconfig(authconfig, registry=None): - """ - Returns the authentication data from the given auth configuration for a - specific registry. As with the Docker client, legacy entries in the config - with full URLs are stripped down to hostnames before checking for a match. - Returns None if no match was found. - """ - # Default to the public index server - registry = convert_to_hostname(registry) if registry else INDEX_URL - - if registry in authconfig: - return authconfig[registry] - - for key, config in six.iteritems(authconfig): - if convert_to_hostname(key) == registry: - return config - - return None - - -def convert_to_hostname(url): - return url.replace('http://', '').replace('https://', '').split('/', 1)[0] - - -def encode_auth(auth_info): - return base64.b64encode(auth_info.get('username', '') + b':' + - auth_info.get('password', '')) - - -def decode_auth(auth): - if isinstance(auth, six.string_types): - auth = auth.encode('ascii') - s = base64.b64decode(auth) - login, pwd = s.split(b':', 1) - return login.decode('ascii'), pwd.decode('ascii') - - -def encode_header(auth): - auth_json = json.dumps(auth).encode('ascii') - return base64.b64encode(auth_json) - - -def encode_full_header(auth): - """ Returns the given auth block encoded for the X-Registry-Config header. - """ - return encode_header({'configs': auth}) - - -def parse_auth(entries): - """ - Parses authentication entries - - Args: - entries: Dict of authentication entries. - - Returns: - Authentication registry. - """ - - conf = {} - for registry, entry in six.iteritems(entries): - username, password = decode_auth(entry['auth']) - conf[registry] = { - 'username': username, - 'password': password, - 'email': entry['email'], - 'serveraddress': registry, - } - return conf - - -def load_config(config_path=None): - """ - Loads authentication data from a Docker configuration file in the given - root directory or if config_path is passed use given path. - """ - conf = {} - data = None - - # Prefer ~/.docker/config.json. - config_file = config_path or os.path.join(os.path.expanduser('~'), - DOCKER_CONFIG_FILENAME) - - if os.path.exists(config_file): - try: - with open(config_file) as f: - for section, data in six.iteritems(json.load(f)): - if section != 'auths': - continue - return parse_auth(data) - except (IOError, KeyError, ValueError): - # Likely missing new Docker config file or it's in an - # unknown format, continue to attempt to read old location - # and format. - pass - - config_file = config_path or os.path.join(os.path.expanduser('~'), - LEGACY_DOCKER_CONFIG_FILENAME) - - # if config path doesn't exist return empty config - if not os.path.exists(config_file): - return {} - - # Try reading legacy location as JSON. 
- try: - with open(config_file) as f: - return parse_auth(json.load(f)) - except: - pass - - # If that fails, we assume the configuration file contains a single - # authentication token for the public registry in the following format: - # - # auth = AUTH_TOKEN - # email = email@domain.com - try: - data = [] - for line in fileinput.input(config_file): - data.append(line.strip().split(' = ')[1]) - if len(data) < 2: - # Not enough data - raise errors.InvalidConfigFile( - 'Invalid or empty configuration file!') - - username, password = decode_auth(data[0]) - conf[INDEX_URL] = { - 'username': username, - 'password': password, - 'email': data[1], - 'serveraddress': INDEX_URL, - } - return conf - except: - pass - - # If all fails, return an empty config - return {} diff --git a/docker/client.py b/docker/client.py index bb12e000ac..9012d24c9c 100644 --- a/docker/client.py +++ b/docker/client.py @@ -1,894 +1,222 @@ -# Copyright 2013 dotCloud inc. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import re -import shlex -import warnings -from datetime import datetime - -import six - -from . import clientbase -from . import constants -from . import errors -from .auth import auth -from .utils import utils, check_resource - - -class Client(clientbase.ClientBase): - @check_resource - def attach(self, container, stdout=True, stderr=True, - stream=False, logs=False): - params = { - 'logs': logs and 1 or 0, - 'stdout': stdout and 1 or 0, - 'stderr': stderr and 1 or 0, - 'stream': stream and 1 or 0, - } - u = self._url("/containers/{0}/attach".format(container)) - response = self._post(u, params=params, stream=stream) - - # Stream multi-plexing was only introduced in API v1.6. Anything before - # that needs old-style streaming. 
- if utils.compare_version('1.6', self._version) < 0: - def stream_result(): - self._raise_for_status(response) - for line in response.iter_lines(chunk_size=1, - decode_unicode=True): - # filter out keep-alive new lines - if line: - yield line - - return stream_result() if stream else \ - self._result(response, binary=True) - - sep = bytes() if six.PY3 else str() - - if stream: - return self._multiplexed_response_stream_helper(response) - else: - return sep.join( - [x for x in self._multiplexed_buffer_helper(response)] - ) - - @check_resource - def attach_socket(self, container, params=None, ws=False): - if params is None: - params = { - 'stdout': 1, - 'stderr': 1, - 'stream': 1 - } - - if ws: - return self._attach_websocket(container, params) - - u = self._url("/containers/{0}/attach".format(container)) - return self._get_raw_response_socket(self.post( - u, None, params=self._attach_params(params), stream=True)) - - def build(self, path=None, tag=None, quiet=False, fileobj=None, - nocache=False, rm=False, stream=False, timeout=None, - custom_context=False, encoding=None, pull=False, - forcerm=False, dockerfile=None, container_limits=None, - decode=False): - remote = context = headers = None - container_limits = container_limits or {} - if path is None and fileobj is None: - raise TypeError("Either path or fileobj needs to be provided.") - - for key in container_limits.keys(): - if key not in constants.CONTAINER_LIMITS_KEYS: - raise errors.DockerException( - 'Invalid container_limits key {0}'.format(key) - ) - - if custom_context: - if not fileobj: - raise TypeError("You must specify fileobj with custom_context") - context = fileobj - elif fileobj is not None: - context = utils.mkbuildcontext(fileobj) - elif path.startswith(('http://', 'https://', - 'git://', 'github.com/', 'git@')): - remote = path - elif not os.path.isdir(path): - raise TypeError("You must specify a directory to build in path") - else: - dockerignore = os.path.join(path, '.dockerignore') - exclude = None - if os.path.exists(dockerignore): - with open(dockerignore, 'r') as f: - exclude = list(filter(bool, f.read().splitlines())) - # These are handled by the docker daemon and should not be - # excluded on the client - if 'Dockerfile' in exclude: - exclude.remove('Dockerfile') - if '.dockerignore' in exclude: - exclude.remove(".dockerignore") - context = utils.tar(path, exclude=exclude) - - if utils.compare_version('1.8', self._version) >= 0: - stream = True - - if dockerfile and utils.compare_version('1.17', self._version) < 0: - raise errors.InvalidVersion( - 'dockerfile was only introduced in API version 1.17' - ) - - if utils.compare_version('1.19', self._version) < 0: - pull = 1 if pull else 0 - - u = self._url('/build') - params = { - 't': tag, - 'remote': remote, - 'q': quiet, - 'nocache': nocache, - 'rm': rm, - 'forcerm': forcerm, - 'pull': pull, - 'dockerfile': dockerfile, - } - params.update(container_limits) - - if context is not None: - headers = {'Content-Type': 'application/tar'} - if encoding: - headers['Content-Encoding'] = encoding - - if utils.compare_version('1.9', self._version) >= 0: - # If we don't have any auth data so far, try reloading the config - # file one more time in case anything showed up in there. - if not self._auth_configs: - self._auth_configs = auth.load_config() - - # Send the full auth configuration (if any exists), since the build - # could use any (or all) of the registries. 
-        if self._auth_configs:
-            if headers is None:
-                headers = {}
-            headers['X-Registry-Config'] = auth.encode_full_header(
-                self._auth_configs
-            )
-
-        response = self._post(
-            u,
-            data=context,
-            params=params,
-            headers=headers,
-            stream=stream,
-            timeout=timeout,
-        )
+from .api.client import APIClient
+from .constants import DEFAULT_MAX_POOL_SIZE, DEFAULT_TIMEOUT_SECONDS
+from .models.configs import ConfigCollection
+from .models.containers import ContainerCollection
+from .models.images import ImageCollection
+from .models.networks import NetworkCollection
+from .models.nodes import NodeCollection
+from .models.plugins import PluginCollection
+from .models.secrets import SecretCollection
+from .models.services import ServiceCollection
+from .models.swarm import Swarm
+from .models.volumes import VolumeCollection
+from .utils import kwargs_from_env
+
+
+class DockerClient:
+    """
+    A client for communicating with a Docker server.
+
+    Example:
+
+        >>> import docker
+        >>> client = docker.DockerClient(base_url='unix://var/run/docker.sock')
+
+    Args:
+        base_url (str): URL to the Docker server. For example,
+            ``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
+        version (str): The version of the API to use. Set to ``auto`` to
+            automatically detect the server's version. Default: ``1.45``
+        timeout (int): Default timeout for API calls, in seconds.
+        tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
+            ``True`` to enable it with default options, or pass a
+            :py:class:`~docker.tls.TLSConfig` object to use custom
+            configuration.
+        user_agent (str): Set a custom user agent for requests to the server.
+        credstore_env (dict): Override environment variables when calling the
+            credential store process.
+        use_ssh_client (bool): If set to ``True``, an ssh connection is made
+            via shelling out to the ssh client. Ensure the ssh client is
+            installed and configured on the host.
+        max_pool_size (int): The maximum number of connections
+            to save in the pool.
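+
+    Example (a sketch; the daemon address and certificate paths are
+    illustrative):
+
+        >>> import docker
+        >>> tls_config = docker.tls.TLSConfig(
+        ...     client_cert=('/certs/cert.pem', '/certs/key.pem'),
+        ...     ca_cert='/certs/ca.pem')
+        >>> client = docker.DockerClient(
+        ...     base_url='tcp://10.0.0.1:2376', tls=tls_config)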
+ """ + def __init__(self, *args, **kwargs): + self.api = APIClient(*args, **kwargs) - if context is not None and not custom_context: - context.close() - - if stream: - return self._stream_helper(response, decode=decode) - else: - output = self._result(response) - srch = r'Successfully built ([0-9a-f]+)' - match = re.search(srch, output) - if not match: - return None, output - return match.group(1), output - - @check_resource - def commit(self, container, repository=None, tag=None, message=None, - author=None, conf=None): - params = { - 'container': container, - 'repo': repository, - 'tag': tag, - 'comment': message, - 'author': author - } - u = self._url("/commit") - return self._result(self._post_json(u, data=conf, params=params), - json=True) - - def containers(self, quiet=False, all=False, trunc=False, latest=False, - since=None, before=None, limit=-1, size=False, - filters=None): - params = { - 'limit': 1 if latest else limit, - 'all': 1 if all else 0, - 'size': 1 if size else 0, - 'trunc_cmd': 1 if trunc else 0, - 'since': since, - 'before': before - } - if filters: - params['filters'] = utils.convert_filters(filters) - u = self._url("/containers/json") - res = self._result(self._get(u, params=params), True) - - if quiet: - return [{'Id': x['Id']} for x in res] - if trunc: - for x in res: - x['Id'] = x['Id'][:12] - return res - - @check_resource - def copy(self, container, resource): - res = self._post_json( - self._url("/containers/{0}/copy".format(container)), - data={"Resource": resource}, - stream=True - ) - self._raise_for_status(res) - return res.raw - - def create_container(self, image, command=None, hostname=None, user=None, - detach=False, stdin_open=False, tty=False, - mem_limit=None, ports=None, environment=None, - dns=None, volumes=None, volumes_from=None, - network_disabled=False, name=None, entrypoint=None, - cpu_shares=None, working_dir=None, domainname=None, - memswap_limit=None, cpuset=None, host_config=None, - mac_address=None, labels=None, volume_driver=None): - - if isinstance(volumes, six.string_types): - volumes = [volumes, ] - - if host_config and utils.compare_version('1.15', self._version) < 0: - raise errors.InvalidVersion( - 'host_config is not supported in API < 1.15' - ) - - config = utils.create_container_config( - self._version, image, command, hostname, user, detach, stdin_open, - tty, mem_limit, ports, environment, dns, volumes, volumes_from, - network_disabled, entrypoint, cpu_shares, working_dir, domainname, - memswap_limit, cpuset, host_config, mac_address, labels, - volume_driver - ) - return self.create_container_from_config(config, name) - - def create_container_from_config(self, config, name=None): - u = self._url("/containers/create") - params = { - 'name': name - } - res = self._post_json(u, data=config, params=params) - return self._result(res, True) - - @check_resource - def diff(self, container): - return self._result(self._get(self._url("/containers/{0}/changes". 
- format(container))), True) - - def events(self, since=None, until=None, filters=None, decode=None): - if isinstance(since, datetime): - since = utils.datetime_to_timestamp(since) - - if isinstance(until, datetime): - until = utils.datetime_to_timestamp(until) - - if filters: - filters = utils.convert_filters(filters) - - params = { - 'since': since, - 'until': until, - 'filters': filters - } - - return self._stream_helper(self.get(self._url('/events'), - params=params, stream=True), - decode=decode) - - @check_resource - def execute(self, container, cmd, detach=False, stdout=True, stderr=True, - stream=False, tty=False): - warnings.warn( - 'Client.execute is being deprecated. Please use exec_create & ' - 'exec_start instead', DeprecationWarning - ) - create_res = self.exec_create( - container, cmd, stdout, stderr, tty - ) + @classmethod + def from_env(cls, **kwargs): + """ + Return a client configured from environment variables. - return self.exec_start(create_res, detach, tty, stream) - - def exec_create(self, container, cmd, stdout=True, stderr=True, tty=False, - privileged=False): - if utils.compare_version('1.15', self._version) < 0: - raise errors.InvalidVersion('Exec is not supported in API < 1.15') - if privileged and utils.compare_version('1.19', self._version) < 0: - raise errors.InvalidVersion( - 'Privileged exec is not supported in API < 1.19' - ) - if isinstance(cmd, six.string_types): - cmd = shlex.split(str(cmd)) - - data = { - 'Container': container, - 'User': '', - 'Privileged': privileged, - 'Tty': tty, - 'AttachStdin': False, - 'AttachStdout': stdout, - 'AttachStderr': stderr, - 'Cmd': cmd - } - - url = self._url('/containers/{0}/exec'.format(container)) - res = self._post_json(url, data=data) - return self._result(res, True) - - def exec_inspect(self, exec_id): - if utils.compare_version('1.15', self._version) < 0: - raise errors.InvalidVersion('Exec is not supported in API < 1.15') - if isinstance(exec_id, dict): - exec_id = exec_id.get('Id') - res = self._get(self._url("/exec/{0}/json".format(exec_id))) - return self._result(res, True) - - def exec_resize(self, exec_id, height=None, width=None): - if utils.compare_version('1.15', self._version) < 0: - raise errors.InvalidVersion('Exec is not supported in API < 1.15') - if isinstance(exec_id, dict): - exec_id = exec_id.get('Id') - - params = {'h': height, 'w': width} - url = self._url("/exec/{0}/resize".format(exec_id)) - res = self._post(url, params=params) - self._raise_for_status(res) - - def exec_start(self, exec_id, detach=False, tty=False, stream=False): - if utils.compare_version('1.15', self._version) < 0: - raise errors.InvalidVersion('Exec is not supported in API < 1.15') - if isinstance(exec_id, dict): - exec_id = exec_id.get('Id') - - data = { - 'Tty': tty, - 'Detach': detach - } - - res = self._post_json(self._url('/exec/{0}/start'.format(exec_id)), - data=data, stream=stream) - self._raise_for_status(res) - if stream: - return self._multiplexed_response_stream_helper(res) - elif six.PY3: - return bytes().join( - [x for x in self._multiplexed_buffer_helper(res)] - ) - else: - return str().join( - [x for x in self._multiplexed_buffer_helper(res)] - ) - - @check_resource - def export(self, container): - res = self._get(self._url("/containers/{0}/export".format(container)), - stream=True) - self._raise_for_status(res) - return res.raw - - @check_resource - def get_image(self, image): - res = self._get(self._url("/images/{0}/get".format(image)), - stream=True) - self._raise_for_status(res) - return res.raw - 
- @check_resource - def history(self, image): - res = self._get(self._url("/images/{0}/history".format(image))) - return self._result(res, True) - - def images(self, name=None, quiet=False, all=False, viz=False, - filters=None): - if viz: - if utils.compare_version('1.7', self._version) >= 0: - raise Exception('Viz output is not supported in API >= 1.7!') - return self._result(self._get(self._url("images/viz"))) - params = { - 'filter': name, - 'only_ids': 1 if quiet else 0, - 'all': 1 if all else 0, - } - if filters: - params['filters'] = utils.convert_filters(filters) - res = self._result(self._get(self._url("/images/json"), params=params), - True) - if quiet: - return [x['Id'] for x in res] - return res - - def import_image(self, src=None, repository=None, tag=None, image=None): - if src: - if isinstance(src, six.string_types): - try: - result = self.import_image_from_file( - src, repository=repository, tag=tag) - except IOError: - result = self.import_image_from_url( - src, repository=repository, tag=tag) - else: - result = self.import_image_from_data( - src, repository=repository, tag=tag) - elif image: - result = self.import_image_from_image( - image, repository=repository, tag=tag) - else: - raise Exception("Must specify a src or image") - - return result - - def import_image_from_data(self, data, repository=None, tag=None): - u = self._url("/images/create") - params = { - 'fromSrc': '-', - 'repo': repository, - 'tag': tag - } - headers = { - 'Content-Type': 'application/tar', - } - return self._result( - self._post(u, data=data, params=params, headers=headers)) - - def import_image_from_file(self, filename, repository=None, tag=None): - u = self._url("/images/create") - params = { - 'fromSrc': '-', - 'repo': repository, - 'tag': tag - } - headers = { - 'Content-Type': 'application/tar', - } - with open(filename, 'rb') as f: - return self._result( - self._post(u, data=f, params=params, headers=headers, - timeout=None)) - - def import_image_from_stream(self, stream, repository=None, tag=None): - u = self._url("/images/create") - params = { - 'fromSrc': '-', - 'repo': repository, - 'tag': tag - } - headers = { - 'Content-Type': 'application/tar', - 'Transfer-Encoding': 'chunked', - } - return self._result( - self._post(u, data=stream, params=params, headers=headers)) - - def import_image_from_url(self, url, repository=None, tag=None): - u = self._url("/images/create") - params = { - 'fromSrc': url, - 'repo': repository, - 'tag': tag - } - return self._result( - self._post(u, data=None, params=params)) - - def import_image_from_image(self, image, repository=None, tag=None): - u = self._url("/images/create") - params = { - 'fromImage': image, - 'repo': repository, - 'tag': tag - } - return self._result( - self._post(u, data=None, params=params)) - - def info(self): - return self._result(self._get(self._url("/info")), - True) - - @check_resource - def insert(self, image, url, path): - if utils.compare_version('1.12', self._version) >= 0: - raise errors.DeprecatedMethod( - 'insert is not available for API version >=1.12' - ) - api_url = self._url("/images/{0}/insert".format(image)) - params = { - 'url': url, - 'path': path - } - return self._result(self._post(api_url, params=params)) - - @check_resource - def inspect_container(self, container): - return self._result( - self._get(self._url("/containers/{0}/json".format(container))), - True) - - @check_resource - def inspect_image(self, image): - return self._result( - self._get( - self._url("/images/{0}/json".format(image.replace('/', 
'%2F'))) - ), - True - ) + The environment variables used are the same as those used by the + Docker command-line client. They are: - @check_resource - def kill(self, container, signal=None): - url = self._url("/containers/{0}/kill".format(container)) - params = {} - if signal is not None: - params['signal'] = signal - res = self._post(url, params=params) - - self._raise_for_status(res) - - def load_image(self, data): - res = self._post(self._url("/images/load"), data=data) - self._raise_for_status(res) - - def login(self, username, password=None, email=None, registry=None, - reauth=False, insecure_registry=False, dockercfg_path=None): - # If we don't have any auth data so far, try reloading the config file - # one more time in case anything showed up in there. - # If dockercfg_path is passed check to see if the config file exists, - # if so load that config. - if dockercfg_path and os.path.exists(dockercfg_path): - self._auth_configs = auth.load_config(dockercfg_path) - elif not self._auth_configs: - self._auth_configs = auth.load_config() - - registry = registry or auth.INDEX_URL - - authcfg = auth.resolve_authconfig(self._auth_configs, registry) - # If we found an existing auth config for this registry and username - # combination, we can return it immediately unless reauth is requested. - if authcfg and authcfg.get('username', None) == username \ - and not reauth: - return authcfg - - req_data = { - 'username': username, - 'password': password, - 'email': email, - 'serveraddress': registry, - } - - response = self._post_json(self._url('/auth'), data=req_data) - if response.status_code == 200: - self._auth_configs[registry] = req_data - return self._result(response, json=True) - - @check_resource - def logs(self, container, stdout=True, stderr=True, stream=False, - timestamps=False, tail='all'): - if utils.compare_version('1.11', self._version) >= 0: - params = {'stderr': stderr and 1 or 0, - 'stdout': stdout and 1 or 0, - 'timestamps': timestamps and 1 or 0, - 'follow': stream and 1 or 0, - } - if utils.compare_version('1.13', self._version) >= 0: - if tail != 'all' and (not isinstance(tail, int) or tail <= 0): - tail = 'all' - params['tail'] = tail - url = self._url("/containers/{0}/logs".format(container)) - res = self._get(url, params=params, stream=stream) - if stream: - return self._multiplexed_response_stream_helper(res) - elif six.PY3: - return bytes().join( - [x for x in self._multiplexed_buffer_helper(res)] - ) - else: - return str().join( - [x for x in self._multiplexed_buffer_helper(res)] - ) - return self.attach( - container, - stdout=stdout, - stderr=stderr, - stream=stream, - logs=True - ) + .. envvar:: DOCKER_HOST - @check_resource - def pause(self, container): - url = self._url('/containers/{0}/pause'.format(container)) - res = self._post(url) - self._raise_for_status(res) - - def ping(self): - return self._result(self._get(self._url('/_ping'))) - - @check_resource - def port(self, container, private_port): - res = self._get(self._url("/containers/{0}/json".format(container))) - self._raise_for_status(res) - json_ = res.json() - s_port = str(private_port) - h_ports = None - - # Port settings is None when the container is running with - # network_mode=host. 
- port_settings = json_.get('NetworkSettings', {}).get('Ports') - if port_settings is None: - return None - - h_ports = port_settings.get(s_port + '/udp') - if h_ports is None: - h_ports = port_settings.get(s_port + '/tcp') - - return h_ports - - def pull(self, repository, tag=None, stream=False, - insecure_registry=False, auth_config=None): - if not tag: - repository, tag = utils.parse_repository_tag(repository) - registry, repo_name = auth.resolve_repository_name( - repository, insecure=insecure_registry - ) - if repo_name.count(":") == 1: - repository, tag = repository.rsplit(":", 1) - - params = { - 'tag': tag, - 'fromImage': repository - } - headers = {} - - if utils.compare_version('1.5', self._version) >= 0: - # If we don't have any auth data so far, try reloading the config - # file one more time in case anything showed up in there. - if auth_config is None: - if not self._auth_configs: - self._auth_configs = auth.load_config() - authcfg = auth.resolve_authconfig(self._auth_configs, registry) - # Do not fail here if no authentication exists for this - # specific registry as we can have a readonly pull. Just - # put the header if we can. - if authcfg: - # auth_config needs to be a dict in the format used by - # auth.py username , password, serveraddress, email - headers['X-Registry-Auth'] = auth.encode_header( - authcfg - ) - else: - headers['X-Registry-Auth'] = auth.encode_header(auth_config) - - response = self._post( - self._url('/images/create'), params=params, headers=headers, - stream=stream, timeout=None - ) + The URL to the Docker host. - self._raise_for_status(response) + .. envvar:: DOCKER_TLS_VERIFY - if stream: - return self._stream_helper(response) + Verify the host against a CA certificate. - return self._result(response) + .. envvar:: DOCKER_CERT_PATH - def push(self, repository, tag=None, stream=False, - insecure_registry=False): - if not tag: - repository, tag = utils.parse_repository_tag(repository) - registry, repo_name = auth.resolve_repository_name( - repository, insecure=insecure_registry - ) - u = self._url("/images/{0}/push".format(repository)) - params = { - 'tag': tag - } - headers = {} - - if utils.compare_version('1.5', self._version) >= 0: - # If we don't have any auth data so far, try reloading the config - # file one more time in case anything showed up in there. - if not self._auth_configs: - self._auth_configs = auth.load_config() - authcfg = auth.resolve_authconfig(self._auth_configs, registry) - - # Do not fail here if no authentication exists for this specific - # registry as we can have a readonly pull. Just put the header if - # we can. - if authcfg: - headers['X-Registry-Auth'] = auth.encode_header(authcfg) - - response = self._post_json( - u, None, headers=headers, stream=stream, params=params - ) + A path to a directory containing TLS certificates to use when + connecting to the Docker host. + + Args: + version (str): The version of the API to use. Set to ``auto`` to + automatically detect the server's version. Default: ``auto`` + timeout (int): Default timeout for API calls, in seconds. + max_pool_size (int): The maximum number of connections + to save in the pool. + environment (dict): The environment to read environment variables + from. Default: the value of ``os.environ`` + credstore_env (dict): Override environment variables when calling + the credential store process. + use_ssh_client (bool): If set to `True`, an ssh connection is + made via shelling out to the ssh client. 
Ensure the ssh + client is installed and configured on the host. - self._raise_for_status(response) - - if stream: - return self._stream_helper(response) - - return self._result(response) - - @check_resource - def remove_container(self, container, v=False, link=False, force=False): - params = {'v': v, 'link': link, 'force': force} - res = self._delete(self._url("/containers/" + container), - params=params) - self._raise_for_status(res) - - @check_resource - def remove_image(self, image, force=False, noprune=False): - params = {'force': force, 'noprune': noprune} - res = self._delete(self._url("/images/" + image), params=params) - self._raise_for_status(res) - - @check_resource - def rename(self, container, name): - if utils.compare_version('1.17', self._version) < 0: - raise errors.InvalidVersion( - 'rename was only introduced in API version 1.17' - ) - url = self._url("/containers/{0}/rename".format(container)) - params = {'name': name} - res = self._post(url, params=params) - self._raise_for_status(res) - - @check_resource - def resize(self, container, height, width): - params = {'h': height, 'w': width} - url = self._url("/containers/{0}/resize".format(container)) - res = self._post(url, params=params) - self._raise_for_status(res) - - @check_resource - def restart(self, container, timeout=10): - params = {'t': timeout} - url = self._url("/containers/{0}/restart".format(container)) - res = self._post(url, params=params) - self._raise_for_status(res) - - def search(self, term): - return self._result(self._get(self._url("/images/search"), - params={'term': term}), - True) - - @check_resource - def start(self, container, binds=None, port_bindings=None, lxc_conf=None, - publish_all_ports=False, links=None, privileged=False, - dns=None, dns_search=None, volumes_from=None, network_mode=None, - restart_policy=None, cap_add=None, cap_drop=None, devices=None, - extra_hosts=None, read_only=None, pid_mode=None, ipc_mode=None, - security_opt=None, ulimits=None): - - if utils.compare_version('1.10', self._version) < 0: - if dns is not None: - raise errors.InvalidVersion( - 'dns is only supported for API version >= 1.10' - ) - if volumes_from is not None: - raise errors.InvalidVersion( - 'volumes_from is only supported for API version >= 1.10' - ) - - if utils.compare_version('1.15', self._version) < 0: - if security_opt is not None: - raise errors.InvalidVersion( - 'security_opt is only supported for API version >= 1.15' - ) - if ipc_mode: - raise errors.InvalidVersion( - 'ipc_mode is only supported for API version >= 1.15' - ) - - if utils.compare_version('1.17', self._version) < 0: - if read_only is not None: - raise errors.InvalidVersion( - 'read_only is only supported for API version >= 1.17' - ) - if pid_mode is not None: - raise errors.InvalidVersion( - 'pid_mode is only supported for API version >= 1.17' - ) - - if utils.compare_version('1.18', self._version) < 0: - if ulimits is not None: - raise errors.InvalidVersion( - 'ulimits is only supported for API version >= 1.18' - ) - - start_config = utils.create_host_config( - binds=binds, port_bindings=port_bindings, lxc_conf=lxc_conf, - publish_all_ports=publish_all_ports, links=links, dns=dns, - privileged=privileged, dns_search=dns_search, cap_add=cap_add, - cap_drop=cap_drop, volumes_from=volumes_from, devices=devices, - network_mode=network_mode, restart_policy=restart_policy, - extra_hosts=extra_hosts, read_only=read_only, pid_mode=pid_mode, - ipc_mode=ipc_mode, security_opt=security_opt, ulimits=ulimits + Example: + + >>> import docker + 
>>> client = docker.from_env()
+
+        .. _`SSL version`:
+            https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
+        """
+        timeout = kwargs.pop('timeout', DEFAULT_TIMEOUT_SECONDS)
+        max_pool_size = kwargs.pop('max_pool_size', DEFAULT_MAX_POOL_SIZE)
+        version = kwargs.pop('version', None)
+        use_ssh_client = kwargs.pop('use_ssh_client', False)
+        return cls(
+            timeout=timeout,
+            max_pool_size=max_pool_size,
+            version=version,
+            use_ssh_client=use_ssh_client,
+            **kwargs_from_env(**kwargs)
        )
-        url = self._url("/containers/{0}/start".format(container))
-        if not start_config:
-            start_config = None
-        elif utils.compare_version('1.15', self._version) > 0:
-            warnings.warn(
-                'Passing host config parameters in start() is deprecated. '
-                'Please use host_config in create_container instead!',
-                DeprecationWarning
-            )
-        res = self._post_json(url, data=start_config)
-        self._raise_for_status(res)
-
-    @check_resource
-    def stats(self, container, decode=None):
-        if utils.compare_version('1.17', self._version) < 0:
-            raise errors.InvalidVersion(
-                'Stats retrieval is not supported in API < 1.17!')
-
-        url = self._url("/containers/{0}/stats".format(container))
-        return self._stream_helper(self._get(url, stream=True), decode=decode)
-
-    @check_resource
-    def stop(self, container, timeout=10):
-        params = {'t': timeout}
-        url = self._url("/containers/{0}/stop".format(container))
-
-        res = self._post(url, params=params,
-                         timeout=(timeout + (self.timeout or 0)))
-        self._raise_for_status(res)
-
-    @check_resource
-    def tag(self, image, repository, tag=None, force=False):
-        params = {
-            'tag': tag,
-            'repo': repository,
-            'force': 1 if force else 0
-        }
-        url = self._url("/images/{0}/tag".format(image))
-        res = self._post(url, params=params)
-        self._raise_for_status(res)
-        return res.status_code == 201
-
-    @check_resource
-    def top(self, container):
-        u = self._url("/containers/{0}/top".format(container))
-        return self._result(self._get(u), True)
-
-    def version(self, api_version=True):
-        url = self._url("/version", versioned_api=api_version)
-        return self._result(self._get(url), json=True)
-
-    @check_resource
-    def unpause(self, container):
-        url = self._url('/containers/{0}/unpause'.format(container))
-        res = self._post(url)
-        self._raise_for_status(res)
-
-    @check_resource
-    def wait(self, container, timeout=None):
-        url = self._url("/containers/{0}/wait".format(container))
-        res = self._post(url, timeout=timeout)
-        self._raise_for_status(res)
-        json_ = res.json()
-        if 'StatusCode' in json_:
-            return json_['StatusCode']
-        return -1
-
-
-class AutoVersionClient(Client):
-    def __init__(self, *args, **kwargs):
-        if 'version' in kwargs and kwargs['version']:
-            raise errors.DockerException(
-                'Can not specify version for AutoVersionClient'
-            )
-        kwargs['version'] = 'auto'
-        super(AutoVersionClient, self).__init__(*args, **kwargs)
+    # Resources
+    @property
+    def configs(self):
+        """
+        An object for managing configs on the server. See the
+        :doc:`configs documentation <configs>` for full details.
+        """
+        return ConfigCollection(client=self)
+
+    @property
+    def containers(self):
+        """
+        An object for managing containers on the server. See the
+        :doc:`containers documentation <containers>` for full details.
+        """
+        return ContainerCollection(client=self)
+
+    @property
+    def images(self):
+        """
+        An object for managing images on the server. See the
+        :doc:`images documentation <images>` for full details.
+        """
+        return ImageCollection(client=self)
+
+    @property
+    def networks(self):
+        """
+        An object for managing networks on the server. See the
+        :doc:`networks documentation <networks>` for full details.
+        """
+        return NetworkCollection(client=self)
+
+    @property
+    def nodes(self):
+        """
+        An object for managing nodes on the server. See the
+        :doc:`nodes documentation <nodes>` for full details.
+        """
+        return NodeCollection(client=self)
+
+    @property
+    def plugins(self):
+        """
+        An object for managing plugins on the server. See the
+        :doc:`plugins documentation <plugins>` for full details.
+        """
+        return PluginCollection(client=self)
+
+    @property
+    def secrets(self):
+        """
+        An object for managing secrets on the server. See the
+        :doc:`secrets documentation <secrets>` for full details.
+        """
+        return SecretCollection(client=self)
+
+    @property
+    def services(self):
+        """
+        An object for managing services on the server. See the
+        :doc:`services documentation <services>` for full details.
+        """
+        return ServiceCollection(client=self)
+
+    @property
+    def swarm(self):
+        """
+        An object for managing a swarm on the server. See the
+        :doc:`swarm documentation <swarm>` for full details.
+        """
+        return Swarm(client=self)
+
+    @property
+    def volumes(self):
+        """
+        An object for managing volumes on the server. See the
+        :doc:`volumes documentation <volumes>` for full details.
+        """
+        return VolumeCollection(client=self)
+
+    # Top-level methods
+    def events(self, *args, **kwargs):
+        return self.api.events(*args, **kwargs)
+    events.__doc__ = APIClient.events.__doc__
+
+    def df(self):
+        return self.api.df()
+    df.__doc__ = APIClient.df.__doc__
+
+    def info(self, *args, **kwargs):
+        return self.api.info(*args, **kwargs)
+    info.__doc__ = APIClient.info.__doc__
+
+    def login(self, *args, **kwargs):
+        return self.api.login(*args, **kwargs)
+    login.__doc__ = APIClient.login.__doc__
+
+    def ping(self, *args, **kwargs):
+        return self.api.ping(*args, **kwargs)
+    ping.__doc__ = APIClient.ping.__doc__
+
+    def version(self, *args, **kwargs):
+        return self.api.version(*args, **kwargs)
+    version.__doc__ = APIClient.version.__doc__
+
+    def close(self):
+        return self.api.close()
+    close.__doc__ = APIClient.close.__doc__
+
+    def __getattr__(self, name):
+        s = [f"'DockerClient' object has no attribute '{name}'"]
+        # If a user calls a method that moved to APIClient, point them to
+        # its new location.
+        if hasattr(APIClient, name):
+            s.append("In Docker SDK for Python 2.0, this method is now on the "
+                     "object APIClient. See the low-level API section of the "
+                     "documentation for more details.")
+        raise AttributeError(' '.join(s))
+
+
+from_env = DockerClient.from_env
diff --git a/docker/clientbase.py b/docker/clientbase.py
deleted file mode 100644
index e51bf3ec84..0000000000
--- a/docker/clientbase.py
+++ /dev/null
@@ -1,235 +0,0 @@
-import json
-import struct
-
-import requests
-import requests.exceptions
-import six
-import websocket
-
-
-from . import constants
-from . 
import errors -from .auth import auth -from .unixconn import unixconn -from .ssladapter import ssladapter -from .utils import utils, check_resource -from .tls import TLSConfig - - -class ClientBase(requests.Session): - def __init__(self, base_url=None, version=None, - timeout=constants.DEFAULT_TIMEOUT_SECONDS, tls=False): - super(ClientBase, self).__init__() - - if tls and not base_url.startswith('https://'): - raise errors.TLSParameterError( - 'If using TLS, the base_url argument must begin with ' - '"https://".') - - self.base_url = base_url - self.timeout = timeout - - self._auth_configs = auth.load_config() - - base_url = utils.parse_host(base_url) - if base_url.startswith('http+unix://'): - self._custom_adapter = unixconn.UnixAdapter(base_url, timeout) - self.mount('http+docker://', self._custom_adapter) - self.base_url = 'http+docker://localunixsocket' - else: - # Use SSLAdapter for the ability to specify SSL version - if isinstance(tls, TLSConfig): - tls.configure_client(self) - elif tls: - self._custom_adapter = ssladapter.SSLAdapter() - self.mount('https://', self._custom_adapter) - self.base_url = base_url - - # version detection needs to be after unix adapter mounting - if version is None: - self._version = constants.DEFAULT_DOCKER_API_VERSION - elif isinstance(version, six.string_types): - if version.lower() == 'auto': - self._version = self._retrieve_server_version() - else: - self._version = version - else: - raise errors.DockerException( - 'Version parameter must be a string or None. Found {0}'.format( - type(version).__name__ - ) - ) - - def _retrieve_server_version(self): - try: - return self.version(api_version=False)["ApiVersion"] - except KeyError: - raise errors.DockerException( - 'Invalid response from docker daemon: key "ApiVersion"' - ' is missing.' - ) - except Exception as e: - raise errors.DockerException( - 'Error while fetching server API version: {0}'.format(e) - ) - - def _set_request_timeout(self, kwargs): - """Prepare the kwargs for an HTTP request by inserting the timeout - parameter, if not already present.""" - kwargs.setdefault('timeout', self.timeout) - return kwargs - - def _post(self, url, **kwargs): - return self.post(url, **self._set_request_timeout(kwargs)) - - def _get(self, url, **kwargs): - return self.get(url, **self._set_request_timeout(kwargs)) - - def _delete(self, url, **kwargs): - return self.delete(url, **self._set_request_timeout(kwargs)) - - def _url(self, path, versioned_api=True): - if versioned_api: - return '{0}/v{1}{2}'.format(self.base_url, self._version, path) - else: - return '{0}{1}'.format(self.base_url, path) - - def _raise_for_status(self, response, explanation=None): - """Raises stored :class:`APIError`, if one occurred.""" - try: - response.raise_for_status() - except requests.exceptions.HTTPError as e: - raise errors.APIError(e, response, explanation=explanation) - - def _result(self, response, json=False, binary=False): - assert not (json and binary) - self._raise_for_status(response) - - if json: - return response.json() - if binary: - return response.content - return response.text - - def _post_json(self, url, data, **kwargs): - # Go <1.1 can't unserialize null to a string - # so we do this disgusting thing here. 
- data2 = {} - if data is not None: - for k, v in six.iteritems(data): - if v is not None: - data2[k] = v - - if 'headers' not in kwargs: - kwargs['headers'] = {} - kwargs['headers']['Content-Type'] = 'application/json' - return self._post(url, data=json.dumps(data2), **kwargs) - - def _attach_params(self, override=None): - return override or { - 'stdout': 1, - 'stderr': 1, - 'stream': 1 - } - - @check_resource - def _attach_websocket(self, container, params=None): - url = self._url("/containers/{0}/attach/ws".format(container)) - req = requests.Request("POST", url, params=self._attach_params(params)) - full_url = req.prepare().url - full_url = full_url.replace("http://", "ws://", 1) - full_url = full_url.replace("https://", "wss://", 1) - return self._create_websocket_connection(full_url) - - def _create_websocket_connection(self, url): - return websocket.create_connection(url) - - def _get_raw_response_socket(self, response): - self._raise_for_status(response) - if six.PY3: - sock = response.raw._fp.fp.raw - else: - sock = response.raw._fp.fp._sock - try: - # Keep a reference to the response to stop it being garbage - # collected. If the response is garbage collected, it will - # close TLS sockets. - sock._response = response - except AttributeError: - # UNIX sockets can't have attributes set on them, but that's - # fine because we won't be doing TLS over them - pass - - return sock - - def _stream_helper(self, response, decode=False): - """Generator for data coming from a chunked-encoded HTTP response.""" - if response.raw._fp.chunked: - reader = response.raw - while not reader.closed: - # this read call will block until we get a chunk - data = reader.read(1) - if not data: - break - if reader._fp.chunk_left: - data += reader.read(reader._fp.chunk_left) - if decode: - if six.PY3: - data = data.decode('utf-8') - data = json.loads(data) - yield data - else: - # Response isn't chunked, meaning we probably - # encountered an error immediately - yield self._result(response) - - def _multiplexed_buffer_helper(self, response): - """A generator of multiplexed data blocks read from a buffered - response.""" - buf = self._result(response, binary=True) - walker = 0 - while True: - if len(buf[walker:]) < 8: - break - _, length = struct.unpack_from('>BxxxL', buf[walker:]) - start = walker + constants.STREAM_HEADER_SIZE_BYTES - end = start + length - walker = end - yield buf[start:end] - - def _multiplexed_response_stream_helper(self, response): - """A generator of multiplexed data blocks coming from a response - stream.""" - - # Disable timeout on the underlying socket to prevent - # Read timed out(s) for long running processes - socket = self._get_raw_response_socket(response) - if six.PY3: - socket._sock.settimeout(None) - else: - socket.settimeout(None) - - while True: - header = response.raw.read(constants.STREAM_HEADER_SIZE_BYTES) - if not header: - break - _, length = struct.unpack('>BxxxL', header) - if not length: - break - data = response.raw.read(length) - if not data: - break - yield data - - def get_adapter(self, url): - try: - return super(ClientBase, self).get_adapter(url) - except requests.exceptions.InvalidSchema as e: - if self._custom_adapter: - return self._custom_adapter - else: - raise e - - @property - def api_version(self): - return self._version diff --git a/docker/constants.py b/docker/constants.py index f99f19226e..0e39dc2917 100644 --- a/docker/constants.py +++ b/docker/constants.py @@ -1,6 +1,45 @@ -DEFAULT_DOCKER_API_VERSION = '1.19' +import sys + +from .version import 
__version__ + +DEFAULT_DOCKER_API_VERSION = '1.45' +MINIMUM_DOCKER_API_VERSION = '1.24' DEFAULT_TIMEOUT_SECONDS = 60 STREAM_HEADER_SIZE_BYTES = 8 CONTAINER_LIMITS_KEYS = [ 'memory', 'memswap', 'cpushares', 'cpusetcpus' ] + +DEFAULT_HTTP_HOST = "127.0.0.1" +DEFAULT_UNIX_SOCKET = "http+unix:///var/run/docker.sock" +DEFAULT_NPIPE = 'npipe:////./pipe/docker_engine' + +BYTE_UNITS = { + 'b': 1, + 'k': 1024, + 'm': 1024 * 1024, + 'g': 1024 * 1024 * 1024 +} + + +INSECURE_REGISTRY_DEPRECATION_WARNING = \ + 'The `insecure_registry` argument to {} ' \ + 'is deprecated and non-functional. Please remove it.' + +IS_WINDOWS_PLATFORM = (sys.platform == 'win32') +WINDOWS_LONGPATH_PREFIX = '\\\\?\\' + +DEFAULT_USER_AGENT = f"docker-sdk-python/{__version__}" +DEFAULT_NUM_POOLS = 25 + +# The OpenSSH server default value for MaxSessions is 10 which means we can +# use up to 9, leaving the final session for the underlying SSH connection. +# For more details see: https://github.com/docker/docker-py/issues/2246 +DEFAULT_NUM_POOLS_SSH = 9 + +DEFAULT_MAX_POOL_SIZE = 10 + +DEFAULT_DATA_CHUNK_SIZE = 1024 * 2048 + +DEFAULT_SWARM_ADDR_POOL = ['10.0.0.0/8'] +DEFAULT_SWARM_SUBNET_SIZE = 24 diff --git a/docker/context/__init__.py b/docker/context/__init__.py new file mode 100644 index 0000000000..46d462b0cf --- /dev/null +++ b/docker/context/__init__.py @@ -0,0 +1,2 @@ +from .api import ContextAPI +from .context import Context diff --git a/docker/context/api.py b/docker/context/api.py new file mode 100644 index 0000000000..9ac4ff470a --- /dev/null +++ b/docker/context/api.py @@ -0,0 +1,206 @@ +import json +import os + +from docker import errors + +from .config import ( + METAFILE, + get_current_context_name, + get_meta_dir, + write_context_name_to_docker_config, +) +from .context import Context + + +class ContextAPI: + """Context API. + Contains methods for context management: + create, list, remove, get, inspect. + """ + DEFAULT_CONTEXT = Context("default", "swarm") + + @classmethod + def create_context( + cls, name, orchestrator=None, host=None, tls_cfg=None, + default_namespace=None, skip_tls_verify=False): + """Creates a new context. + Returns: + (Context): a Context object. + Raises: + :py:class:`docker.errors.MissingContextParameter` + If a context name is not provided. + :py:class:`docker.errors.ContextAlreadyExists` + If a context with the name already exists. + :py:class:`docker.errors.ContextException` + If name is default. + + Example: + + >>> from docker.context import ContextAPI + >>> ctx = ContextAPI.create_context(name='test') + >>> print(ctx.Metadata) + { + "Name": "test", + "Metadata": {}, + "Endpoints": { + "docker": { + "Host": "unix:///var/run/docker.sock", + "SkipTLSVerify": false + } + } + } + """ + if not name: + raise errors.MissingContextParameter("name") + if name == "default": + raise errors.ContextException( + '"default" is a reserved context name') + ctx = Context.load_context(name) + if ctx: + raise errors.ContextAlreadyExists(name) + endpoint = "docker" + if orchestrator and orchestrator != "swarm": + endpoint = orchestrator + ctx = Context(name, orchestrator) + ctx.set_endpoint( + endpoint, host, tls_cfg, + skip_tls_verify=skip_tls_verify, + def_namespace=default_namespace) + ctx.save() + return ctx + + @classmethod + def get_context(cls, name=None): + """Retrieves a context object. 
+        Args:
+            name (str): The name of the context
+
+        Example:
+
+            >>> from docker.context import ContextAPI
+            >>> ctx = ContextAPI.get_context(name='test')
+            >>> print(ctx.Metadata)
+            {
+                "Name": "test",
+                "Metadata": {},
+                "Endpoints": {
+                    "docker": {
+                        "Host": "unix:///var/run/docker.sock",
+                        "SkipTLSVerify": false
+                    }
+                }
+            }
+        """
+        if not name:
+            name = get_current_context_name()
+        if name == "default":
+            return cls.DEFAULT_CONTEXT
+        return Context.load_context(name)
+
+    @classmethod
+    def contexts(cls):
+        """Context list.
+        Returns:
+            (list of Context): List of context objects.
+        Raises:
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
+        """
+        names = []
+        for dirname, dirnames, fnames in os.walk(get_meta_dir()):
+            for filename in fnames + dirnames:
+                if filename == METAFILE:
+                    try:
+                        data = json.load(
+                            open(os.path.join(dirname, filename)))
+                        names.append(data["Name"])
+                    except Exception as e:
+                        raise errors.ContextException(
+                            f"Failed to load metafile {filename}: {e}",
+                        ) from e
+
+        contexts = [cls.DEFAULT_CONTEXT]
+        for name in names:
+            contexts.append(Context.load_context(name))
+        return contexts
+
+    @classmethod
+    def get_current_context(cls):
+        """Get current context.
+        Returns:
+            (Context): current context object.
+        """
+        return cls.get_context()
+
+    @classmethod
+    def set_current_context(cls, name="default"):
+        ctx = cls.get_context(name)
+        if not ctx:
+            raise errors.ContextNotFound(name)
+
+        err = write_context_name_to_docker_config(name)
+        if err:
+            raise errors.ContextException(
+                f'Failed to set current context: {err}')
+
+    @classmethod
+    def remove_context(cls, name):
+        """Remove a context. Similar to the ``docker context rm`` command.
+
+        Args:
+            name (str): The name of the context
+
+        Raises:
+            :py:class:`docker.errors.MissingContextParameter`
+                If a context name is not provided.
+            :py:class:`docker.errors.ContextNotFound`
+                If a context with the name does not exist.
+            :py:class:`docker.errors.ContextException`
+                If name is default.
+
+        Example:
+
+            >>> from docker.context import ContextAPI
+            >>> ContextAPI.remove_context(name='test')
+            >>>
+        """
+        if not name:
+            raise errors.MissingContextParameter("name")
+        if name == "default":
+            raise errors.ContextException(
+                'context "default" cannot be removed')
+        ctx = Context.load_context(name)
+        if not ctx:
+            raise errors.ContextNotFound(name)
+        if name == get_current_context_name():
+            write_context_name_to_docker_config(None)
+        ctx.remove()
+
+    @classmethod
+    def inspect_context(cls, name="default"):
+        """Inspect a context. Similar to the ``docker context inspect``
+        command.
+
+        Args:
+            name (str): The name of the context
+
+        Raises:
+            :py:class:`docker.errors.MissingContextParameter`
+                If a context name is not provided.
+            :py:class:`docker.errors.ContextNotFound`
+                If a context with the name does not exist.
+
+        Example:
+
+            >>> from docker.context import ContextAPI
+            >>> ContextAPI.inspect_context(name='test')
+            >>>
+        """
+        if not name:
+            raise errors.MissingContextParameter("name")
+        if name == "default":
+            return cls.DEFAULT_CONTEXT()
+        ctx = Context.load_context(name)
+        if not ctx:
+            raise errors.ContextNotFound(name)
+
+        return ctx()
diff --git a/docker/context/config.py b/docker/context/config.py
new file mode 100644
index 0000000000..5a6373aa4e
--- /dev/null
+++ b/docker/context/config.py
@@ -0,0 +1,81 @@
+import hashlib
+import json
+import os
+
+from docker import utils
+from docker.constants import DEFAULT_UNIX_SOCKET, IS_WINDOWS_PLATFORM
+from docker.utils.config import find_config_file
+
+METAFILE = "meta.json"
+
+
+def get_current_context_name():
+    name = "default"
+    docker_cfg_path = find_config_file()
+    if docker_cfg_path:
+        try:
+            with open(docker_cfg_path) as f:
+                name = json.load(f).get("currentContext", "default")
+        except Exception:
+            return "default"
+    return name
+
+
+def write_context_name_to_docker_config(name=None):
+    if name == 'default':
+        name = None
+    docker_cfg_path = find_config_file()
+    config = {}
+    if docker_cfg_path:
+        try:
+            with open(docker_cfg_path) as f:
+                config = json.load(f)
+        except Exception as e:
+            return e
+    current_context = config.get("currentContext", None)
+    if current_context and not name:
+        del config["currentContext"]
+    elif name:
+        config["currentContext"] = name
+    else:
+        return
+    try:
+        with open(docker_cfg_path, "w") as f:
+            json.dump(config, f, indent=4)
+    except Exception as e:
+        return e
+
+
+def get_context_id(name):
+    return hashlib.sha256(name.encode('utf-8')).hexdigest()
+
+
+def get_context_dir():
+    return os.path.join(os.path.dirname(find_config_file() or ""), "contexts")
+
+
+def get_meta_dir(name=None):
+    meta_dir = os.path.join(get_context_dir(), "meta")
+    if name:
+        return os.path.join(meta_dir, get_context_id(name))
+    return meta_dir
+
+
+def get_meta_file(name):
+    return os.path.join(get_meta_dir(name), METAFILE)
+
+
+def get_tls_dir(name=None, endpoint=""):
+    context_dir = get_context_dir()
+    if name:
+        return os.path.join(context_dir, "tls", get_context_id(name), endpoint)
+    return os.path.join(context_dir, "tls")
+
+
+def get_context_host(path=None, tls=False):
+    host = utils.parse_host(path, IS_WINDOWS_PLATFORM, tls)
+    if host == DEFAULT_UNIX_SOCKET:
+        # remove http+ from default docker socket url
+        if host.startswith("http+"):
+            host = host[5:]
+    return host
diff --git a/docker/context/context.py b/docker/context/context.py
new file mode 100644
index 0000000000..da17d94781
--- /dev/null
+++ b/docker/context/context.py
@@ -0,0 +1,249 @@
+import json
+import os
+from shutil import copyfile, rmtree
+
+from docker.errors import ContextException
+from docker.tls import TLSConfig
+
+from .config import (
+    get_context_host,
+    get_meta_dir,
+    get_meta_file,
+    get_tls_dir,
+)
+
+
+class Context:
+    """A context."""
+
+    def __init__(self, name, orchestrator=None, host=None, endpoints=None,
+                 tls=False):
+        if not name:
+            raise Exception("Name not provided")
+        self.name = name
+        self.context_type = None
+        self.orchestrator = orchestrator
+        self.endpoints = {}
+        self.tls_cfg = {}
+        self.meta_path = "IN MEMORY"
+        self.tls_path = "IN MEMORY"
+
+        if not endpoints:
+            # set default docker endpoint if no endpoint is set
+            default_endpoint = "docker" if (
+                not orchestrator or orchestrator == "swarm"
+            ) else orchestrator
+
+            self.endpoints = {
+                default_endpoint: {
+                    "Host": get_context_host(host, tls),
+                    "SkipTLSVerify": not tls
+                }
+            }
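+            # Illustrative note (not part of the original changeset): with the
+            # defaults on Linux, Context("test", "swarm") resolves to endpoints
+            # like {"docker": {"Host": "unix:///var/run/docker.sock",
+            # "SkipTLSVerify": True}}, since get_context_host() falls back to
+            # the default socket and ``tls`` defaults to False.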
+ return + + # check docker endpoints + for k, v in endpoints.items(): + if not isinstance(v, dict): + # unknown format + raise ContextException( + f"Unknown endpoint format for context {name}: {v}", + ) + + self.endpoints[k] = v + if k != "docker": + continue + + self.endpoints[k]["Host"] = v.get("Host", get_context_host( + host, tls)) + self.endpoints[k]["SkipTLSVerify"] = bool(v.get( + "SkipTLSVerify", not tls)) + + def set_endpoint( + self, name="docker", host=None, tls_cfg=None, + skip_tls_verify=False, def_namespace=None): + self.endpoints[name] = { + "Host": get_context_host(host, not skip_tls_verify), + "SkipTLSVerify": skip_tls_verify + } + if def_namespace: + self.endpoints[name]["DefaultNamespace"] = def_namespace + + if tls_cfg: + self.tls_cfg[name] = tls_cfg + + def inspect(self): + return self.__call__() + + @classmethod + def load_context(cls, name): + meta = Context._load_meta(name) + if meta: + instance = cls( + meta["Name"], + orchestrator=meta["Metadata"].get("StackOrchestrator", None), + endpoints=meta.get("Endpoints", None)) + instance.context_type = meta["Metadata"].get("Type", None) + instance._load_certs() + instance.meta_path = get_meta_dir(name) + return instance + return None + + @classmethod + def _load_meta(cls, name): + meta_file = get_meta_file(name) + if not os.path.isfile(meta_file): + return None + + metadata = {} + try: + with open(meta_file) as f: + metadata = json.load(f) + except (OSError, KeyError, ValueError) as e: + # unknown format + raise Exception( + f"Detected corrupted meta file for context {name} : {e}" + ) from e + + # for docker endpoints, set defaults for + # Host and SkipTLSVerify fields + for k, v in metadata["Endpoints"].items(): + if k != "docker": + continue + metadata["Endpoints"][k]["Host"] = v.get( + "Host", get_context_host(None, False)) + metadata["Endpoints"][k]["SkipTLSVerify"] = bool( + v.get("SkipTLSVerify", True)) + + return metadata + + def _load_certs(self): + certs = {} + tls_dir = get_tls_dir(self.name) + for endpoint in self.endpoints.keys(): + if not os.path.isdir(os.path.join(tls_dir, endpoint)): + continue + ca_cert = None + cert = None + key = None + for filename in os.listdir(os.path.join(tls_dir, endpoint)): + if filename.startswith("ca"): + ca_cert = os.path.join(tls_dir, endpoint, filename) + elif filename.startswith("cert"): + cert = os.path.join(tls_dir, endpoint, filename) + elif filename.startswith("key"): + key = os.path.join(tls_dir, endpoint, filename) + if all([ca_cert, cert, key]): + verify = None + if endpoint == "docker" and not self.endpoints["docker"].get( + "SkipTLSVerify", False): + verify = True + certs[endpoint] = TLSConfig( + client_cert=(cert, key), ca_cert=ca_cert, verify=verify) + self.tls_cfg = certs + self.tls_path = tls_dir + + def save(self): + meta_dir = get_meta_dir(self.name) + if not os.path.isdir(meta_dir): + os.makedirs(meta_dir) + with open(get_meta_file(self.name), "w") as f: + f.write(json.dumps(self.Metadata)) + + tls_dir = get_tls_dir(self.name) + for endpoint, tls in self.tls_cfg.items(): + if not os.path.isdir(os.path.join(tls_dir, endpoint)): + os.makedirs(os.path.join(tls_dir, endpoint)) + + ca_file = tls.ca_cert + if ca_file: + copyfile(ca_file, os.path.join( + tls_dir, endpoint, os.path.basename(ca_file))) + + if tls.cert: + cert_file, key_file = tls.cert + copyfile(cert_file, os.path.join( + tls_dir, endpoint, os.path.basename(cert_file))) + copyfile(key_file, os.path.join( + tls_dir, endpoint, os.path.basename(key_file))) + + self.meta_path = get_meta_dir(self.name) + 
self.tls_path = get_tls_dir(self.name) + + def remove(self): + if os.path.isdir(self.meta_path): + rmtree(self.meta_path) + if os.path.isdir(self.tls_path): + rmtree(self.tls_path) + + def __repr__(self): + return f"<{self.__class__.__name__}: '{self.name}'>" + + def __str__(self): + return json.dumps(self.__call__(), indent=2) + + def __call__(self): + result = self.Metadata + result.update(self.TLSMaterial) + result.update(self.Storage) + return result + + def is_docker_host(self): + return self.context_type is None + + @property + def Name(self): + return self.name + + @property + def Host(self): + if not self.orchestrator or self.orchestrator == "swarm": + endpoint = self.endpoints.get("docker", None) + if endpoint: + return endpoint.get("Host", None) + return None + + return self.endpoints[self.orchestrator].get("Host", None) + + @property + def Orchestrator(self): + return self.orchestrator + + @property + def Metadata(self): + meta = {} + if self.orchestrator: + meta = {"StackOrchestrator": self.orchestrator} + return { + "Name": self.name, + "Metadata": meta, + "Endpoints": self.endpoints + } + + @property + def TLSConfig(self): + key = self.orchestrator + if not key or key == "swarm": + key = "docker" + if key in self.tls_cfg.keys(): + return self.tls_cfg[key] + return None + + @property + def TLSMaterial(self): + certs = {} + for endpoint, tls in self.tls_cfg.items(): + cert, key = tls.cert + certs[endpoint] = list( + map(os.path.basename, [tls.ca_cert, cert, key])) + return { + "TLSMaterial": certs + } + + @property + def Storage(self): + return { + "Storage": { + "MetadataPath": self.meta_path, + "TLSPath": self.tls_path + }} diff --git a/docker/credentials/__init__.py b/docker/credentials/__init__.py new file mode 100644 index 0000000000..80d19e7986 --- /dev/null +++ b/docker/credentials/__init__.py @@ -0,0 +1,8 @@ +from .constants import ( + DEFAULT_LINUX_STORE, + DEFAULT_OSX_STORE, + DEFAULT_WIN32_STORE, + PROGRAM_PREFIX, +) +from .errors import CredentialsNotFound, StoreError +from .store import Store diff --git a/docker/credentials/constants.py b/docker/credentials/constants.py new file mode 100644 index 0000000000..6a82d8da42 --- /dev/null +++ b/docker/credentials/constants.py @@ -0,0 +1,4 @@ +PROGRAM_PREFIX = 'docker-credential-' +DEFAULT_LINUX_STORE = 'secretservice' +DEFAULT_OSX_STORE = 'osxkeychain' +DEFAULT_WIN32_STORE = 'wincred' diff --git a/docker/credentials/errors.py b/docker/credentials/errors.py new file mode 100644 index 0000000000..d059fd9fbb --- /dev/null +++ b/docker/credentials/errors.py @@ -0,0 +1,17 @@ +class StoreError(RuntimeError): + pass + + +class CredentialsNotFound(StoreError): + pass + + +class InitializationError(StoreError): + pass + + +def process_store_error(cpe, program): + message = cpe.output.decode('utf-8') + if 'credentials not found in native keychain' in message: + return CredentialsNotFound(f'No matching credentials in {program}') + return StoreError(f'Credentials store {program} exited with "{message}".') diff --git a/docker/credentials/store.py b/docker/credentials/store.py new file mode 100644 index 0000000000..00d693a4be --- /dev/null +++ b/docker/credentials/store.py @@ -0,0 +1,93 @@ +import errno +import json +import shutil +import subprocess +import warnings + +from . 
import constants, errors +from .utils import create_environment_dict + + +class Store: + def __init__(self, program, environment=None): + """ Create a store object that acts as an interface to + perform the basic operations for storing, retrieving + and erasing credentials using `program`. + """ + self.program = constants.PROGRAM_PREFIX + program + self.exe = shutil.which(self.program) + self.environment = environment + if self.exe is None: + warnings.warn( + f'{self.program} not installed or not available in PATH', + stacklevel=1, + ) + + def get(self, server): + """ Retrieve credentials for `server`. If no credentials are found, + a `StoreError` will be raised. + """ + if not isinstance(server, bytes): + server = server.encode('utf-8') + data = self._execute('get', server) + result = json.loads(data.decode('utf-8')) + + # docker-credential-pass will return an object for inexistent servers + # whereas other helpers will exit with returncode != 0. For + # consistency, if no significant data is returned, + # raise CredentialsNotFound + if result['Username'] == '' and result['Secret'] == '': + raise errors.CredentialsNotFound( + f'No matching credentials in {self.program}' + ) + + return result + + def store(self, server, username, secret): + """ Store credentials for `server`. Raises a `StoreError` if an error + occurs. + """ + data_input = json.dumps({ + 'ServerURL': server, + 'Username': username, + 'Secret': secret + }).encode('utf-8') + return self._execute('store', data_input) + + def erase(self, server): + """ Erase credentials for `server`. Raises a `StoreError` if an error + occurs. + """ + if not isinstance(server, bytes): + server = server.encode('utf-8') + self._execute('erase', server) + + def list(self): + """ List stored credentials. Requires v0.4.0+ of the helper. + """ + data = self._execute('list', None) + return json.loads(data.decode('utf-8')) + + def _execute(self, subcmd, data_input): + if self.exe is None: + raise errors.StoreError( + f'{self.program} not installed or not available in PATH' + ) + output = None + env = create_environment_dict(self.environment) + try: + output = subprocess.check_output( + [self.exe, subcmd], input=data_input, env=env, + ) + except subprocess.CalledProcessError as e: + raise errors.process_store_error(e, self.program) from e + except OSError as e: + if e.errno == errno.ENOENT: + raise errors.StoreError( + f'{self.program} not installed or not available in PATH' + ) from e + else: + raise errors.StoreError( + f'Unexpected OS error "{e.strerror}", errno={e.errno}' + ) from e + return output diff --git a/docker/credentials/utils.py b/docker/credentials/utils.py new file mode 100644 index 0000000000..5c83d05cfb --- /dev/null +++ b/docker/credentials/utils.py @@ -0,0 +1,10 @@ +import os + + +def create_environment_dict(overrides): + """ + Create and return a copy of os.environ with the specified overrides + """ + result = os.environ.copy() + result.update(overrides or {}) + return result diff --git a/docker/errors.py b/docker/errors.py index d15e332799..d03e10f693 100644 --- a/docker/errors.py +++ b/docker/errors.py @@ -1,55 +1,99 @@ -# Copyright 2014 dotCloud inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - import requests +_image_not_found_explanation_fragments = frozenset( + fragment.lower() for fragment in [ + 'no such image', + 'not found: does not exist or no pull access', + 'repository does not exist', + 'was found but does not match the specified platform', + ] +) + -class APIError(requests.exceptions.HTTPError): - def __init__(self, message, response, explanation=None): +class DockerException(Exception): + """ + A base class from which all other exceptions inherit. + + If you want to catch all errors that the Docker SDK might raise, + catch this base exception. + """ + + +def create_api_error_from_http_exception(e): + """ + Create a suitable APIError from requests.exceptions.HTTPError. + """ + response = e.response + try: + explanation = response.json()['message'] + except ValueError: + explanation = (response.text or '').strip() + cls = APIError + if response.status_code == 404: + explanation_msg = (explanation or '').lower() + if any(fragment in explanation_msg + for fragment in _image_not_found_explanation_fragments): + cls = ImageNotFound + else: + cls = NotFound + raise cls(e, response=response, explanation=explanation) from e + + +class APIError(requests.exceptions.HTTPError, DockerException): + """ + An HTTP error from the API. + """ + def __init__(self, message, response=None, explanation=None): # requests 1.2 supports response as a keyword argument, but # requests 1.1 doesn't - super(APIError, self).__init__(message) + super().__init__(message) self.response = response - self.explanation = explanation - if self.explanation is None and response.content: - self.explanation = response.content.strip() - def __str__(self): - message = super(APIError, self).__str__() + message = super().__str__() if self.is_client_error(): - message = '{0} Client Error: {1}'.format( - self.response.status_code, self.response.reason) + message = ( + f'{self.response.status_code} Client Error for ' + f'{self.response.url}: {self.response.reason}' + ) elif self.is_server_error(): - message = '{0} Server Error: {1}'.format( - self.response.status_code, self.response.reason) + message = ( + f'{self.response.status_code} Server Error for ' + f'{self.response.url}: {self.response.reason}' + ) if self.explanation: - message = '{0} ("{1}")'.format(message, self.explanation) + message = f'{message} ("{self.explanation}")' return message + @property + def status_code(self): + if self.response is not None: + return self.response.status_code + + def is_error(self): + return self.is_client_error() or self.is_server_error() + def is_client_error(self): - return 400 <= self.response.status_code < 500 + if self.status_code is None: + return False + return 400 <= self.status_code < 500 def is_server_error(self): - return 500 <= self.response.status_code < 600 + if self.status_code is None: + return False + return 500 <= self.status_code < 600 -class DockerException(Exception): +class NotFound(APIError): + pass + + +class ImageNotFound(NotFound): pass @@ -65,6 +109,10 @@ class InvalidConfigFile(DockerException): pass +class InvalidArgument(DockerException): + pass + + class 
DeprecatedMethod(DockerException): pass @@ -76,9 +124,86 @@ def __init__(self, msg): def __str__(self): return self.msg + (". TLS configurations should map the Docker CLI " "client configurations. See " - "http://docs.docker.com/examples/https/ for " - "API details.") + "https://docs.docker.com/engine/articles/https/ " + "for API details.") class NullResource(DockerException, ValueError): pass + + +class ContainerError(DockerException): + """ + Represents a container that has exited with a non-zero exit code. + """ + def __init__(self, container, exit_status, command, image, stderr): + self.container = container + self.exit_status = exit_status + self.command = command + self.image = image + self.stderr = stderr + + err = f": {stderr}" if stderr is not None else "" + super().__init__( + f"Command '{command}' in image '{image}' " + f"returned non-zero exit status {exit_status}{err}" + ) + + +class StreamParseError(RuntimeError): + def __init__(self, reason): + self.msg = reason + + +class BuildError(DockerException): + def __init__(self, reason, build_log): + super().__init__(reason) + self.msg = reason + self.build_log = build_log + + +class ImageLoadError(DockerException): + pass + + +def create_unexpected_kwargs_error(name, kwargs): + quoted_kwargs = [f"'{k}'" for k in sorted(kwargs)] + text = [f"{name}() "] + if len(quoted_kwargs) == 1: + text.append("got an unexpected keyword argument ") + else: + text.append("got unexpected keyword arguments ") + text.append(', '.join(quoted_kwargs)) + return TypeError(''.join(text)) + + +class MissingContextParameter(DockerException): + def __init__(self, param): + self.param = param + + def __str__(self): + return (f"missing parameter: {self.param}") + + +class ContextAlreadyExists(DockerException): + def __init__(self, name): + self.name = name + + def __str__(self): + return (f"context {self.name} already exists") + + +class ContextException(DockerException): + def __init__(self, msg): + self.msg = msg + + def __str__(self): + return (self.msg) + + +class ContextNotFound(DockerException): + def __init__(self, name): + self.name = name + + def __str__(self): + return (f"context '{self.name}' not found") diff --git a/tests/testdata/certs/ca.pem b/docker/models/__init__.py similarity index 100% rename from tests/testdata/certs/ca.pem rename to docker/models/__init__.py diff --git a/docker/models/configs.py b/docker/models/configs.py new file mode 100644 index 0000000000..4eba87f4e3 --- /dev/null +++ b/docker/models/configs.py @@ -0,0 +1,70 @@ +from ..api import APIClient +from .resource import Collection, Model + + +class Config(Model): + """A config.""" + id_attribute = 'ID' + + def __repr__(self): + return f"<{self.__class__.__name__}: '{self.name}'>" + + @property + def name(self): + return self.attrs['Spec']['Name'] + + def remove(self): + """ + Remove this config. + + Raises: + :py:class:`docker.errors.APIError` + If config failed to remove. + """ + return self.client.api.remove_config(self.id) + + +class ConfigCollection(Collection): + """Configs on the Docker server.""" + model = Config + + def create(self, **kwargs): + obj = self.client.api.create_config(**kwargs) + obj.setdefault("Spec", {})["Name"] = kwargs.get("name") + return self.prepare_model(obj) + create.__doc__ = APIClient.create_config.__doc__ + + def get(self, config_id): + """ + Get a config. + + Args: + config_id (str): Config ID. + + Returns: + (:py:class:`Config`): The config. + + Raises: + :py:class:`docker.errors.NotFound` + If the config does not exist. 
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
+        """
+        return self.prepare_model(self.client.api.inspect_config(config_id))
+
+    def list(self, **kwargs):
+        """
+        List configs. Similar to the ``docker config ls`` command.
+
+        Args:
+            filters (dict): Server-side list filtering options.
+
+        Returns:
+            (list of :py:class:`Config`): The configs.
+
+        Raises:
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
+        """
+        resp = self.client.api.configs(**kwargs)
+        return [self.prepare_model(obj) for obj in resp]
diff --git a/docker/models/containers.py b/docker/models/containers.py
new file mode 100644
index 0000000000..9c9e92c90f
--- /dev/null
+++ b/docker/models/containers.py
@@ -0,0 +1,1198 @@
+import copy
+import ntpath
+from collections import namedtuple
+
+from ..api import APIClient
+from ..constants import DEFAULT_DATA_CHUNK_SIZE
+from ..errors import (
+    ContainerError,
+    DockerException,
+    ImageNotFound,
+    NotFound,
+    create_unexpected_kwargs_error,
+)
+from ..types import HostConfig, NetworkingConfig
+from ..utils import version_gte
+from .images import Image
+from .resource import Collection, Model
+
+
+class Container(Model):
+    """ Local representation of a container object. Detailed configuration may
+    be accessed through the :py:attr:`attrs` attribute. Note that local
+    attributes are cached; users may call :py:meth:`reload` to
+    query the Docker daemon for the current properties, causing
+    :py:attr:`attrs` to be refreshed.
+    """
+
+    @property
+    def name(self):
+        """
+        The name of the container.
+        """
+        if self.attrs.get('Name') is not None:
+            return self.attrs['Name'].lstrip('/')
+
+    @property
+    def image(self):
+        """
+        The image of the container.
+        """
+        image_id = self.attrs.get('ImageID', self.attrs['Image'])
+        if image_id is None:
+            return None
+        return self.client.images.get(image_id.split(':')[1])
+
+    @property
+    def labels(self):
+        """
+        The labels of a container as dictionary.
+        """
+        try:
+            result = self.attrs['Config'].get('Labels')
+            return result or {}
+        except KeyError as ke:
+            raise DockerException(
+                'Label data is not available for sparse objects. Call reload()'
+                ' to retrieve all information'
+            ) from ke
+
+    @property
+    def status(self):
+        """
+        The status of the container. For example, ``running``, or ``exited``.
+        """
+        if isinstance(self.attrs['State'], dict):
+            return self.attrs['State']['Status']
+        return self.attrs['State']
+
+    @property
+    def health(self):
+        """
+        The healthcheck status of the container.
+
+        For example, ``healthy``, or ``unhealthy``.
+        """
+        return self.attrs.get('State', {}).get('Health', {}).get('Status', 'unknown')
+
+    @property
+    def ports(self):
+        """
+        The ports that the container exposes as a dictionary.
+        """
+        return self.attrs.get('NetworkSettings', {}).get('Ports', {})
+
+    def attach(self, **kwargs):
+        """
+        Attach to this container.
+
+        :py:meth:`logs` is a wrapper around this method, which you can
+        use instead if you want to fetch/stream container output without first
+        retrieving the entire backlog.
+
+        Args:
+            stdout (bool): Include stdout.
+            stderr (bool): Include stderr.
+            stream (bool): Return container output progressively as an iterator
+                of strings, rather than a single string.
+            logs (bool): Include the container's previous output.
+
+        Returns:
+            By default, the container's output as a single string.
+
+            If ``stream=True``, an iterator of output strings.
+
+        Raises:
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
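+
+        Example (an illustrative sketch, assuming ``client = docker.from_env()``
+        and an existing container):
+
+            >>> container = client.containers.run(
+            ...     'alpine', 'echo hello', detach=True)
+            >>> container.attach(stdout=True, logs=True)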
+ """ + return self.client.api.attach(self.id, **kwargs) + + def attach_socket(self, **kwargs): + """ + Like :py:meth:`attach`, but returns the underlying socket-like object + for the HTTP request. + + Args: + params (dict): Dictionary of request parameters (e.g. ``stdout``, + ``stderr``, ``stream``). + ws (bool): Use websockets instead of raw HTTP. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + return self.client.api.attach_socket(self.id, **kwargs) + + def commit(self, repository=None, tag=None, **kwargs): + """ + Commit a container to an image. Similar to the ``docker commit`` + command. + + Args: + repository (str): The repository to push the image to + tag (str): The tag to push + message (str): A commit message + author (str): The name of the author + pause (bool): Whether to pause the container before committing + changes (str): Dockerfile instructions to apply while committing + conf (dict): The configuration for the container. See the + `Engine API documentation + `_ + for full details. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + + resp = self.client.api.commit(self.id, repository=repository, tag=tag, + **kwargs) + return self.client.images.get(resp['Id']) + + def diff(self): + """ + Inspect changes on a container's filesystem. + + Returns: + (list) A list of dictionaries containing the attributes `Path` + and `Kind`. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + return self.client.api.diff(self.id) + + def exec_run(self, cmd, stdout=True, stderr=True, stdin=False, tty=False, + privileged=False, user='', detach=False, stream=False, + socket=False, environment=None, workdir=None, demux=False): + """ + Run a command inside this container. Similar to + ``docker exec``. + + Args: + cmd (str or list): Command to be executed + stdout (bool): Attach to stdout. Default: ``True`` + stderr (bool): Attach to stderr. Default: ``True`` + stdin (bool): Attach to stdin. Default: ``False`` + tty (bool): Allocate a pseudo-TTY. Default: False + privileged (bool): Run as privileged. + user (str): User to execute command as. Default: root + detach (bool): If true, detach from the exec command. + Default: False + stream (bool): Stream response data. Ignored if ``detach`` is true. + Default: False + socket (bool): Return the connection socket to allow custom + read/write operations. Default: False + environment (dict or list): A dictionary or a list of strings in + the following format ``["PASSWORD=xxx"]`` or + ``{"PASSWORD": "xxx"}``. + workdir (str): Path to working directory for this exec session + demux (bool): Return stdout and stderr separately + + Returns: + (ExecResult): A tuple of (exit_code, output) + exit_code: (int): + Exit code for the executed command or ``None`` if + either ``stream`` or ``socket`` is ``True``. + output: (generator, bytes, or tuple): + If ``stream=True``, a generator yielding response chunks. + If ``socket=True``, a socket object for the connection. + If ``demux=True``, a tuple of two bytes: stdout and stderr. + A bytestring containing response data otherwise. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. 
+ """ + resp = self.client.api.exec_create( + self.id, cmd, stdout=stdout, stderr=stderr, stdin=stdin, tty=tty, + privileged=privileged, user=user, environment=environment, + workdir=workdir, + ) + exec_output = self.client.api.exec_start( + resp['Id'], detach=detach, tty=tty, stream=stream, socket=socket, + demux=demux + ) + if socket or stream: + return ExecResult(None, exec_output) + + return ExecResult( + self.client.api.exec_inspect(resp['Id'])['ExitCode'], + exec_output + ) + + def export(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE): + """ + Export the contents of the container's filesystem as a tar archive. + + Args: + chunk_size (int): The number of bytes returned by each iteration + of the generator. If ``None``, data will be streamed as it is + received. Default: 2 MB + + Returns: + (str): The filesystem tar archive + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + return self.client.api.export(self.id, chunk_size) + + def get_archive(self, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE, + encode_stream=False): + """ + Retrieve a file or folder from the container in the form of a tar + archive. + + Args: + path (str): Path to the file or folder to retrieve + chunk_size (int): The number of bytes returned by each iteration + of the generator. If ``None``, data will be streamed as it is + received. Default: 2 MB + encode_stream (bool): Determines if data should be encoded + (gzip-compressed) during transmission. Default: False + + Returns: + (tuple): First element is a raw tar data stream. Second element is + a dict containing ``stat`` information on the specified ``path``. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + + Example: + + >>> f = open('./sh_bin.tar', 'wb') + >>> bits, stat = container.get_archive('/bin/sh') + >>> print(stat) + {'name': 'sh', 'size': 1075464, 'mode': 493, + 'mtime': '2018-10-01T15:37:48-07:00', 'linkTarget': ''} + >>> for chunk in bits: + ... f.write(chunk) + >>> f.close() + """ + return self.client.api.get_archive(self.id, path, + chunk_size, encode_stream) + + def kill(self, signal=None): + """ + Kill or send a signal to the container. + + Args: + signal (str or int): The signal to send. Defaults to ``SIGKILL`` + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + + return self.client.api.kill(self.id, signal=signal) + + def logs(self, **kwargs): + """ + Get logs from this container. Similar to the ``docker logs`` command. + + The ``stream`` parameter makes the ``logs`` function return a blocking + generator you can iterate over to retrieve log output as it happens. + + Args: + stdout (bool): Get ``STDOUT``. Default ``True`` + stderr (bool): Get ``STDERR``. Default ``True`` + stream (bool): Stream the response. Default ``False`` + timestamps (bool): Show timestamps. Default ``False`` + tail (str or int): Output specified number of lines at the end of + logs. Either an integer of number of lines or the string + ``all``. Default ``all`` + since (datetime, int, or float): Show logs since a given datetime, + integer epoch (in seconds) or float (in nanoseconds) + follow (bool): Follow log output. Default ``False`` + until (datetime, int, or float): Show logs that occurred before + the given datetime, integer epoch (in seconds), or + float (in nanoseconds) + + Returns: + (generator of bytes or bytes): Logs from the container. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. 
+ """ + return self.client.api.logs(self.id, **kwargs) + + def pause(self): + """ + Pauses all processes within this container. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + return self.client.api.pause(self.id) + + def put_archive(self, path, data): + """ + Insert a file or folder in this container using a tar archive as + source. + + Args: + path (str): Path inside the container where the file(s) will be + extracted. Must exist. + data (bytes or stream): tar data to be extracted + + Returns: + (bool): True if the call succeeds. + + Raises: + :py:class:`~docker.errors.APIError` If an error occurs. + """ + return self.client.api.put_archive(self.id, path, data) + + def remove(self, **kwargs): + """ + Remove this container. Similar to the ``docker rm`` command. + + Args: + v (bool): Remove the volumes associated with the container + link (bool): Remove the specified link and not the underlying + container + force (bool): Force the removal of a running container (uses + ``SIGKILL``) + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + return self.client.api.remove_container(self.id, **kwargs) + + def rename(self, name): + """ + Rename this container. Similar to the ``docker rename`` command. + + Args: + name (str): New name for the container + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + return self.client.api.rename(self.id, name) + + def resize(self, height, width): + """ + Resize the tty session. + + Args: + height (int): Height of tty session + width (int): Width of tty session + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + return self.client.api.resize(self.id, height, width) + + def restart(self, **kwargs): + """ + Restart this container. Similar to the ``docker restart`` command. + + Args: + timeout (int): Number of seconds to try to stop for before killing + the container. Once killed it will then be restarted. Default + is 10 seconds. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + return self.client.api.restart(self.id, **kwargs) + + def start(self, **kwargs): + """ + Start this container. Similar to the ``docker start`` command, but + doesn't support attach options. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + return self.client.api.start(self.id, **kwargs) + + def stats(self, **kwargs): + """ + Stream statistics for this container. Similar to the + ``docker stats`` command. + + Args: + decode (bool): If set to true, stream will be decoded into dicts + on the fly. Only applicable if ``stream`` is True. + False by default. + stream (bool): If set to false, only the current stats will be + returned instead of a stream. True by default. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + return self.client.api.stats(self.id, **kwargs) + + def stop(self, **kwargs): + """ + Stops a container. Similar to the ``docker stop`` command. + + Args: + timeout (int): Timeout in seconds to wait for the container to + stop before sending a ``SIGKILL``. Default: 10 + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + return self.client.api.stop(self.id, **kwargs) + + def top(self, **kwargs): + """ + Display the running processes of the container. + + Args: + ps_args (str): An optional arguments passed to ps (e.g. 
``aux``) + + Returns: + (str): The output of the top + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + return self.client.api.top(self.id, **kwargs) + + def unpause(self): + """ + Unpause all processes within the container. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + return self.client.api.unpause(self.id) + + def update(self, **kwargs): + """ + Update resource configuration of the containers. + + Args: + blkio_weight (int): Block IO (relative weight), between 10 and 1000 + cpu_period (int): Limit CPU CFS (Completely Fair Scheduler) period + cpu_quota (int): Limit CPU CFS (Completely Fair Scheduler) quota + cpu_shares (int): CPU shares (relative weight) + cpuset_cpus (str): CPUs in which to allow execution + cpuset_mems (str): MEMs in which to allow execution + mem_limit (int or str): Memory limit + mem_reservation (int or str): Memory soft limit + memswap_limit (int or str): Total memory (memory + swap), -1 to + disable swap + kernel_memory (int or str): Kernel memory limit + restart_policy (dict): Restart policy dictionary + + Returns: + (dict): Dictionary containing a ``Warnings`` key. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + return self.client.api.update_container(self.id, **kwargs) + + def wait(self, **kwargs): + """ + Block until the container stops, then return its exit code. Similar to + the ``docker wait`` command. + + Args: + timeout (int): Request timeout + condition (str): Wait until a container state reaches the given + condition, either ``not-running`` (default), ``next-exit``, + or ``removed`` + + Returns: + (dict): The API's response as a Python dictionary, including + the container's exit code under the ``StatusCode`` attribute. + + Raises: + :py:class:`requests.exceptions.ReadTimeout` + If the timeout is exceeded. + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + return self.client.api.wait(self.id, **kwargs) + + +class ContainerCollection(Collection): + model = Container + + def run(self, image, command=None, stdout=True, stderr=False, + remove=False, **kwargs): + """ + Run a container. By default, it will wait for the container to finish + and return its logs, similar to ``docker run``. + + If the ``detach`` argument is ``True``, it will start the container + and immediately return a :py:class:`Container` object, similar to + ``docker run -d``. + + Example: + Run a container and get its output: + + >>> import docker + >>> client = docker.from_env() + >>> client.containers.run('alpine', 'echo hello world') + b'hello world\\n' + + Run a container and detach: + + >>> container = client.containers.run('bfirsh/reticulate-splines', + detach=True) + >>> container.logs() + 'Reticulating spline 1...\\nReticulating spline 2...\\n' + + Args: + image (str): The image to run. + command (str or list): The command to run in the container. + auto_remove (bool): enable auto-removal of the container on daemon + side when the container's process exits. + blkio_weight_device: Block IO weight (relative device weight) in + the form of: ``[{"Path": "device_path", "Weight": weight}]``. + blkio_weight: Block IO weight (relative weight), accepts a weight + value between 10 and 1000. + cap_add (list of str): Add kernel capabilities. For example, + ``["SYS_ADMIN", "MKNOD"]``. + cap_drop (list of str): Drop kernel capabilities. + cgroup_parent (str): Override the default parent cgroup. 
+            cgroupns (str): Override the default cgroup namespace mode for the
+                container. One of:
+                - ``private`` the container runs in its own private cgroup
+                  namespace.
+                - ``host`` use the host system's cgroup namespace.
+            cpu_count (int): Number of usable CPUs (Windows only).
+            cpu_percent (int): Usable percentage of the available CPUs
+                (Windows only).
+            cpu_period (int): The length of a CPU period in microseconds.
+            cpu_quota (int): Microseconds of CPU time that the container can
+                get in a CPU period.
+            cpu_rt_period (int): Limit CPU real-time period in microseconds.
+            cpu_rt_runtime (int): Limit CPU real-time runtime in microseconds.
+            cpu_shares (int): CPU shares (relative weight).
+            cpuset_cpus (str): CPUs in which to allow execution (``0-3``,
+                ``0,1``).
+            cpuset_mems (str): Memory nodes (MEMs) in which to allow execution
+                (``0-3``, ``0,1``). Only effective on NUMA systems.
+            detach (bool): Run container in the background and return a
+                :py:class:`Container` object.
+            device_cgroup_rules (:py:class:`list`): A list of cgroup rules to
+                apply to the container.
+            device_read_bps: Limit read rate (bytes per second) from a device
+                in the form of: `[{"Path": "device_path", "Rate": rate}]`
+            device_read_iops: Limit read rate (IO per second) from a device.
+            device_write_bps: Limit write rate (bytes per second) from a
+                device.
+            device_write_iops: Limit write rate (IO per second) from a device.
+            devices (:py:class:`list`): Expose host devices to the container,
+                as a list of strings in the form
+                ``<path_on_host>:<path_in_container>:<cgroup_permissions>``.
+
+                For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
+                to have read-write access to the host's ``/dev/sda`` via a
+                node named ``/dev/xvda`` inside the container.
+            device_requests (:py:class:`list`): Expose host resources such as
+                GPUs to the container, as a list of
+                :py:class:`docker.types.DeviceRequest` instances.
+            dns (:py:class:`list`): Set custom DNS servers.
+            dns_opt (:py:class:`list`): Additional options to be added to the
+                container's ``resolv.conf`` file.
+            dns_search (:py:class:`list`): DNS search domains.
+            domainname (str or list): Set custom DNS search domains.
+            entrypoint (str or list): The entrypoint for the container.
+            environment (dict or list): Environment variables to set inside
+                the container, as a dictionary or a list of strings in the
+                format ``["SOMEVARIABLE=xxx"]``.
+            extra_hosts (dict): Additional hostnames to resolve inside the
+                container, as a mapping of hostname to IP address.
+            group_add (:py:class:`list`): List of additional group names and/or
+                IDs that the container process will run as.
+            healthcheck (dict): Specify a test to perform to check that the
+                container is healthy. The dict takes the following keys:
+
+                - test (:py:class:`list` or str): Test to perform to determine
+                  container health. Possible values:
+
+                  - Empty list: Inherit healthcheck from parent image
+                  - ``["NONE"]``: Disable healthcheck
+                  - ``["CMD", args...]``: exec arguments directly.
+                  - ``["CMD-SHELL", command]``: Run command in the system's
+                    default shell.
+
+                  If a string is provided, it will be used as a ``CMD-SHELL``
+                  command.
+                - interval (int): The time to wait between checks in
+                  nanoseconds. It should be 0 or at least 1000000 (1 ms).
+                - timeout (int): The time to wait before considering the check
+                  to have hung. It should be 0 or at least 1000000 (1 ms).
+                - retries (int): The number of consecutive failures needed to
+                  consider a container as unhealthy.
+                - start_period (int): Start period for the container to
+                  initialize before starting health-retries countdown in
+                  nanoseconds. It should be 0 or at least 1000000 (1 ms).
+            hostname (str): Optional hostname for the container.
+            init (bool): Run an init inside the container that forwards
+                signals and reaps processes
+            init_path (str): Path to the docker-init binary
+            ipc_mode (str): Set the IPC mode for the container.
+            isolation (str): Isolation technology to use. Default: `None`.
+            kernel_memory (int or str): Kernel memory limit
+            labels (dict or list): A dictionary of name-value labels (e.g.
+                ``{"label1": "value1", "label2": "value2"}``) or a list of
+                names of labels to set with empty values (e.g.
+                ``["label1", "label2"]``)
+            links (dict): Mapping of links using the
+                ``{'container': 'alias'}`` format. The alias is optional.
+                Containers declared in this dict will be linked to the new
+                container using the provided alias. Default: ``None``.
+            log_config (LogConfig): Logging configuration.
+            lxc_conf (dict): LXC config.
+            mac_address (str): MAC address to assign to the container.
+            mem_limit (int or str): Memory limit. Accepts float values
+                (which represent the memory limit of the created container in
+                bytes) or a string with a units identification char
+                (``100000b``, ``1000k``, ``128m``, ``1g``). If a string is
+                specified without a units character, bytes are assumed as an
+                intended unit.
+            mem_reservation (int or str): Memory soft limit.
+            mem_swappiness (int): Tune a container's memory swappiness
+                behavior. Accepts number between 0 and 100.
+            memswap_limit (str or int): Maximum amount of memory + swap a
+                container is allowed to consume.
+            mounts (:py:class:`list`): Specification for mounts to be added to
+                the container. More powerful alternative to ``volumes``. Each
+                item in the list is expected to be a
+                :py:class:`docker.types.Mount` object.
+            name (str): The name for this container.
+            nano_cpus (int): CPU quota in units of 1e-9 CPUs.
+            network (str): Name of the network this container will be connected
+                to at creation time. You can connect to additional networks
+                using :py:meth:`Network.connect`. Incompatible with
+                ``network_mode``.
+            network_disabled (bool): Disable networking.
+            network_mode (str): One of:
+
+                - ``bridge`` Create a new network stack for the container on
+                  the bridge network.
+                - ``none`` No networking for this container.
+                - ``container:<name|id>`` Reuse another container's network
+                  stack.
+                - ``host`` Use the host network stack.
+                  This mode is incompatible with ``ports``.
+
+                Incompatible with ``network``.
+            networking_config (Dict[str, EndpointConfig]):
+                Dictionary of EndpointConfig objects for each container network.
+                The key is the name of the network.
+                Defaults to ``None``.
+
+                Used in conjunction with ``network``.
+
+                Incompatible with ``network_mode``.
+            oom_kill_disable (bool): Whether to disable OOM killer.
+            oom_score_adj (int): An integer value containing the score given
+                to the container in order to tune OOM killer preferences.
+            pid_mode (str): If set to ``host``, use the host PID namespace
+                inside the container.
+            pids_limit (int): Tune a container's pids limit. Set ``-1`` for
+                unlimited.
+            platform (str): Platform in the format ``os[/arch[/variant]]``.
+                Only used if the method needs to pull the requested image.
+            ports (dict): Ports to bind inside the container.
+
+                The keys of the dictionary are the ports to bind inside the
+                container, either as an integer or a string in the form
+                ``port/protocol``, where the protocol is either ``tcp``,
+                ``udp``, or ``sctp``.
+
+                The values of the dictionary are the corresponding ports to
+                open on the host, which can be either:
+
+                - The port number, as an integer. For example,
+                  ``{'2222/tcp': 3333}`` will expose port 2222 inside the
+                  container as port 3333 on the host.
+                - ``None``, to assign a random host port. For example,
+                  ``{'2222/tcp': None}``.
+                - A tuple of ``(address, port)`` if you want to specify the
+                  host interface. For example,
+                  ``{'1111/tcp': ('127.0.0.1', 1111)}``.
+                - A list of integers, if you want to bind multiple host ports
+                  to a single container port. For example,
+                  ``{'1111/tcp': [1234, 4567]}``.
+
+                Incompatible with ``host`` network mode.
+            privileged (bool): Give extended privileges to this container.
+            publish_all_ports (bool): Publish all ports to the host.
+            read_only (bool): Mount the container's root filesystem as read
+                only.
+            remove (bool): Remove the container when it has finished running.
+                Default: ``False``.
+            restart_policy (dict): Restart the container when it exits.
+                Configured as a dictionary with keys:
+
+                - ``Name`` One of ``on-failure``, or ``always``.
+                - ``MaximumRetryCount`` Number of times to restart the
+                  container on failure.
+
+                For example:
+                ``{"Name": "on-failure", "MaximumRetryCount": 5}``
+
+            runtime (str): Runtime to use with this container.
+            security_opt (:py:class:`list`): A list of string values to
+                customize labels for MLS systems, such as SELinux.
+            shm_size (str or int): Size of /dev/shm (e.g. ``1G``).
+            stdin_open (bool): Keep ``STDIN`` open even if not attached.
+            stdout (bool): Return logs from ``STDOUT`` when ``detach=False``.
+                Default: ``True``.
+            stderr (bool): Return logs from ``STDERR`` when ``detach=False``.
+                Default: ``False``.
+            stop_signal (str): The stop signal to use to stop the container
+                (e.g. ``SIGINT``).
+            storage_opt (dict): Storage driver options per container as a
+                key-value mapping.
+            stream (bool): If true and ``detach`` is false, return a log
+                generator instead of a string. Ignored if ``detach`` is true.
+                Default: ``False``.
+            sysctls (dict): Kernel parameters to set in the container.
+            tmpfs (dict): Temporary filesystems to mount, as a dictionary
+                mapping a path inside the container to options for that path.
+
+                For example:
+
+                .. code-block:: python
+
+                    {
+                        '/mnt/vol2': '',
+                        '/mnt/vol1': 'size=3G,uid=1000'
+                    }
+
+            tty (bool): Allocate a pseudo-TTY.
+            ulimits (:py:class:`list`): Ulimits to set inside the container,
+                as a list of :py:class:`docker.types.Ulimit` instances.
+            use_config_proxy (bool): If ``True``, and if the docker client
+                configuration file (``~/.docker/config.json`` by default)
+                contains a proxy configuration, the corresponding environment
+                variables will be set in the container being built.
+            user (str or int): Username or UID to run commands as inside the
+                container.
+            userns_mode (str): Sets the user namespace mode for the container
+                when user namespace remapping option is enabled. Supported
+                values are: ``host``
+            uts_mode (str): Sets the UTS namespace mode for the container.
+                Supported values are: ``host``
+            version (str): The version of the API to use. Set to ``auto`` to
+                automatically detect the server's version. Default: ``1.35``
+            volume_driver (str): The name of a volume driver/plugin.
+            volumes (dict or list): A dictionary to configure volumes mounted
+                inside the container. The key is either the host path or a
+                volume name, and the value is a dictionary with the keys:
+
+                - ``bind`` The path to mount the volume inside the container
+                - ``mode`` Either ``rw`` to mount the volume read/write, or
+                  ``ro`` to mount it read-only.
+
+                For example:
+
+                .. code-block:: python
+
+                    {'/home/user1/': {'bind': '/mnt/vol2', 'mode': 'rw'},
+                     '/var/www': {'bind': '/mnt/vol1', 'mode': 'ro'}}
+
+                Or a list of strings, each of which specifies a volume mount.
+
+                For example:
+
+                .. code-block:: python
+
+                    ['/home/user1/:/mnt/vol2','/var/www:/mnt/vol1']
+
+            volumes_from (:py:class:`list`): List of container names or IDs to
+                get volumes from.
+            working_dir (str): Path to the working directory.
+
+        Returns:
+            The container logs, either ``STDOUT``, ``STDERR``, or both,
+            depending on the value of the ``stdout`` and ``stderr`` arguments.
+
+            ``STDOUT`` and ``STDERR`` may be read only if either ``json-file``
+            or ``journald`` logging driver is used. Thus, if you are using none
+            of these drivers, a ``None`` object is returned instead. See the
+            Engine API documentation for full details.
+
+            If ``detach`` is ``True``, a :py:class:`Container` object is
+            returned instead.
+
+        Raises:
+            :py:class:`docker.errors.ContainerError`
+                If the container exits with a non-zero exit code and
+                ``detach`` is ``False``.
+            :py:class:`docker.errors.ImageNotFound`
+                If the specified image does not exist.
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
+        """
+        if isinstance(image, Image):
+            image = image.id
+        stream = kwargs.pop('stream', False)
+        detach = kwargs.pop('detach', False)
+        platform = kwargs.get('platform', None)
+
+        if detach and remove:
+            if version_gte(self.client.api._version, '1.25'):
+                kwargs["auto_remove"] = True
+            else:
+                raise RuntimeError("The options 'detach' and 'remove' cannot "
+                                   "be used together in api versions < 1.25.")
+
+        if kwargs.get('network') and kwargs.get('network_mode'):
+            raise RuntimeError(
+                'The options "network" and "network_mode" can not be used '
+                'together.'
+            )
+
+        if kwargs.get('networking_config') and not kwargs.get('network'):
+            raise RuntimeError(
+                'The option "networking_config" can not be used '
+                'without "network".'
+            )
+
+        try:
+            container = self.create(image=image, command=command,
+                                    detach=detach, **kwargs)
+        except ImageNotFound:
+            self.client.images.pull(image, platform=platform)
+            container = self.create(image=image, command=command,
+                                    detach=detach, **kwargs)
+
+        container.start()
+
+        if detach:
+            return container
+
+        logging_driver = container.attrs['HostConfig']['LogConfig']['Type']
+
+        out = None
+        if logging_driver == 'json-file' or logging_driver == 'journald':
+            out = container.logs(
+                stdout=stdout, stderr=stderr, stream=True, follow=True
+            )
+
+        exit_status = container.wait()['StatusCode']
+        if exit_status != 0:
+            out = None
+            if not kwargs.get('auto_remove'):
+                out = container.logs(stdout=False, stderr=True)
+
+        if remove:
+            container.remove()
+            if exit_status != 0:
+                raise ContainerError(
+                    container, exit_status, command, image, out
+                )
+
+        if stream or out is None:
+            return out
+        return b''.join(out)
+
+    def create(self, image, command=None, **kwargs):
+        """
+        Create a container without starting it. Similar to ``docker create``.
+
+        Takes the same arguments as :py:meth:`run`, except for ``stdout``,
+        ``stderr``, and ``remove``.
+
+        Returns:
+            A :py:class:`Container` object.
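+
+        Example (an illustrative sketch, assuming ``client = docker.from_env()``):
+
+            >>> container = client.containers.create(
+            ...     'alpine', 'echo hi', labels={'purpose': 'demo'})
+            >>> container.start()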
+ + Raises: + :py:class:`docker.errors.ImageNotFound` + If the specified image does not exist. + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + if isinstance(image, Image): + image = image.id + kwargs['image'] = image + kwargs['command'] = command + kwargs['version'] = self.client.api._version + create_kwargs = _create_container_args(kwargs) + resp = self.client.api.create_container(**create_kwargs) + return self.get(resp['Id']) + + def get(self, container_id): + """ + Get a container by name or ID. + + Args: + container_id (str): Container name or ID. + + Returns: + A :py:class:`Container` object. + + Raises: + :py:class:`docker.errors.NotFound` + If the container does not exist. + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + resp = self.client.api.inspect_container(container_id) + return self.prepare_model(resp) + + def list(self, all=False, before=None, filters=None, limit=-1, since=None, + sparse=False, ignore_removed=False): + """ + List containers. Similar to the ``docker ps`` command. + + Args: + all (bool): Show all containers. Only running containers are shown + by default + since (str): Show only containers created since Id or Name, include + non-running ones + before (str): Show only container created before Id or Name, + include non-running ones + limit (int): Show `limit` last created containers, include + non-running ones + filters (dict): Filters to be processed on the image list. + Available filters: + + - `exited` (int): Only containers with specified exit code + - `status` (str): One of ``restarting``, ``running``, + ``paused``, ``exited`` + - `label` (str|list): format either ``"key"``, ``"key=value"`` + or a list of such. + - `id` (str): The id of the container. + - `name` (str): The name of the container. + - `ancestor` (str): Filter by container ancestor. Format of + ``[:tag]``, ````, or + ````. + - `before` (str): Only containers created before a particular + container. Give the container name or id. + - `since` (str): Only containers created after a particular + container. Give container name or id. + + A comprehensive list can be found in the documentation for + `docker ps + `_. + + sparse (bool): Do not inspect containers. Returns partial + information, but guaranteed not to block. Use + :py:meth:`Container.reload` on resulting objects to retrieve + all attributes. Default: ``False`` + ignore_removed (bool): Ignore failures due to missing containers + when attempting to inspect containers from the original list. + Set to ``True`` if race conditions are likely. Has no effect + if ``sparse=True``. Default: ``False`` + + Returns: + (list of :py:class:`Container`) + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. 
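+
+        Example:
+            An illustrative sketch (the filter values are examples, not
+            defaults):
+
+            >>> client.containers.list(all=True, filters={'status': 'exited'})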
+ """ + resp = self.client.api.containers(all=all, before=before, + filters=filters, limit=limit, + since=since) + if sparse: + return [self.prepare_model(r) for r in resp] + else: + containers = [] + for r in resp: + try: + containers.append(self.get(r['Id'])) + # a container may have been removed while iterating + except NotFound: + if not ignore_removed: + raise + return containers + + def prune(self, filters=None): + return self.client.api.prune_containers(filters=filters) + + prune.__doc__ = APIClient.prune_containers.__doc__ + + +# kwargs to copy straight from run to create +RUN_CREATE_KWARGS = [ + 'command', + 'detach', + 'domainname', + 'entrypoint', + 'environment', + 'healthcheck', + 'hostname', + 'image', + 'labels', + 'mac_address', + 'name', + 'network_disabled', + 'platform', + 'stdin_open', + 'stop_signal', + 'tty', + 'use_config_proxy', + 'user', + 'working_dir', +] + +# kwargs to copy straight from run to host_config +RUN_HOST_CONFIG_KWARGS = [ + 'auto_remove', + 'blkio_weight_device', + 'blkio_weight', + 'cap_add', + 'cap_drop', + 'cgroup_parent', + 'cgroupns', + 'cpu_count', + 'cpu_percent', + 'cpu_period', + 'cpu_quota', + 'cpu_shares', + 'cpuset_cpus', + 'cpuset_mems', + 'cpu_rt_period', + 'cpu_rt_runtime', + 'device_cgroup_rules', + 'device_read_bps', + 'device_read_iops', + 'device_write_bps', + 'device_write_iops', + 'devices', + 'device_requests', + 'dns_opt', + 'dns_search', + 'dns', + 'extra_hosts', + 'group_add', + 'init', + 'init_path', + 'ipc_mode', + 'isolation', + 'kernel_memory', + 'links', + 'log_config', + 'lxc_conf', + 'mem_limit', + 'mem_reservation', + 'mem_swappiness', + 'memswap_limit', + 'mounts', + 'nano_cpus', + 'network_mode', + 'oom_kill_disable', + 'oom_score_adj', + 'pid_mode', + 'pids_limit', + 'privileged', + 'publish_all_ports', + 'read_only', + 'restart_policy', + 'security_opt', + 'shm_size', + 'storage_opt', + 'sysctls', + 'tmpfs', + 'ulimits', + 'userns_mode', + 'uts_mode', + 'version', + 'volume_driver', + 'volumes_from', + 'runtime' +] + + +def _create_container_args(kwargs): + """ + Convert arguments to create() to arguments to create_container(). 
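+
+    For example, a minimal sketch of the conversion (the argument values are
+    illustrative):
+
+        >>> args = _create_container_args({
+        ...     'image': 'busybox',
+        ...     'version': '1.41',
+        ...     'ports': {'80/tcp': 8080},
+        ... })
+        >>> sorted(args)
+        ['host_config', 'image', 'ports']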
+ """ + # Copy over kwargs which can be copied directly + create_kwargs = {} + for key in copy.copy(kwargs): + if key in RUN_CREATE_KWARGS: + create_kwargs[key] = kwargs.pop(key) + host_config_kwargs = {} + for key in copy.copy(kwargs): + if key in RUN_HOST_CONFIG_KWARGS: + host_config_kwargs[key] = kwargs.pop(key) + + # Process kwargs which are split over both create and host_config + ports = kwargs.pop('ports', {}) + if ports: + host_config_kwargs['port_bindings'] = ports + + volumes = kwargs.pop('volumes', {}) + if volumes: + host_config_kwargs['binds'] = volumes + + network = kwargs.pop('network', None) + networking_config = kwargs.pop('networking_config', None) + if network: + if networking_config: + # Sanity check: check if the network is defined in the + # networking config dict, otherwise switch to None + if network not in networking_config: + networking_config = None + + create_kwargs['networking_config'] = NetworkingConfig( + networking_config + ) if networking_config else {network: None} + host_config_kwargs['network_mode'] = network + + # All kwargs should have been consumed by this point, so raise + # error if any are left + if kwargs: + raise create_unexpected_kwargs_error('run', kwargs) + + create_kwargs['host_config'] = HostConfig(**host_config_kwargs) + + # Fill in any kwargs which need processing by create_host_config first + port_bindings = create_kwargs['host_config'].get('PortBindings') + if port_bindings: + # sort to make consistent for tests + create_kwargs['ports'] = [tuple(p.split('/', 1)) + for p in sorted(port_bindings.keys())] + if volumes: + if isinstance(volumes, dict): + create_kwargs['volumes'] = [ + v.get('bind') for v in volumes.values() + ] + else: + create_kwargs['volumes'] = [ + _host_volume_from_bind(v) for v in volumes + ] + return create_kwargs + + +def _host_volume_from_bind(bind): + drive, rest = ntpath.splitdrive(bind) + bits = rest.split(':', 1) + if len(bits) == 1 or bits[1] in ('ro', 'rw'): + return drive + bits[0] + elif bits[1].endswith(':ro') or bits[1].endswith(':rw'): + return bits[1][:-3] + else: + return bits[1] + + +ExecResult = namedtuple('ExecResult', 'exit_code,output') +""" A result of Container.exec_run with the properties ``exit_code`` and + ``output``. """ diff --git a/docker/models/images.py b/docker/models/images.py new file mode 100644 index 0000000000..0e8cce3f82 --- /dev/null +++ b/docker/models/images.py @@ -0,0 +1,505 @@ +import itertools +import re +import warnings + +from ..api import APIClient +from ..constants import DEFAULT_DATA_CHUNK_SIZE +from ..errors import BuildError, ImageLoadError, InvalidArgument +from ..utils import parse_repository_tag +from ..utils.json_stream import json_stream +from .resource import Collection, Model + + +class Image(Model): + """ + An image on the server. + """ + def __repr__(self): + tag_str = "', '".join(self.tags) + return f"<{self.__class__.__name__}: '{tag_str}'>" + + @property + def labels(self): + """ + The labels of an image as dictionary. + """ + result = self.attrs['Config'].get('Labels') + return result or {} + + @property + def short_id(self): + """ + The ID of the image truncated to 12 characters, plus the ``sha256:`` + prefix. + """ + if self.id.startswith('sha256:'): + return self.id[:19] + return self.id[:12] + + @property + def tags(self): + """ + The image's tags. + """ + tags = self.attrs.get('RepoTags') + if tags is None: + tags = [] + return [tag for tag in tags if tag != ':'] + + def history(self): + """ + Show the history of an image. 
+
+        Returns:
+            (list): The history of the image.
+
+        Raises:
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
+        """
+        return self.client.api.history(self.id)
+
+    def remove(self, force=False, noprune=False):
+        """
+        Remove this image.
+
+        Args:
+            force (bool): Force removal of the image
+            noprune (bool): Do not delete untagged parents
+
+        Raises:
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
+        """
+        return self.client.api.remove_image(
+            self.id,
+            force=force,
+            noprune=noprune,
+        )
+
+    def save(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE, named=False):
+        """
+        Get a tarball of an image. Similar to the ``docker save`` command.
+
+        Args:
+            chunk_size (int): The generator will return up to that much data
+                per iteration, but may return less. If ``None``, data will be
+                streamed as it is received. Default: 2 MB
+            named (str or bool): If ``False`` (default), the tarball will not
+                retain repository and tag information for this image. If set
+                to ``True``, the first tag in the :py:attr:`~tags` list will
+                be used to identify the image. Alternatively, any element of
+                the :py:attr:`~tags` list can be used as an argument to use
+                that specific tag as the saved identifier.
+
+        Returns:
+            (generator): A stream of raw archive data.
+
+        Raises:
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
+
+        Example:
+
+            >>> image = cli.images.get("busybox:latest")
+            >>> f = open('/tmp/busybox-latest.tar', 'wb')
+            >>> for chunk in image.save():
+            ...     f.write(chunk)
+            >>> f.close()
+        """
+        img = self.id
+        if named:
+            img = self.tags[0] if self.tags else img
+            if isinstance(named, str):
+                if named not in self.tags:
+                    raise InvalidArgument(
+                        f"{named} is not a valid tag for this image"
+                    )
+                img = named
+
+        return self.client.api.get_image(img, chunk_size)
+
+    def tag(self, repository, tag=None, **kwargs):
+        """
+        Tag this image into a repository. Similar to the ``docker tag``
+        command.
+
+        Args:
+            repository (str): The repository to set for the tag
+            tag (str): The tag name
+            force (bool): Force the tag operation
+
+        Raises:
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
+
+        Returns:
+            (bool): ``True`` if successful
+        """
+        return self.client.api.tag(self.id, repository, tag=tag, **kwargs)
+
+
+class RegistryData(Model):
+    """
+    Image metadata stored on the registry, including available platforms.
+    """
+    def __init__(self, image_name, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.image_name = image_name
+
+    @property
+    def id(self):
+        """
+        The ID of the object.
+        """
+        return self.attrs['Descriptor']['digest']
+
+    @property
+    def short_id(self):
+        """
+        The ID of the image truncated to 12 characters, plus the ``sha256:``
+        prefix.
+        """
+        return self.id[:19]
+
+    def pull(self, platform=None):
+        """
+        Pull the image digest.
+
+        Args:
+            platform (str): The platform to pull the image for.
+                Default: ``None``
+
+        Returns:
+            (:py:class:`Image`): A reference to the pulled image.
+        """
+        repository, _ = parse_repository_tag(self.image_name)
+        return self.collection.pull(repository, tag=self.id, platform=platform)
+
+    def has_platform(self, platform):
+        """
+        Check whether the given platform identifier is available for this
+        digest.
+
+        Args:
+            platform (str or dict): A string using the ``os[/arch[/variant]]``
+                format, or a platform dictionary.
+
+        Returns:
+            (bool): ``True`` if the platform is recognized as available,
+            ``False`` otherwise.
+
+        Raises:
+            :py:class:`docker.errors.InvalidArgument`
+                If the platform argument is not a valid descriptor.
+        """
+        if platform and not isinstance(platform, dict):
+            parts = platform.split('/')
+            if len(parts) > 3 or len(parts) < 1:
+                raise InvalidArgument(
+                    f'"{platform}" is not a valid platform descriptor'
+                )
+            platform = {'os': parts[0]}
+            if len(parts) > 2:
+                platform['variant'] = parts[2]
+            if len(parts) > 1:
+                platform['architecture'] = parts[1]
+        return normalize_platform(
+            platform, self.client.version()
+        ) in self.attrs['Platforms']
+
+    def reload(self):
+        self.attrs = self.client.api.inspect_distribution(self.image_name)
+
+    reload.__doc__ = Model.reload.__doc__
+
+
+class ImageCollection(Collection):
+    model = Image
+
+    def build(self, **kwargs):
+        """
+        Build an image and return it. Similar to the ``docker build``
+        command. Either ``path`` or ``fileobj`` must be set.
+
+        If you already have a tar file for the Docker build context (including
+        a Dockerfile), pass a readable file-like object to ``fileobj``
+        and also pass ``custom_context=True``. If the stream is also
+        compressed, set ``encoding`` to the correct value (e.g. ``gzip``).
+
+        If you want to get the raw output of the build, use the
+        :py:meth:`~docker.api.build.BuildApiMixin.build` method in the
+        low-level API.
+
+        Args:
+            path (str): Path to the directory containing the Dockerfile
+            fileobj: A file object (or file-like object) to use as the
+                Dockerfile
+            tag (str): A tag to add to the final image
+            quiet (bool): Whether to return the status
+            nocache (bool): Don't use the cache when set to ``True``
+            rm (bool): Remove intermediate containers. The ``docker build``
+                command now defaults to ``--rm=true``, but we have kept the old
+                default of `False` to preserve backward compatibility
+            timeout (int): HTTP timeout
+            custom_context (bool): Optional if using ``fileobj``
+            encoding (str): The encoding for a stream. Set to ``gzip`` for
+                compressing
+            pull (bool): Downloads any updates to the FROM image in Dockerfiles
+            forcerm (bool): Always remove intermediate containers, even after
+                unsuccessful builds
+            dockerfile (str): path within the build context to the Dockerfile
+            buildargs (dict): A dictionary of build arguments
+            container_limits (dict): A dictionary of limits applied to each
+                container created by the build process. Valid keys:
+
+                - memory (int): set memory limit for build
+                - memswap (int): Total memory (memory + swap), -1 to disable
+                    swap
+                - cpushares (int): CPU shares (relative weight)
+                - cpusetcpus (str): CPUs in which to allow execution, e.g.,
+                    ``"0-3"``, ``"0,1"``
+            shmsize (int): Size of `/dev/shm` in bytes. The size must be
+                greater than 0. If omitted the system uses 64MB
+            labels (dict): A dictionary of labels to set on the image
+            cache_from (list): A list of images used for build cache
+                resolution
+            target (str): Name of the build-stage to build in a multi-stage
+                Dockerfile
+            network_mode (str): networking mode for the run commands during
+                build
+            squash (bool): Squash the resulting image's layers into a
+                single layer.
+            extra_hosts (dict): Extra hosts to add to /etc/hosts in building
+                containers, as a mapping of hostname to IP address.
+            platform (str): Platform in the format ``os[/arch[/variant]]``.
+            isolation (str): Isolation technology used during build.
+                Default: `None`.
+ use_config_proxy (bool): If ``True``, and if the docker client + configuration file (``~/.docker/config.json`` by default) + contains a proxy configuration, the corresponding environment + variables will be set in the container being built. + + Returns: + (tuple): The first item is the :py:class:`Image` object for the + image that was built. The second item is a generator of the + build logs as JSON-decoded objects. + + Raises: + :py:class:`docker.errors.BuildError` + If there is an error during the build. + :py:class:`docker.errors.APIError` + If the server returns any other error. + ``TypeError`` + If neither ``path`` nor ``fileobj`` is specified. + """ + resp = self.client.api.build(**kwargs) + if isinstance(resp, str): + return self.get(resp) + last_event = None + image_id = None + result_stream, internal_stream = itertools.tee(json_stream(resp)) + for chunk in internal_stream: + if 'error' in chunk: + raise BuildError(chunk['error'], result_stream) + if 'stream' in chunk: + match = re.search( + r'(^Successfully built |sha256:)([0-9a-f]+)$', + chunk['stream'] + ) + if match: + image_id = match.group(2) + last_event = chunk + if image_id: + return (self.get(image_id), result_stream) + raise BuildError(last_event or 'Unknown', result_stream) + + def get(self, name): + """ + Gets an image. + + Args: + name (str): The name of the image. + + Returns: + (:py:class:`Image`): The image. + + Raises: + :py:class:`docker.errors.ImageNotFound` + If the image does not exist. + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + return self.prepare_model(self.client.api.inspect_image(name)) + + def get_registry_data(self, name, auth_config=None): + """ + Gets the registry data for an image. + + Args: + name (str): The name of the image. + auth_config (dict): Override the credentials that are found in the + config for this request. ``auth_config`` should contain the + ``username`` and ``password`` keys to be valid. + + Returns: + (:py:class:`RegistryData`): The data object. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + return RegistryData( + image_name=name, + attrs=self.client.api.inspect_distribution(name, auth_config), + client=self.client, + collection=self, + ) + + def list(self, name=None, all=False, filters=None): + """ + List images on the server. + + Args: + name (str): Only show images belonging to the repository ``name`` + all (bool): Show intermediate image layers. By default, these are + filtered out. + filters (dict): Filters to be processed on the image list. + Available filters: + - ``dangling`` (bool) + - `label` (str|list): format either ``"key"``, ``"key=value"`` + or a list of such. + + Returns: + (list of :py:class:`Image`): The images. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + resp = self.client.api.images(name=name, all=all, filters=filters) + return [self.get(r["Id"]) for r in resp] + + def load(self, data): + """ + Load an image that was previously saved using + :py:meth:`~docker.models.images.Image.save` (or ``docker save``). + Similar to ``docker load``. + + Args: + data (binary): Image data to be loaded. + + Returns: + (list of :py:class:`Image`): The images. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. 
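+
+        Example:
+            A sketch of a round trip with :py:meth:`Image.save` (the file
+            path is illustrative):
+
+            >>> with open('/tmp/busybox-latest.tar', 'rb') as f:
+            ...     images = client.images.load(f.read())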
+ """ + resp = self.client.api.load_image(data) + images = [] + for chunk in resp: + if 'stream' in chunk: + match = re.search( + r'(^Loaded image ID: |^Loaded image: )(.+)$', + chunk['stream'] + ) + if match: + image_id = match.group(2) + images.append(image_id) + if 'errorDetail' in chunk: + raise ImageLoadError(chunk['errorDetail']['message']) + + return [self.get(i) for i in images] + + def pull(self, repository, tag=None, all_tags=False, **kwargs): + """ + Pull an image of the given name and return it. Similar to the + ``docker pull`` command. + If ``tag`` is ``None`` or empty, it is set to ``latest``. + If ``all_tags`` is set, the ``tag`` parameter is ignored and all image + tags will be pulled. + + If you want to get the raw pull output, use the + :py:meth:`~docker.api.image.ImageApiMixin.pull` method in the + low-level API. + + Args: + repository (str): The repository to pull + tag (str): The tag to pull + auth_config (dict): Override the credentials that are found in the + config for this request. ``auth_config`` should contain the + ``username`` and ``password`` keys to be valid. + platform (str): Platform in the format ``os[/arch[/variant]]`` + all_tags (bool): Pull all image tags + + Returns: + (:py:class:`Image` or list): The image that has been pulled. + If ``all_tags`` is True, the method will return a list + of :py:class:`Image` objects belonging to this repository. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + + Example: + + >>> # Pull the image tagged `latest` in the busybox repo + >>> image = client.images.pull('busybox') + + >>> # Pull all tags in the busybox repo + >>> images = client.images.pull('busybox', all_tags=True) + """ + repository, image_tag = parse_repository_tag(repository) + tag = tag or image_tag or 'latest' + + if 'stream' in kwargs: + warnings.warn( + '`stream` is not a valid parameter for this method' + ' and will be overridden', + stacklevel=1, + ) + del kwargs['stream'] + + pull_log = self.client.api.pull( + repository, tag=tag, stream=True, all_tags=all_tags, **kwargs + ) + for _ in pull_log: + # We don't do anything with the logs, but we need + # to keep the connection alive and wait for the image + # to be pulled. 
+            pass
+        if not all_tags:
+            sep = '@' if tag.startswith('sha256:') else ':'
+            return self.get(f'{repository}{sep}{tag}')
+        return self.list(repository)
+
+    def push(self, repository, tag=None, **kwargs):
+        return self.client.api.push(repository, tag=tag, **kwargs)
+    push.__doc__ = APIClient.push.__doc__
+
+    def remove(self, *args, **kwargs):
+        self.client.api.remove_image(*args, **kwargs)
+    remove.__doc__ = APIClient.remove_image.__doc__
+
+    def search(self, *args, **kwargs):
+        return self.client.api.search(*args, **kwargs)
+    search.__doc__ = APIClient.search.__doc__
+
+    def prune(self, filters=None):
+        return self.client.api.prune_images(filters=filters)
+    prune.__doc__ = APIClient.prune_images.__doc__
+
+    def prune_builds(self, *args, **kwargs):
+        return self.client.api.prune_builds(*args, **kwargs)
+    prune_builds.__doc__ = APIClient.prune_builds.__doc__
+
+
+def normalize_platform(platform, engine_info):
+    if platform is None:
+        platform = {}
+    if 'os' not in platform:
+        platform['os'] = engine_info['Os']
+    if 'architecture' not in platform:
+        platform['architecture'] = engine_info['Arch']
+    return platform
diff --git a/docker/models/networks.py b/docker/models/networks.py
new file mode 100644
index 0000000000..9b3ed7829c
--- /dev/null
+++ b/docker/models/networks.py
@@ -0,0 +1,218 @@
+from ..api import APIClient
+from ..utils import version_gte
+from .containers import Container
+from .resource import Collection, Model
+
+
+class Network(Model):
+    """
+    A Docker network.
+    """
+    @property
+    def name(self):
+        """
+        The name of the network.
+        """
+        return self.attrs.get('Name')
+
+    @property
+    def containers(self):
+        """
+        The containers that are connected to the network, as a list of
+        :py:class:`~docker.models.containers.Container` objects.
+        """
+        return [
+            self.client.containers.get(cid) for cid in
+            (self.attrs.get('Containers') or {}).keys()
+        ]
+
+    def connect(self, container, *args, **kwargs):
+        """
+        Connect a container to this network.
+
+        Args:
+            container (str): Container to connect to this network, as either
+                an ID, name, or :py:class:`~docker.models.containers.Container`
+                object.
+            aliases (:py:class:`list`): A list of aliases for this endpoint.
+                Names in that list can be used within the network to reach the
+                container. Defaults to ``None``.
+            links (:py:class:`list`): A list of links for this endpoint.
+                Containers declared in this list will be linked to this
+                container. Defaults to ``None``.
+            ipv4_address (str): The IP address of this container on the
+                network, using the IPv4 protocol. Defaults to ``None``.
+            ipv6_address (str): The IP address of this container on the
+                network, using the IPv6 protocol. Defaults to ``None``.
+            link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6)
+                addresses.
+            driver_opt (dict): A dictionary of options to provide to the
+                network driver. Defaults to ``None``.
+
+        Raises:
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
+        """
+        if isinstance(container, Container):
+            container = container.id
+        return self.client.api.connect_container_to_network(
+            container, self.id, *args, **kwargs
+        )
+
+    def disconnect(self, container, *args, **kwargs):
+        """
+        Disconnect a container from this network.
+
+        Args:
+            container (str): Container to disconnect from this network, as
+                either an ID, name, or
+                :py:class:`~docker.models.containers.Container` object.
+            force (bool): Force the container to disconnect from a network.
+                Default: ``False``
+
+        Raises:
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
+        """
+        if isinstance(container, Container):
+            container = container.id
+        return self.client.api.disconnect_container_from_network(
+            container, self.id, *args, **kwargs
+        )
+
+    def remove(self):
+        """
+        Remove this network.
+
+        Raises:
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
+        """
+        return self.client.api.remove_network(self.id)
+
+
+class NetworkCollection(Collection):
+    """
+    Networks on the Docker server.
+    """
+    model = Network
+
+    def create(self, name, *args, **kwargs):
+        """
+        Create a network. Similar to the ``docker network create`` command.
+
+        Args:
+            name (str): Name of the network
+            driver (str): Name of the driver used to create the network
+            options (dict): Driver options as a key-value dictionary
+            ipam (IPAMConfig): Optional custom IP scheme for the network.
+            check_duplicate (bool): Request daemon to check for networks with
+                same name. Default: ``None``.
+            internal (bool): Restrict external access to the network. Default
+                ``False``.
+            labels (dict): Map of labels to set on the network. Default
+                ``None``.
+            enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.
+            attachable (bool): If enabled, and the network is in the global
+                scope, non-service containers on worker nodes will be able to
+                connect to the network.
+            scope (str): Specify the network's scope (``local``, ``global`` or
+                ``swarm``)
+            ingress (bool): If set, create an ingress network which provides
+                the routing-mesh in swarm mode.
+
+        Returns:
+            (:py:class:`Network`): The network that was created.
+
+        Raises:
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
+
+        Example:
+            A network using the bridge driver:
+
+                >>> client.networks.create("network1", driver="bridge")
+
+            You can also create more advanced networks with custom IPAM
+            configurations. For example, setting the subnet to
+            ``192.168.52.0/24`` and gateway address to ``192.168.52.254``.
+
+            .. code-block:: python
+
+                >>> ipam_pool = docker.types.IPAMPool(
+                ...     subnet='192.168.52.0/24',
+                ...     gateway='192.168.52.254'
+                ... )
+                >>> ipam_config = docker.types.IPAMConfig(
+                ...     pool_configs=[ipam_pool]
+                ... )
+                >>> client.networks.create(
+                ...     "network1",
+                ...     driver="bridge",
+                ...     ipam=ipam_config
+                ... )
+
+        """
+        resp = self.client.api.create_network(name, *args, **kwargs)
+        return self.get(resp['Id'])
+
+    def get(self, network_id, *args, **kwargs):
+        """
+        Get a network by its ID.
+
+        Args:
+            network_id (str): The ID of the network.
+            verbose (bool): Retrieve the service details across the cluster in
+                swarm mode.
+            scope (str): Filter the network by scope (``swarm``, ``global``
+                or ``local``).
+
+        Returns:
+            (:py:class:`Network`) The network.
+
+        Raises:
+            :py:class:`docker.errors.NotFound`
+                If the network does not exist.
+
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
+
+        """
+        return self.prepare_model(
+            self.client.api.inspect_network(network_id, *args, **kwargs)
+        )
+
+    def list(self, *args, **kwargs):
+        """
+        List networks. Similar to the ``docker network ls`` command.
+
+        Args:
+            names (:py:class:`list`): List of names to filter by.
+            ids (:py:class:`list`): List of ids to filter by.
+            filters (dict): Filters to be processed on the network list.
+                Available filters:
+
+                - ``driver=[<driver-name>]`` Matches a network's driver.
+                - `label` (str|list): format either ``"key"``, ``"key=value"``
+                    or a list of such.
+                - ``type=["custom"|"builtin"]`` Filters networks by type.
+ greedy (bool): Fetch more details for each network individually. + You might want this to get the containers attached to them. + + Returns: + (list of :py:class:`Network`) The networks on the server. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + greedy = kwargs.pop('greedy', False) + resp = self.client.api.networks(*args, **kwargs) + networks = [self.prepare_model(item) for item in resp] + if greedy and version_gte(self.client.api._version, '1.28'): + for net in networks: + net.reload() + return networks + + def prune(self, filters=None): + return self.client.api.prune_networks(filters=filters) + prune.__doc__ = APIClient.prune_networks.__doc__ diff --git a/docker/models/nodes.py b/docker/models/nodes.py new file mode 100644 index 0000000000..2fa480c544 --- /dev/null +++ b/docker/models/nodes.py @@ -0,0 +1,107 @@ +from .resource import Collection, Model + + +class Node(Model): + """A node in a swarm.""" + id_attribute = 'ID' + + @property + def version(self): + """ + The version number of the service. If this is not the same as the + server, the :py:meth:`update` function will not work and you will + need to call :py:meth:`reload` before calling it again. + """ + return self.attrs.get('Version').get('Index') + + def update(self, node_spec): + """ + Update the node's configuration. + + Args: + node_spec (dict): Configuration settings to update. Any values + not provided will be removed. Default: ``None`` + + Returns: + `True` if the request went through. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + + Example: + + >>> node_spec = {'Availability': 'active', + 'Name': 'node-name', + 'Role': 'manager', + 'Labels': {'foo': 'bar'} + } + >>> node.update(node_spec) + + """ + return self.client.api.update_node(self.id, self.version, node_spec) + + def remove(self, force=False): + """ + Remove this node from the swarm. + + Args: + force (bool): Force remove an active node. Default: `False` + + Returns: + `True` if the request was successful. + + Raises: + :py:class:`docker.errors.NotFound` + If the node doesn't exist in the swarm. + + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + return self.client.api.remove_node(self.id, force=force) + + +class NodeCollection(Collection): + """Nodes on the Docker server.""" + model = Node + + def get(self, node_id): + """ + Get a node. + + Args: + node_id (string): ID of the node to be inspected. + + Returns: + A :py:class:`Node` object. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + return self.prepare_model(self.client.api.inspect_node(node_id)) + + def list(self, *args, **kwargs): + """ + List swarm nodes. + + Args: + filters (dict): Filters to process on the nodes list. Valid + filters: ``id``, ``name``, ``membership`` and ``role``. + Default: ``None`` + + Returns: + A list of :py:class:`Node` objects. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + + Example: + + >>> client.nodes.list(filters={'role': 'manager'}) + """ + return [ + self.prepare_model(n) + for n in self.client.api.nodes(*args, **kwargs) + ] diff --git a/docker/models/plugins.py b/docker/models/plugins.py new file mode 100644 index 0000000000..85d768c935 --- /dev/null +++ b/docker/models/plugins.py @@ -0,0 +1,206 @@ +from .. import errors +from .resource import Collection, Model + + +class Plugin(Model): + """ + A plugin on the server. 
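+
+    Example:
+        A usage sketch (the plugin name is illustrative):
+
+        >>> plugin = client.plugins.get('vieux/sshfs:latest')
+        >>> plugin.enable()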
+ """ + def __repr__(self): + return f"<{self.__class__.__name__}: '{self.name}'>" + + @property + def name(self): + """ + The plugin's name. + """ + return self.attrs.get('Name') + + @property + def enabled(self): + """ + Whether the plugin is enabled. + """ + return self.attrs.get('Enabled') + + @property + def settings(self): + """ + A dictionary representing the plugin's configuration. + """ + return self.attrs.get('Settings') + + def configure(self, options): + """ + Update the plugin's settings. + + Args: + options (dict): A key-value mapping of options. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + self.client.api.configure_plugin(self.name, options) + self.reload() + + def disable(self, force=False): + """ + Disable the plugin. + + Args: + force (bool): Force disable. Default: False + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + + self.client.api.disable_plugin(self.name, force) + self.reload() + + def enable(self, timeout=0): + """ + Enable the plugin. + + Args: + timeout (int): Timeout in seconds. Default: 0 + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + self.client.api.enable_plugin(self.name, timeout) + self.reload() + + def push(self): + """ + Push the plugin to a remote registry. + + Returns: + A dict iterator streaming the status of the upload. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + return self.client.api.push_plugin(self.name) + + def remove(self, force=False): + """ + Remove the plugin from the server. + + Args: + force (bool): Remove even if the plugin is enabled. + Default: False + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + return self.client.api.remove_plugin(self.name, force=force) + + def upgrade(self, remote=None): + """ + Upgrade the plugin. + + Args: + remote (string): Remote reference to upgrade to. The + ``:latest`` tag is optional and is the default if omitted. + Default: this plugin's name. + + Returns: + A generator streaming the decoded API logs + """ + if self.enabled: + raise errors.DockerError( + 'Plugin must be disabled before upgrading.' + ) + + if remote is None: + remote = self.name + privileges = self.client.api.plugin_privileges(remote) + yield from self.client.api.upgrade_plugin( + self.name, + remote, + privileges, + ) + self.reload() + + +class PluginCollection(Collection): + model = Plugin + + def create(self, name, plugin_data_dir, gzip=False): + """ + Create a new plugin. + + Args: + name (string): The name of the plugin. The ``:latest`` tag is + optional, and is the default if omitted. + plugin_data_dir (string): Path to the plugin data directory. + Plugin data directory must contain the ``config.json`` + manifest file and the ``rootfs`` directory. + gzip (bool): Compress the context using gzip. Default: False + + Returns: + (:py:class:`Plugin`): The newly created plugin. + """ + self.client.api.create_plugin(name, plugin_data_dir, gzip) + return self.get(name) + + def get(self, name): + """ + Gets a plugin. + + Args: + name (str): The name of the plugin. + + Returns: + (:py:class:`Plugin`): The plugin. + + Raises: + :py:class:`docker.errors.NotFound` If the plugin does not + exist. + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + return self.prepare_model(self.client.api.inspect_plugin(name)) + + def install(self, remote_name, local_name=None): + """ + Pull and install a plugin. 
+ + Args: + remote_name (string): Remote reference for the plugin to + install. The ``:latest`` tag is optional, and is the + default if omitted. + local_name (string): Local name for the pulled plugin. + The ``:latest`` tag is optional, and is the default if + omitted. Optional. + + Returns: + (:py:class:`Plugin`): The installed plugin + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + privileges = self.client.api.plugin_privileges(remote_name) + it = self.client.api.pull_plugin(remote_name, privileges, local_name) + for _data in it: + pass + return self.get(local_name or remote_name) + + def list(self): + """ + List plugins installed on the server. + + Returns: + (list of :py:class:`Plugin`): The plugins. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + resp = self.client.api.plugins() + return [self.prepare_model(r) for r in resp] diff --git a/docker/models/resource.py b/docker/models/resource.py new file mode 100644 index 0000000000..d3a35e84be --- /dev/null +++ b/docker/models/resource.py @@ -0,0 +1,92 @@ +class Model: + """ + A base class for representing a single object on the server. + """ + id_attribute = 'Id' + + def __init__(self, attrs=None, client=None, collection=None): + #: A client pointing at the server that this object is on. + self.client = client + + #: The collection that this model is part of. + self.collection = collection + + #: The raw representation of this object from the API + self.attrs = attrs + if self.attrs is None: + self.attrs = {} + + def __repr__(self): + return f"<{self.__class__.__name__}: {self.short_id}>" + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.id == other.id + + def __hash__(self): + return hash(f"{self.__class__.__name__}:{self.id}") + + @property + def id(self): + """ + The ID of the object. + """ + return self.attrs.get(self.id_attribute) + + @property + def short_id(self): + """ + The ID of the object, truncated to 12 characters. + """ + return self.id[:12] + + def reload(self): + """ + Load this object from the server again and update ``attrs`` with the + new data. + """ + new_model = self.collection.get(self.id) + self.attrs = new_model.attrs + + +class Collection: + """ + A base class for representing all objects of a particular type on the + server. + """ + + #: The type of object this collection represents, set by subclasses + model = None + + def __init__(self, client=None): + #: The client pointing at the server that this collection of objects + #: is on. + self.client = client + + def __call__(self, *args, **kwargs): + raise TypeError( + f"'{self.__class__.__name__}' object is not callable. " + "You might be trying to use the old (pre-2.0) API - " + "use docker.APIClient if so." + ) + + def list(self): + raise NotImplementedError + + def get(self, key): + raise NotImplementedError + + def create(self, attrs=None): + raise NotImplementedError + + def prepare_model(self, attrs): + """ + Create a model from a set of attributes. 
+ """ + if isinstance(attrs, Model): + attrs.client = self.client + attrs.collection = self + return attrs + elif isinstance(attrs, dict): + return self.model(attrs=attrs, client=self.client, collection=self) + else: + raise Exception(f"Can't create {self.model.__name__} from {attrs}") diff --git a/docker/models/secrets.py b/docker/models/secrets.py new file mode 100644 index 0000000000..38c48dc7eb --- /dev/null +++ b/docker/models/secrets.py @@ -0,0 +1,70 @@ +from ..api import APIClient +from .resource import Collection, Model + + +class Secret(Model): + """A secret.""" + id_attribute = 'ID' + + def __repr__(self): + return f"<{self.__class__.__name__}: '{self.name}'>" + + @property + def name(self): + return self.attrs['Spec']['Name'] + + def remove(self): + """ + Remove this secret. + + Raises: + :py:class:`docker.errors.APIError` + If secret failed to remove. + """ + return self.client.api.remove_secret(self.id) + + +class SecretCollection(Collection): + """Secrets on the Docker server.""" + model = Secret + + def create(self, **kwargs): + obj = self.client.api.create_secret(**kwargs) + obj.setdefault("Spec", {})["Name"] = kwargs.get("name") + return self.prepare_model(obj) + create.__doc__ = APIClient.create_secret.__doc__ + + def get(self, secret_id): + """ + Get a secret. + + Args: + secret_id (str): Secret ID. + + Returns: + (:py:class:`Secret`): The secret. + + Raises: + :py:class:`docker.errors.NotFound` + If the secret does not exist. + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + return self.prepare_model(self.client.api.inspect_secret(secret_id)) + + def list(self, **kwargs): + """ + List secrets. Similar to the ``docker secret ls`` command. + + Args: + filters (dict): Server-side list filtering options. + + Returns: + (list of :py:class:`Secret`): The secrets. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + resp = self.client.api.secrets(**kwargs) + return [self.prepare_model(obj) for obj in resp] diff --git a/docker/models/services.py b/docker/models/services.py new file mode 100644 index 0000000000..09502633e5 --- /dev/null +++ b/docker/models/services.py @@ -0,0 +1,390 @@ +import copy + +from docker.errors import InvalidArgument, create_unexpected_kwargs_error +from docker.types import ContainerSpec, Placement, ServiceMode, TaskTemplate + +from .resource import Collection, Model + + +class Service(Model): + """A service.""" + id_attribute = 'ID' + + @property + def name(self): + """The service's name.""" + return self.attrs['Spec']['Name'] + + @property + def version(self): + """ + The version number of the service. If this is not the same as the + server, the :py:meth:`update` function will not work and you will + need to call :py:meth:`reload` before calling it again. + """ + return self.attrs.get('Version').get('Index') + + def remove(self): + """ + Stop and remove the service. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + return self.client.api.remove_service(self.id) + + def tasks(self, filters=None): + """ + List the tasks in this service. + + Args: + filters (dict): A map of filters to process on the tasks list. + Valid filters: ``id``, ``name``, ``node``, + ``label``, and ``desired-state``. + + Returns: + :py:class:`list`: List of task dictionaries. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. 
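+
+        Example:
+            An illustrative sketch (the filter value is an example):
+
+            >>> service.tasks(filters={'desired-state': 'running'})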
+        """
+        if filters is None:
+            filters = {}
+        filters['service'] = self.id
+        return self.client.api.tasks(filters=filters)
+
+    def update(self, **kwargs):
+        """
+        Update a service's configuration. Similar to the ``docker service
+        update`` command.
+
+        Takes the same parameters as :py:meth:`~ServiceCollection.create`.
+
+        Raises:
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
+        """
+        # Image is required, so if it hasn't been set, use current image
+        if 'image' not in kwargs:
+            spec = self.attrs['Spec']['TaskTemplate']['ContainerSpec']
+            kwargs['image'] = spec['Image']
+
+        if kwargs.get('force_update') is True:
+            task_template = self.attrs['Spec']['TaskTemplate']
+            current_value = int(task_template.get('ForceUpdate', 0))
+            kwargs['force_update'] = current_value + 1
+
+        create_kwargs = _get_create_service_kwargs('update', kwargs)
+
+        return self.client.api.update_service(
+            self.id,
+            self.version,
+            **create_kwargs
+        )
+
+    def logs(self, **kwargs):
+        """
+        Get log stream for the service.
+        Note: This method works only for services with the ``json-file``
+        or ``journald`` logging drivers.
+
+        Args:
+            details (bool): Show extra details provided to logs.
+                Default: ``False``
+            follow (bool): Keep connection open to read logs as they are
+                sent by the Engine. Default: ``False``
+            stdout (bool): Return logs from ``stdout``. Default: ``False``
+            stderr (bool): Return logs from ``stderr``. Default: ``False``
+            since (int): UNIX timestamp for the logs starting point.
+                Default: 0
+            timestamps (bool): Add timestamps to every log line.
+            tail (string or int): Number of log lines to be returned,
+                counting from the current end of the logs. Specify an
+                integer or ``'all'`` to output all log lines.
+                Default: ``all``
+
+        Returns:
+            generator: Logs for the service.
+        """
+        is_tty = self.attrs['Spec']['TaskTemplate']['ContainerSpec'].get(
+            'TTY', False
+        )
+        return self.client.api.service_logs(self.id, is_tty=is_tty, **kwargs)
+
+    def scale(self, replicas):
+        """
+        Scale the service's containers.
+
+        Args:
+            replicas (int): The number of containers that should be running.
+
+        Returns:
+            bool: ``True`` if successful.
+        """
+
+        if 'Global' in self.attrs['Spec']['Mode'].keys():
+            raise InvalidArgument('Cannot scale a global container')
+
+        service_mode = ServiceMode('replicated', replicas)
+        return self.client.api.update_service(self.id, self.version,
+                                              mode=service_mode,
+                                              fetch_current_spec=True)
+
+    def force_update(self):
+        """
+        Force update the service even if no changes require it.
+
+        Returns:
+            bool: ``True`` if successful.
+        """
+
+        return self.update(force_update=True, fetch_current_spec=True)
+
+
+class ServiceCollection(Collection):
+    """Services on the Docker server."""
+    model = Service
+
+    def create(self, image, command=None, **kwargs):
+        """
+        Create a service. Similar to the ``docker service create`` command.
+
+        Args:
+            image (str): The image name to use for the containers.
+            command (list of str or str): Command to run.
+            args (list of str): Arguments to the command.
+            constraints (list of str): :py:class:`~docker.types.Placement`
+                constraints.
+            preferences (list of tuple): :py:class:`~docker.types.Placement`
+                preferences.
+            maxreplicas (int): Maximum number of replicas per node.
+            platforms (list of tuple): A list of platform constraints
+                expressed as ``(arch, os)`` tuples.
+            container_labels (dict): Labels to apply to the container.
+ endpoint_spec (EndpointSpec): Properties that can be configured to + access and load balance a service. Default: ``None``. + env (list of str): Environment variables, in the form + ``KEY=val``. + hostname (string): Hostname to set on the container. + init (boolean): Run an init inside the container that forwards + signals and reaps processes + isolation (string): Isolation technology used by the service's + containers. Only used for Windows containers. + labels (dict): Labels to apply to the service. + log_driver (str): Log driver to use for containers. + log_driver_options (dict): Log driver options. + mode (ServiceMode): Scheduling mode for the service. + Default:``None`` + mounts (list of str): Mounts for the containers, in the form + ``source:target:options``, where options is either + ``ro`` or ``rw``. + name (str): Name to give to the service. + networks (:py:class:`list`): List of network names or IDs or + :py:class:`~docker.types.NetworkAttachmentConfig` to attach the + service to. Default: ``None``. + resources (Resources): Resource limits and reservations. + restart_policy (RestartPolicy): Restart policy for containers. + secrets (list of :py:class:`~docker.types.SecretReference`): List + of secrets accessible to containers for this service. + stop_grace_period (int): Amount of time to wait for + containers to terminate before forcefully killing them. + update_config (UpdateConfig): Specification for the update strategy + of the service. Default: ``None`` + rollback_config (RollbackConfig): Specification for the rollback + strategy of the service. Default: ``None`` + user (str): User to run commands as. + workdir (str): Working directory for commands to run. + tty (boolean): Whether a pseudo-TTY should be allocated. + groups (:py:class:`list`): A list of additional groups that the + container process will run as. + open_stdin (boolean): Open ``stdin`` + read_only (boolean): Mount the container's root filesystem as read + only. + stop_signal (string): Set signal to stop the service's containers + healthcheck (Healthcheck): Healthcheck + configuration for this service. + hosts (:py:class:`dict`): A set of host to IP mappings to add to + the container's `hosts` file. + dns_config (DNSConfig): Specification for DNS + related configurations in resolver configuration file. + configs (:py:class:`list`): List of + :py:class:`~docker.types.ConfigReference` that will be exposed + to the service. + privileges (Privileges): Security options for the service's + containers. + cap_add (:py:class:`list`): A list of kernel capabilities to add to + the default set for the container. + cap_drop (:py:class:`list`): A list of kernel capabilities to drop + from the default set for the container. + sysctls (:py:class:`dict`): A dict of sysctl values to add to the + container + + Returns: + :py:class:`Service`: The created service. + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + kwargs['image'] = image + kwargs['command'] = command + create_kwargs = _get_create_service_kwargs('create', kwargs) + service_id = self.client.api.create_service(**create_kwargs) + return self.get(service_id) + + def get(self, service_id, insert_defaults=None): + """ + Get a service. + + Args: + service_id (str): The ID of the service. + insert_defaults (boolean): If true, default values will be merged + into the output. + + Returns: + :py:class:`Service`: The service. + + Raises: + :py:class:`docker.errors.NotFound` + If the service does not exist. 
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
+            :py:class:`docker.errors.InvalidVersion`
+                If one of the arguments is not supported with the current
+                API version.
+        """
+        return self.prepare_model(
+            self.client.api.inspect_service(service_id, insert_defaults)
+        )
+
+    def list(self, **kwargs):
+        """
+        List services.
+
+        Args:
+            filters (dict): Filters to process on the services list. Valid
+                filters: ``id``, ``name``, ``label`` and ``mode``.
+                Default: ``None``.
+            status (bool): Include the service task count of running and
+                desired tasks. Default: ``None``.
+
+        Returns:
+            list of :py:class:`Service`: The services.
+
+        Raises:
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
+        """
+        return [
+            self.prepare_model(s)
+            for s in self.client.api.services(**kwargs)
+        ]
+
+
+# kwargs to copy straight over to ContainerSpec
+CONTAINER_SPEC_KWARGS = [
+    'args',
+    'cap_add',
+    'cap_drop',
+    'command',
+    'configs',
+    'dns_config',
+    'env',
+    'groups',
+    'healthcheck',
+    'hostname',
+    'hosts',
+    'image',
+    'init',
+    'isolation',
+    'labels',
+    'mounts',
+    'open_stdin',
+    'privileges',
+    'read_only',
+    'secrets',
+    'stop_grace_period',
+    'stop_signal',
+    'tty',
+    'user',
+    'workdir',
+    'sysctls',
+]
+
+# kwargs to copy straight over to TaskTemplate
+TASK_TEMPLATE_KWARGS = [
+    'networks',
+    'resources',
+    'restart_policy',
+]
+
+# kwargs to copy straight over to create_service
+CREATE_SERVICE_KWARGS = [
+    'name',
+    'labels',
+    'mode',
+    'update_config',
+    'rollback_config',
+    'endpoint_spec',
+]
+
+PLACEMENT_KWARGS = [
+    'constraints',
+    'preferences',
+    'platforms',
+    'maxreplicas',
+]
+
+
+def _get_create_service_kwargs(func_name, kwargs):
+    # Copy over things which can be copied directly
+    create_kwargs = {}
+    for key in copy.copy(kwargs):
+        if key in CREATE_SERVICE_KWARGS:
+            create_kwargs[key] = kwargs.pop(key)
+    container_spec_kwargs = {}
+    for key in copy.copy(kwargs):
+        if key in CONTAINER_SPEC_KWARGS:
+            container_spec_kwargs[key] = kwargs.pop(key)
+    task_template_kwargs = {}
+    for key in copy.copy(kwargs):
+        if key in TASK_TEMPLATE_KWARGS:
+            task_template_kwargs[key] = kwargs.pop(key)
+
+    if 'container_labels' in kwargs:
+        container_spec_kwargs['labels'] = kwargs.pop('container_labels')
+
+    placement = {}
+    for key in copy.copy(kwargs):
+        if key in PLACEMENT_KWARGS:
+            placement[key] = kwargs.pop(key)
+    placement = Placement(**placement)
+    task_template_kwargs['placement'] = placement
+
+    if 'log_driver' in kwargs:
+        task_template_kwargs['log_driver'] = {
+            'Name': kwargs.pop('log_driver'),
+            'Options': kwargs.pop('log_driver_options', {})
+        }
+
+    if func_name == 'update':
+        if 'force_update' in kwargs:
+            task_template_kwargs['force_update'] = kwargs.pop('force_update')
+
+        # fetch the current spec by default if updating the service
+        # through the model
+        fetch_current_spec = kwargs.pop('fetch_current_spec', True)
+        create_kwargs['fetch_current_spec'] = fetch_current_spec
+
+    # All kwargs should have been consumed by this point, so raise
+    # error if any are left
+    if kwargs:
+        raise create_unexpected_kwargs_error(func_name, kwargs)
+
+    container_spec = ContainerSpec(**container_spec_kwargs)
+    task_template_kwargs['container_spec'] = container_spec
+    create_kwargs['task_template'] = TaskTemplate(**task_template_kwargs)
+    return create_kwargs
diff --git a/docker/models/swarm.py b/docker/models/swarm.py
new file mode 100644
index 0000000000..271cc5dcb1
--- /dev/null
+++ b/docker/models/swarm.py
@@ -0,0 +1,190 @@
+from docker.api import APIClient
+from docker.errors import APIError
+
+from .resource import Model
+
+
+class Swarm(Model):
+    """
+    The server's Swarm state. This is a singleton that must be reloaded to
+    get the current state of the Swarm.
+    """
+    id_attribute = 'ID'
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        if self.client:
+            try:
+                self.reload()
+            except APIError as e:
+                # FIXME: https://github.com/docker/docker/issues/29192
+                if e.response.status_code not in (406, 503):
+                    raise
+
+    @property
+    def version(self):
+        """
+        The version number of the swarm. If this is not the same as the
+        server, the :py:meth:`update` function will not work and you will
+        need to call :py:meth:`reload` before calling it again.
+        """
+        return self.attrs.get('Version').get('Index')
+
+    def get_unlock_key(self):
+        return self.client.api.get_unlock_key()
+    get_unlock_key.__doc__ = APIClient.get_unlock_key.__doc__
+
+    def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
+             force_new_cluster=False, default_addr_pool=None,
+             subnet_size=None, data_path_addr=None, data_path_port=None,
+             **kwargs):
+        """
+        Initialize a new swarm on this Engine.
+
+        Args:
+            advertise_addr (str): Externally reachable address advertised to
+                other nodes. This can either be an address/port combination in
+                the form ``192.168.1.1:4567``, or an interface followed by a
+                port number, like ``eth0:4567``. If the port number is omitted,
+                the port number from the listen address is used.
+
+                If not specified, it will be automatically detected when
+                possible.
+            listen_addr (str): Listen address used for inter-manager
+                communication, as well as determining the networking interface
+                used for the VXLAN Tunnel Endpoint (VTEP). This can either be
+                an address/port combination in the form ``192.168.1.1:4567``,
+                or an interface followed by a port number, like ``eth0:4567``.
+                If the port number is omitted, the default swarm listening port
+                is used. Default: ``0.0.0.0:2377``
+            force_new_cluster (bool): Force creating a new Swarm, even if
+                already part of one. Default: False
+            default_addr_pool (list of str): Default Address Pool specifies
+                default subnet pools for global scope networks. Each pool
+                should be specified as a CIDR block, like '10.0.0.0/8'.
+                Default: None
+            subnet_size (int): SubnetSize specifies the subnet size of the
+                networks created from the default subnet pool. Default: None
+            data_path_addr (string): Address or interface to use for data path
+                traffic. For example, 192.168.1.1, or an interface, like eth0.
+            data_path_port (int): Port number to use for data path traffic.
+                Acceptable port range is 1024 to 49151. If set to ``None`` or
+                0, the default port 4789 will be used. Default: None
+            task_history_retention_limit (int): Maximum number of task
+                history entries stored.
+            snapshot_interval (int): Number of log entries between snapshots.
+            keep_old_snapshots (int): Number of snapshots to keep beyond the
+                current snapshot.
+            log_entries_for_slow_followers (int): Number of log entries to
+                keep around to sync up slow followers after a snapshot is
+                created.
+            heartbeat_tick (int): Amount of ticks (in seconds) between each
+                heartbeat.
+            election_tick (int): Amount of ticks (in seconds) needed without a
+                leader to trigger a new election.
+            dispatcher_heartbeat_period (int): The delay for an agent to send
+                a heartbeat to the dispatcher.
+            node_cert_expiry (int): Automatic expiry for nodes certificates.
+            external_ca (dict): Configuration for forwarding signing requests
+                to an external certificate authority. Use
+                ``docker.types.SwarmExternalCA``.
+            name (string): Swarm's name
+            labels (dict): User-defined key/value metadata.
+            signing_ca_cert (str): The desired signing CA certificate for all
+                swarm node TLS leaf certificates, in PEM format.
+            signing_ca_key (str): The desired signing CA key for all swarm
+                node TLS leaf certificates, in PEM format.
+            ca_force_rotate (int): An integer whose purpose is to force swarm
+                to generate a new signing CA certificate and key, if none have
+                been specified.
+            autolock_managers (boolean): If set, generate a key and use it to
+                lock data stored on the managers.
+            log_driver (DriverConfig): The default log driver to use for tasks
+                created in the orchestrator.
+
+        Returns:
+            (str): The ID of the created node.
+
+        Raises:
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
+
+        Example:
+
+            >>> client.swarm.init(
+            ...     advertise_addr='eth0', listen_addr='0.0.0.0:5000',
+            ...     force_new_cluster=False,
+            ...     default_addr_pool=['10.20.0.0/16'],
+            ...     subnet_size=24, snapshot_interval=5000,
+            ...     log_entries_for_slow_followers=1200
+            ... )
+
+        """
+        init_kwargs = {
+            'advertise_addr': advertise_addr,
+            'listen_addr': listen_addr,
+            'force_new_cluster': force_new_cluster,
+            'default_addr_pool': default_addr_pool,
+            'subnet_size': subnet_size,
+            'data_path_addr': data_path_addr,
+            'data_path_port': data_path_port,
+        }
+        init_kwargs['swarm_spec'] = self.client.api.create_swarm_spec(**kwargs)
+        node_id = self.client.api.init_swarm(**init_kwargs)
+        self.reload()
+        return node_id
+
+    def join(self, *args, **kwargs):
+        return self.client.api.join_swarm(*args, **kwargs)
+    join.__doc__ = APIClient.join_swarm.__doc__
+
+    def leave(self, *args, **kwargs):
+        return self.client.api.leave_swarm(*args, **kwargs)
+    leave.__doc__ = APIClient.leave_swarm.__doc__
+
+    def reload(self):
+        """
+        Inspect the swarm on the server and store the response in
+        :py:attr:`attrs`.
+
+        Raises:
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
+        """
+        self.attrs = self.client.api.inspect_swarm()
+
+    def unlock(self, key):
+        return self.client.api.unlock_swarm(key)
+    unlock.__doc__ = APIClient.unlock_swarm.__doc__
+
+    def update(self, rotate_worker_token=False, rotate_manager_token=False,
+               rotate_manager_unlock_key=False, **kwargs):
+        """
+        Update the swarm's configuration.
+
+        It takes the same arguments as :py:meth:`init`, except
+        ``advertise_addr``, ``listen_addr``, and ``force_new_cluster``. In
+        addition, it takes these arguments:
+
+        Args:
+            rotate_worker_token (bool): Rotate the worker join token. Default:
+                ``False``.
+            rotate_manager_token (bool): Rotate the manager join token.
+                Default: ``False``.
+            rotate_manager_unlock_key (bool): Rotate the manager unlock key.
+                Default: ``False``.
+
+        Raises:
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
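+
+        Example:
+            An illustrative sketch:
+
+            >>> client.swarm.update(rotate_worker_token=True)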
+
+        """
+        # node_cert_expiry must always be set; default to 90 days in ns
+        if kwargs.get('node_cert_expiry') is None:
+            kwargs['node_cert_expiry'] = 7776000000000000
+
+        return self.client.api.update_swarm(
+            version=self.version,
+            swarm_spec=self.client.api.create_swarm_spec(**kwargs),
+            rotate_worker_token=rotate_worker_token,
+            rotate_manager_token=rotate_manager_token,
+            rotate_manager_unlock_key=rotate_manager_unlock_key
+        )
diff --git a/docker/models/volumes.py b/docker/models/volumes.py
new file mode 100644
index 0000000000..12c9f14b27
--- /dev/null
+++ b/docker/models/volumes.py
@@ -0,0 +1,99 @@
+from ..api import APIClient
+from .resource import Collection, Model
+
+
+class Volume(Model):
+    """A volume."""
+    id_attribute = 'Name'
+
+    @property
+    def name(self):
+        """The name of the volume."""
+        return self.attrs['Name']
+
+    def remove(self, force=False):
+        """
+        Remove this volume.
+
+        Args:
+            force (bool): Force removal of volumes that were already removed
+                out of band by the volume driver plugin.
+
+        Raises:
+            :py:class:`docker.errors.APIError`
+                If the volume could not be removed.
+        """
+        return self.client.api.remove_volume(self.id, force=force)
+
+
+class VolumeCollection(Collection):
+    """Volumes on the Docker server."""
+    model = Volume
+
+    def create(self, name=None, **kwargs):
+        """
+        Create a volume.
+
+        Args:
+            name (str): Name of the volume. If not specified, the engine
+                generates a name.
+            driver (str): Name of the driver used to create the volume
+            driver_opts (dict): Driver options as a key-value dictionary
+            labels (dict): Labels to set on the volume
+
+        Returns:
+            (:py:class:`Volume`): The volume created.
+
+        Raises:
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
+
+        Example:
+
+            >>> volume = client.volumes.create(name='foobar', driver='local',
+                    driver_opts={'foo': 'bar', 'baz': 'false'},
+                    labels={"key": "value"})
+
+        """
+        obj = self.client.api.create_volume(name, **kwargs)
+        return self.prepare_model(obj)
+
+    def get(self, volume_id):
+        """
+        Get a volume.
+
+        Args:
+            volume_id (str): Volume name.
+
+        Returns:
+            (:py:class:`Volume`): The volume.
+
+        Raises:
+            :py:class:`docker.errors.NotFound`
+                If the volume does not exist.
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
+        """
+        return self.prepare_model(self.client.api.inspect_volume(volume_id))
+
+    def list(self, **kwargs):
+        """
+        List volumes. Similar to the ``docker volume ls`` command.
+
+        Args:
+            filters (dict): Server-side list filtering options.
+
+        Returns:
+            (list of :py:class:`Volume`): The volumes.
+
+        Raises:
+            :py:class:`docker.errors.APIError`
+                If the server returns an error. 
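+
+        Example:
+
+            A short usage sketch, assuming a reachable engine; the label
+            filter value is illustrative.
+
+            >>> import docker
+            >>> client = docker.from_env()
+            >>> for volume in client.volumes.list(filters={'label': 'env=dev'}):
+            ...     print(volume.name)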
+ """ + resp = self.client.api.volumes(**kwargs) + if not resp.get('Volumes'): + return [] + return [self.prepare_model(obj) for obj in resp['Volumes']] + + def prune(self, filters=None): + return self.client.api.prune_volumes(filters=filters) + prune.__doc__ = APIClient.prune_volumes.__doc__ diff --git a/docker/ssladapter/__init__.py b/docker/ssladapter/__init__.py deleted file mode 100644 index 1a5e1bb6d4..0000000000 --- a/docker/ssladapter/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .ssladapter import SSLAdapter # flake8: noqa diff --git a/docker/ssladapter/ssladapter.py b/docker/ssladapter/ssladapter.py deleted file mode 100644 index 3a70a916ce..0000000000 --- a/docker/ssladapter/ssladapter.py +++ /dev/null @@ -1,55 +0,0 @@ -""" Resolves OpenSSL issues in some servers: - https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/ - https://github.com/kennethreitz/requests/pull/799 -""" -from distutils.version import StrictVersion -from requests.adapters import HTTPAdapter -import ssl - -try: - import requests.packages.urllib3 as urllib3 -except ImportError: - import urllib3 - -PoolManager = urllib3.poolmanager.PoolManager - - -def get_max_tls_protocol(): - protocols = ('PROTOCOL_TLSv1_2', - 'PROTOCOL_TLSv1_1', - 'PROTOCOL_TLSv1') - for proto in protocols: - if hasattr(ssl, proto): - return getattr(ssl, proto) - - -class SSLAdapter(HTTPAdapter): - '''An HTTPS Transport Adapter that uses an arbitrary SSL version.''' - def __init__(self, ssl_version=None, assert_hostname=None, - assert_fingerprint=None, **kwargs): - ssl_version = ssl_version or get_max_tls_protocol() - self.ssl_version = ssl_version - self.assert_hostname = assert_hostname - self.assert_fingerprint = assert_fingerprint - super(SSLAdapter, self).__init__(**kwargs) - - def init_poolmanager(self, connections, maxsize, block=False): - kwargs = { - 'num_pools': connections, - 'maxsize': maxsize, - 'block': block, - 'assert_hostname': self.assert_hostname, - 'assert_fingerprint': self.assert_fingerprint, - } - if self.can_override_ssl_version(): - kwargs['ssl_version'] = self.ssl_version - - self.poolmanager = PoolManager(**kwargs) - - def can_override_ssl_version(self): - urllib_ver = urllib3.__version__.split('-')[0] - if urllib_ver is None: - return False - if urllib_ver == 'dev': - return True - return StrictVersion(urllib_ver) > StrictVersion('1.5') diff --git a/docker/tls.py b/docker/tls.py index d888b7d483..ad4966c903 100644 --- a/docker/tls.py +++ b/docker/tls.py @@ -1,34 +1,32 @@ import os from . import errors -from .ssladapter import ssladapter -class TLSConfig(object): +class TLSConfig: + """ + TLS configuration. + + Args: + client_cert (tuple of str): Path to client cert, path to client key. + ca_cert (str): Path to CA cert file. + verify (bool or str): This can be a bool or a path to a CA cert + file to verify against. If ``True``, verify using ca_cert; + if ``False`` or not specified, do not verify. 
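+
+    Example:
+
+        A sketch of the usual entry point for this class; the certificate
+        paths and daemon address are placeholders.
+
+        >>> import docker
+        >>> from docker.tls import TLSConfig
+        >>> tls_config = TLSConfig(
+        ...     client_cert=('/certs/cert.pem', '/certs/key.pem'),
+        ...     ca_cert='/certs/ca.pem',
+        ...     verify=True,
+        ... )
+        >>> client = docker.DockerClient(
+        ...     base_url='tcp://192.0.2.10:2376', tls=tls_config
+        ... )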
+ """ cert = None + ca_cert = None verify = None - ssl_version = None - def __init__(self, client_cert=None, ca_cert=None, verify=None, - ssl_version=None, assert_hostname=None, - assert_fingerprint=None): + def __init__(self, client_cert=None, ca_cert=None, verify=None): # Argument compatibility/mapping with - # http://docs.docker.com/examples/https/ + # https://docs.docker.com/engine/articles/https/ # This diverges from the Docker CLI in that users can specify 'tls' # here, but also disable any public/default CA pool verification by - # leaving tls_verify=False - - # urllib3 sets a default ssl_version if ssl_version is None, - # but that default is the vulnerable PROTOCOL_SSLv23 selection, - # so we override the default with the maximum supported in the running - # Python interpeter up to TLS 1.2. (see: http://tinyurl.com/kxga8hb) - ssl_version = ssl_version or ssladapter.get_max_tls_protocol() - self.ssl_version = ssl_version - self.assert_hostname = assert_hostname - self.assert_fingerprint = assert_fingerprint + # leaving verify=False - # "tls" and "tls_verify" must have both or neither cert/key files - # In either case, Alert the user when both are expected, but any are + # "client_cert" must have both or neither cert/key files. In + # either case, Alert the user when both are expected, but any are # missing. if client_cert: @@ -36,43 +34,34 @@ def __init__(self, client_cert=None, ca_cert=None, verify=None, tls_cert, tls_key = client_cert except ValueError: raise errors.TLSParameterError( - 'client_config must be a tuple of' + 'client_cert must be a tuple of' ' (client certificate, key file)' - ) + ) from None if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or - not os.path.isfile(tls_key)): + not os.path.isfile(tls_key)): raise errors.TLSParameterError( 'Path to a certificate and key files must be provided' - ' through the client_config param' + ' through the client_cert param' ) self.cert = (tls_cert, tls_key) - # Either set verify to True (public/default CA checks) or to the - # path of a CA Cert file. - if verify is not None: - if not ca_cert: - self.verify = verify - elif os.path.isfile(ca_cert): - if not verify: - raise errors.TLSParameterError( - 'verify can not be False when a CA cert is' - ' provided.' - ) - self.verify = ca_cert - else: - raise errors.TLSParameterError( - 'Invalid CA certificate provided for `tls_ca_cert`.' - ) + # If verify is set, make sure the cert exists + self.verify = verify + self.ca_cert = ca_cert + if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert): + raise errors.TLSParameterError( + 'Invalid CA certificate provided for `ca_cert`.' + ) def configure_client(self, client): - client.ssl_version = self.ssl_version - if self.verify is not None: + """ + Configure a client with these TLS options. 
+ """ + if self.verify and self.ca_cert: + client.verify = self.ca_cert + else: client.verify = self.verify + if self.cert: client.cert = self.cert - client.mount('https://', ssladapter.SSLAdapter( - ssl_version=self.ssl_version, - assert_hostname=self.assert_hostname, - assert_fingerprint=self.assert_fingerprint, - )) diff --git a/docker/transport/__init__.py b/docker/transport/__init__.py new file mode 100644 index 0000000000..8c68b1f6e2 --- /dev/null +++ b/docker/transport/__init__.py @@ -0,0 +1,12 @@ +from .unixconn import UnixHTTPAdapter + +try: + from .npipeconn import NpipeHTTPAdapter + from .npipesocket import NpipeSocket +except ImportError: + pass + +try: + from .sshconn import SSHHTTPAdapter +except ImportError: + pass diff --git a/docker/transport/basehttpadapter.py b/docker/transport/basehttpadapter.py new file mode 100644 index 0000000000..2301b6b07a --- /dev/null +++ b/docker/transport/basehttpadapter.py @@ -0,0 +1,13 @@ +import requests.adapters + + +class BaseHTTPAdapter(requests.adapters.HTTPAdapter): + def close(self): + super().close() + if hasattr(self, 'pools'): + self.pools.clear() + + # Fix for requests 2.32.2+: + # https://github.com/psf/requests/commit/c98e4d133ef29c46a9b68cd783087218a8075e05 + def get_connection_with_tls_context(self, request, verify, proxies=None, cert=None): + return self.get_connection(request.url, proxies) diff --git a/docker/transport/npipeconn.py b/docker/transport/npipeconn.py new file mode 100644 index 0000000000..44d6921c2c --- /dev/null +++ b/docker/transport/npipeconn.py @@ -0,0 +1,102 @@ +import queue + +import requests.adapters +import urllib3 +import urllib3.connection + +from .. import constants +from .basehttpadapter import BaseHTTPAdapter +from .npipesocket import NpipeSocket + +RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer + + +class NpipeHTTPConnection(urllib3.connection.HTTPConnection): + def __init__(self, npipe_path, timeout=60): + super().__init__( + 'localhost', timeout=timeout + ) + self.npipe_path = npipe_path + self.timeout = timeout + + def connect(self): + sock = NpipeSocket() + sock.settimeout(self.timeout) + sock.connect(self.npipe_path) + self.sock = sock + + +class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool): + def __init__(self, npipe_path, timeout=60, maxsize=10): + super().__init__( + 'localhost', timeout=timeout, maxsize=maxsize + ) + self.npipe_path = npipe_path + self.timeout = timeout + + def _new_conn(self): + return NpipeHTTPConnection( + self.npipe_path, self.timeout + ) + + # When re-using connections, urllib3 tries to call select() on our + # NpipeSocket instance, causing a crash. To circumvent this, we override + # _get_conn, where that check happens. + def _get_conn(self, timeout): + conn = None + try: + conn = self.pool.get(block=self.block, timeout=timeout) + except AttributeError as ae: # self.pool is None + raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.") from ae + + except queue.Empty: + if self.block: + raise urllib3.exceptions.EmptyPoolError( + self, + "Pool reached maximum size and no more " + "connections are allowed." 
+ ) from None + # Oh well, we'll create a new connection then + + return conn or self._new_conn() + + +class NpipeHTTPAdapter(BaseHTTPAdapter): + + __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['npipe_path', + 'pools', + 'timeout', + 'max_pool_size'] + + def __init__(self, base_url, timeout=60, + pool_connections=constants.DEFAULT_NUM_POOLS, + max_pool_size=constants.DEFAULT_MAX_POOL_SIZE): + self.npipe_path = base_url.replace('npipe://', '') + self.timeout = timeout + self.max_pool_size = max_pool_size + self.pools = RecentlyUsedContainer( + pool_connections, dispose_func=lambda p: p.close() + ) + super().__init__() + + def get_connection(self, url, proxies=None): + with self.pools.lock: + pool = self.pools.get(url) + if pool: + return pool + + pool = NpipeHTTPConnectionPool( + self.npipe_path, self.timeout, + maxsize=self.max_pool_size + ) + self.pools[url] = pool + + return pool + + def request_url(self, request, proxies): + # The select_proxy utility in requests errors out when the provided URL + # doesn't have a hostname, like is the case when using a UNIX socket. + # Since proxies are an irrelevant notion in the case of UNIX sockets + # anyway, we simply return the path URL directly. + # See also: https://github.com/docker/docker-sdk-python/issues/811 + return request.path_url diff --git a/docker/transport/npipesocket.py b/docker/transport/npipesocket.py new file mode 100644 index 0000000000..d91938e766 --- /dev/null +++ b/docker/transport/npipesocket.py @@ -0,0 +1,230 @@ +import functools +import io +import time + +import pywintypes +import win32api +import win32event +import win32file +import win32pipe + +cERROR_PIPE_BUSY = 0xe7 +cSECURITY_SQOS_PRESENT = 0x100000 +cSECURITY_ANONYMOUS = 0 + +MAXIMUM_RETRY_COUNT = 10 + + +def check_closed(f): + @functools.wraps(f) + def wrapped(self, *args, **kwargs): + if self._closed: + raise RuntimeError( + 'Can not reuse socket after connection was closed.' + ) + return f(self, *args, **kwargs) + return wrapped + + +class NpipeSocket: + """ Partial implementation of the socket API over windows named pipes. + This implementation is only designed to be used as a client socket, + and server-specific methods (bind, listen, accept...) are not + implemented. + """ + + def __init__(self, handle=None): + self._timeout = win32pipe.NMPWAIT_USE_DEFAULT_WAIT + self._handle = handle + self._closed = False + + def accept(self): + raise NotImplementedError() + + def bind(self, address): + raise NotImplementedError() + + def close(self): + self._handle.Close() + self._closed = True + + @check_closed + def connect(self, address, retry_count=0): + try: + handle = win32file.CreateFile( + address, + win32file.GENERIC_READ | win32file.GENERIC_WRITE, + 0, + None, + win32file.OPEN_EXISTING, + (cSECURITY_ANONYMOUS + | cSECURITY_SQOS_PRESENT + | win32file.FILE_FLAG_OVERLAPPED), + 0 + ) + except win32pipe.error as e: + # See Remarks: + # https://msdn.microsoft.com/en-us/library/aa365800.aspx + if e.winerror == cERROR_PIPE_BUSY: + # Another program or thread has grabbed our pipe instance + # before we got to it. Wait for availability and attempt to + # connect again. 
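+                # With MAXIMUM_RETRY_COUNT = 10 and the one-second sleep
+                # below, a busy pipe is retried for roughly ten seconds
+                # before the error is re-raised to the caller.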
+ retry_count = retry_count + 1 + if (retry_count < MAXIMUM_RETRY_COUNT): + time.sleep(1) + return self.connect(address, retry_count) + raise e + + self.flags = win32pipe.GetNamedPipeInfo(handle)[0] + + self._handle = handle + self._address = address + + @check_closed + def connect_ex(self, address): + return self.connect(address) + + @check_closed + def detach(self): + self._closed = True + return self._handle + + @check_closed + def dup(self): + return NpipeSocket(self._handle) + + def getpeername(self): + return self._address + + def getsockname(self): + return self._address + + def getsockopt(self, level, optname, buflen=None): + raise NotImplementedError() + + def ioctl(self, control, option): + raise NotImplementedError() + + def listen(self, backlog): + raise NotImplementedError() + + def makefile(self, mode=None, bufsize=None): + if mode.strip('b') != 'r': + raise NotImplementedError() + rawio = NpipeFileIOBase(self) + if bufsize is None or bufsize <= 0: + bufsize = io.DEFAULT_BUFFER_SIZE + return io.BufferedReader(rawio, buffer_size=bufsize) + + @check_closed + def recv(self, bufsize, flags=0): + err, data = win32file.ReadFile(self._handle, bufsize) + return data + + @check_closed + def recvfrom(self, bufsize, flags=0): + data = self.recv(bufsize, flags) + return (data, self._address) + + @check_closed + def recvfrom_into(self, buf, nbytes=0, flags=0): + return self.recv_into(buf, nbytes, flags), self._address + + @check_closed + def recv_into(self, buf, nbytes=0): + readbuf = buf + if not isinstance(buf, memoryview): + readbuf = memoryview(buf) + + event = win32event.CreateEvent(None, True, True, None) + try: + overlapped = pywintypes.OVERLAPPED() + overlapped.hEvent = event + err, data = win32file.ReadFile( + self._handle, + readbuf[:nbytes] if nbytes else readbuf, + overlapped + ) + wait_result = win32event.WaitForSingleObject(event, self._timeout) + if wait_result == win32event.WAIT_TIMEOUT: + win32file.CancelIo(self._handle) + raise TimeoutError + return win32file.GetOverlappedResult(self._handle, overlapped, 0) + finally: + win32api.CloseHandle(event) + + @check_closed + def send(self, string, flags=0): + event = win32event.CreateEvent(None, True, True, None) + try: + overlapped = pywintypes.OVERLAPPED() + overlapped.hEvent = event + win32file.WriteFile(self._handle, string, overlapped) + wait_result = win32event.WaitForSingleObject(event, self._timeout) + if wait_result == win32event.WAIT_TIMEOUT: + win32file.CancelIo(self._handle) + raise TimeoutError + return win32file.GetOverlappedResult(self._handle, overlapped, 0) + finally: + win32api.CloseHandle(event) + + @check_closed + def sendall(self, string, flags=0): + return self.send(string, flags) + + @check_closed + def sendto(self, string, address): + self.connect(address) + return self.send(string) + + def setblocking(self, flag): + if flag: + return self.settimeout(None) + return self.settimeout(0) + + def settimeout(self, value): + if value is None: + # Blocking mode + self._timeout = win32event.INFINITE + elif not isinstance(value, (float, int)) or value < 0: + raise ValueError('Timeout value out of range') + else: + # Timeout mode - Value converted to milliseconds + self._timeout = int(value * 1000) + + def gettimeout(self): + return self._timeout + + def setsockopt(self, level, optname, value): + raise NotImplementedError() + + @check_closed + def shutdown(self, how): + return self.close() + + +class NpipeFileIOBase(io.RawIOBase): + def __init__(self, npipe_socket): + self.sock = npipe_socket + + def close(self): + 
super().close() + self.sock = None + + def fileno(self): + return self.sock.fileno() + + def isatty(self): + return False + + def readable(self): + return True + + def readinto(self, buf): + return self.sock.recv_into(buf) + + def seekable(self): + return False + + def writable(self): + return False diff --git a/docker/transport/sshconn.py b/docker/transport/sshconn.py new file mode 100644 index 0000000000..1870668010 --- /dev/null +++ b/docker/transport/sshconn.py @@ -0,0 +1,250 @@ +import logging +import os +import queue +import signal +import socket +import subprocess +import urllib.parse + +import paramiko +import requests.adapters +import urllib3 +import urllib3.connection + +from .. import constants +from .basehttpadapter import BaseHTTPAdapter + +RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer + + +class SSHSocket(socket.socket): + def __init__(self, host): + super().__init__( + socket.AF_INET, socket.SOCK_STREAM) + self.host = host + self.port = None + self.user = None + if ':' in self.host: + self.host, self.port = self.host.split(':') + if '@' in self.host: + self.user, self.host = self.host.split('@') + + self.proc = None + + def connect(self, **kwargs): + args = ['ssh'] + if self.user: + args = args + ['-l', self.user] + + if self.port: + args = args + ['-p', self.port] + + args = args + ['--', self.host, 'docker system dial-stdio'] + + preexec_func = None + if not constants.IS_WINDOWS_PLATFORM: + def f(): + signal.signal(signal.SIGINT, signal.SIG_IGN) + preexec_func = f + + env = dict(os.environ) + + # drop LD_LIBRARY_PATH and SSL_CERT_FILE + env.pop('LD_LIBRARY_PATH', None) + env.pop('SSL_CERT_FILE', None) + + self.proc = subprocess.Popen( + args, + env=env, + stdout=subprocess.PIPE, + stdin=subprocess.PIPE, + preexec_fn=preexec_func) + + def _write(self, data): + if not self.proc or self.proc.stdin.closed: + raise Exception('SSH subprocess not initiated.' + 'connect() must be called first.') + written = self.proc.stdin.write(data) + self.proc.stdin.flush() + return written + + def sendall(self, data): + self._write(data) + + def send(self, data): + return self._write(data) + + def recv(self, n): + if not self.proc: + raise Exception('SSH subprocess not initiated.' 
+ 'connect() must be called first.') + return self.proc.stdout.read(n) + + def makefile(self, mode): + if not self.proc: + self.connect() + self.proc.stdout.channel = self + + return self.proc.stdout + + def close(self): + if not self.proc or self.proc.stdin.closed: + return + self.proc.stdin.write(b'\n\n') + self.proc.stdin.flush() + self.proc.terminate() + + +class SSHConnection(urllib3.connection.HTTPConnection): + def __init__(self, ssh_transport=None, timeout=60, host=None): + super().__init__( + 'localhost', timeout=timeout + ) + self.ssh_transport = ssh_transport + self.timeout = timeout + self.ssh_host = host + + def connect(self): + if self.ssh_transport: + sock = self.ssh_transport.open_session() + sock.settimeout(self.timeout) + sock.exec_command('docker system dial-stdio') + else: + sock = SSHSocket(self.ssh_host) + sock.settimeout(self.timeout) + sock.connect() + + self.sock = sock + + +class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool): + scheme = 'ssh' + + def __init__(self, ssh_client=None, timeout=60, maxsize=10, host=None): + super().__init__( + 'localhost', timeout=timeout, maxsize=maxsize + ) + self.ssh_transport = None + self.timeout = timeout + if ssh_client: + self.ssh_transport = ssh_client.get_transport() + self.ssh_host = host + + def _new_conn(self): + return SSHConnection(self.ssh_transport, self.timeout, self.ssh_host) + + # When re-using connections, urllib3 calls fileno() on our + # SSH channel instance, quickly overloading our fd limit. To avoid this, + # we override _get_conn + def _get_conn(self, timeout): + conn = None + try: + conn = self.pool.get(block=self.block, timeout=timeout) + + except AttributeError as ae: # self.pool is None + raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.") from ae + + except queue.Empty: + if self.block: + raise urllib3.exceptions.EmptyPoolError( + self, + "Pool reached maximum size and no more " + "connections are allowed." 
+ ) from None + # Oh well, we'll create a new connection then + + return conn or self._new_conn() + + +class SSHHTTPAdapter(BaseHTTPAdapter): + + __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + [ + 'pools', 'timeout', 'ssh_client', 'ssh_params', 'max_pool_size' + ] + + def __init__(self, base_url, timeout=60, + pool_connections=constants.DEFAULT_NUM_POOLS, + max_pool_size=constants.DEFAULT_MAX_POOL_SIZE, + shell_out=False): + self.ssh_client = None + if not shell_out: + self._create_paramiko_client(base_url) + self._connect() + + self.ssh_host = base_url + if base_url.startswith('ssh://'): + self.ssh_host = base_url[len('ssh://'):] + + self.timeout = timeout + self.max_pool_size = max_pool_size + self.pools = RecentlyUsedContainer( + pool_connections, dispose_func=lambda p: p.close() + ) + super().__init__() + + def _create_paramiko_client(self, base_url): + logging.getLogger("paramiko").setLevel(logging.WARNING) + self.ssh_client = paramiko.SSHClient() + base_url = urllib.parse.urlparse(base_url) + self.ssh_params = { + "hostname": base_url.hostname, + "port": base_url.port, + "username": base_url.username + } + ssh_config_file = os.path.expanduser("~/.ssh/config") + if os.path.exists(ssh_config_file): + conf = paramiko.SSHConfig() + with open(ssh_config_file) as f: + conf.parse(f) + host_config = conf.lookup(base_url.hostname) + if 'proxycommand' in host_config: + self.ssh_params["sock"] = paramiko.ProxyCommand( + host_config['proxycommand'] + ) + if 'hostname' in host_config: + self.ssh_params['hostname'] = host_config['hostname'] + if base_url.port is None and 'port' in host_config: + self.ssh_params['port'] = host_config['port'] + if base_url.username is None and 'user' in host_config: + self.ssh_params['username'] = host_config['user'] + if 'identityfile' in host_config: + self.ssh_params['key_filename'] = host_config['identityfile'] + + self.ssh_client.load_system_host_keys() + self.ssh_client.set_missing_host_key_policy(paramiko.RejectPolicy()) + + def _connect(self): + if self.ssh_client: + self.ssh_client.connect(**self.ssh_params) + + def get_connection(self, url, proxies=None): + if not self.ssh_client: + return SSHConnectionPool( + ssh_client=self.ssh_client, + timeout=self.timeout, + maxsize=self.max_pool_size, + host=self.ssh_host + ) + with self.pools.lock: + pool = self.pools.get(url) + if pool: + return pool + + # Connection is closed try a reconnect + if self.ssh_client and not self.ssh_client.get_transport(): + self._connect() + + pool = SSHConnectionPool( + ssh_client=self.ssh_client, + timeout=self.timeout, + maxsize=self.max_pool_size, + host=self.ssh_host + ) + self.pools[url] = pool + + return pool + + def close(self): + super().close() + if self.ssh_client: + self.ssh_client.close() diff --git a/docker/transport/unixconn.py b/docker/transport/unixconn.py new file mode 100644 index 0000000000..d571833f04 --- /dev/null +++ b/docker/transport/unixconn.py @@ -0,0 +1,86 @@ +import socket + +import requests.adapters +import urllib3 +import urllib3.connection + +from .. 
import constants +from .basehttpadapter import BaseHTTPAdapter + +RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer + + +class UnixHTTPConnection(urllib3.connection.HTTPConnection): + + def __init__(self, base_url, unix_socket, timeout=60): + super().__init__( + 'localhost', timeout=timeout + ) + self.base_url = base_url + self.unix_socket = unix_socket + self.timeout = timeout + + def connect(self): + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + sock.settimeout(self.timeout) + sock.connect(self.unix_socket) + self.sock = sock + + +class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool): + def __init__(self, base_url, socket_path, timeout=60, maxsize=10): + super().__init__( + 'localhost', timeout=timeout, maxsize=maxsize + ) + self.base_url = base_url + self.socket_path = socket_path + self.timeout = timeout + + def _new_conn(self): + return UnixHTTPConnection( + self.base_url, self.socket_path, self.timeout + ) + + +class UnixHTTPAdapter(BaseHTTPAdapter): + + __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['pools', + 'socket_path', + 'timeout', + 'max_pool_size'] + + def __init__(self, socket_url, timeout=60, + pool_connections=constants.DEFAULT_NUM_POOLS, + max_pool_size=constants.DEFAULT_MAX_POOL_SIZE): + socket_path = socket_url.replace('http+unix://', '') + if not socket_path.startswith('/'): + socket_path = f"/{socket_path}" + self.socket_path = socket_path + self.timeout = timeout + self.max_pool_size = max_pool_size + self.pools = RecentlyUsedContainer( + pool_connections, dispose_func=lambda p: p.close() + ) + super().__init__() + + def get_connection(self, url, proxies=None): + with self.pools.lock: + pool = self.pools.get(url) + if pool: + return pool + + pool = UnixHTTPConnectionPool( + url, self.socket_path, self.timeout, + maxsize=self.max_pool_size + ) + self.pools[url] = pool + + return pool + + def request_url(self, request, proxies): + # The select_proxy utility in requests errors out when the provided URL + # doesn't have a hostname, like is the case when using a UNIX socket. + # Since proxies are an irrelevant notion in the case of UNIX sockets + # anyway, we simply return the path URL directly. + # See also: https://github.com/docker/docker-py/issues/811 + return request.path_url diff --git a/docker/types/__init__.py b/docker/types/__init__.py new file mode 100644 index 0000000000..fbe247210b --- /dev/null +++ b/docker/types/__init__.py @@ -0,0 +1,24 @@ +from .containers import ContainerConfig, DeviceRequest, HostConfig, LogConfig, Ulimit +from .daemon import CancellableStream +from .healthcheck import Healthcheck +from .networks import EndpointConfig, IPAMConfig, IPAMPool, NetworkingConfig +from .services import ( + ConfigReference, + ContainerSpec, + DNSConfig, + DriverConfig, + EndpointSpec, + Mount, + NetworkAttachmentConfig, + Placement, + PlacementPreference, + Privileges, + Resources, + RestartPolicy, + RollbackConfig, + SecretReference, + ServiceMode, + TaskTemplate, + UpdateConfig, +) +from .swarm import SwarmExternalCA, SwarmSpec diff --git a/docker/types/base.py b/docker/types/base.py new file mode 100644 index 0000000000..8851f1e2cb --- /dev/null +++ b/docker/types/base.py @@ -0,0 +1,4 @@ +class DictType(dict): + def __init__(self, init): + for k, v in init.items(): + self[k] = v diff --git a/docker/types/containers.py b/docker/types/containers.py new file mode 100644 index 0000000000..598188a25e --- /dev/null +++ b/docker/types/containers.py @@ -0,0 +1,790 @@ +from .. 
import errors +from ..utils.utils import ( + convert_port_bindings, + convert_tmpfs_mounts, + convert_volume_binds, + format_environment, + format_extra_hosts, + normalize_links, + parse_bytes, + parse_devices, + split_command, + version_gte, + version_lt, +) +from .base import DictType +from .healthcheck import Healthcheck + + +class LogConfigTypesEnum: + _values = ( + 'json-file', + 'syslog', + 'journald', + 'gelf', + 'fluentd', + 'none' + ) + JSON, SYSLOG, JOURNALD, GELF, FLUENTD, NONE = _values + + +class LogConfig(DictType): + """ + Configure logging for a container, when provided as an argument to + :py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`. + You may refer to the + `official logging driver documentation `_ + for more information. + + Args: + type (str): Indicate which log driver to use. A set of valid drivers + is provided as part of the :py:attr:`LogConfig.types` + enum. Other values may be accepted depending on the engine version + and available logging plugins. + config (dict): A driver-dependent configuration dictionary. Please + refer to the driver's documentation for a list of valid config + keys. + + Example: + + >>> from docker.types import LogConfig + >>> lc = LogConfig(type=LogConfig.types.JSON, config={ + ... 'max-size': '1g', + ... 'labels': 'production_status,geo' + ... }) + >>> hc = client.create_host_config(log_config=lc) + >>> container = client.create_container('busybox', 'true', + ... host_config=hc) + >>> client.inspect_container(container)['HostConfig']['LogConfig'] + { + 'Type': 'json-file', + 'Config': {'labels': 'production_status,geo', 'max-size': '1g'} + } + """ + types = LogConfigTypesEnum + + def __init__(self, **kwargs): + log_driver_type = kwargs.get('type', kwargs.get('Type')) + config = kwargs.get('config', kwargs.get('Config')) or {} + + if config and not isinstance(config, dict): + raise ValueError("LogConfig.config must be a dictionary") + + super().__init__({ + 'Type': log_driver_type, + 'Config': config + }) + + @property + def type(self): + return self['Type'] + + @type.setter + def type(self, value): + self['Type'] = value + + @property + def config(self): + return self['Config'] + + def set_config_value(self, key, value): + """ Set a the value for ``key`` to ``value`` inside the ``config`` + dict. + """ + self.config[key] = value + + def unset_config(self, key): + """ Remove the ``key`` property from the ``config`` dict. """ + if key in self.config: + del self.config[key] + + +class Ulimit(DictType): + """ + Create a ulimit declaration to be used with + :py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`. + + Args: + + name (str): Which ulimit will this apply to. The valid names can be + found in '/etc/security/limits.conf' on a gnu/linux system. + soft (int): The soft limit for this ulimit. Optional. + hard (int): The hard limit for this ulimit. Optional. 
+ + Example: + + >>> nproc_limit = docker.types.Ulimit(name='nproc', soft=1024) + >>> hc = client.create_host_config(ulimits=[nproc_limit]) + >>> container = client.create_container( + 'busybox', 'true', host_config=hc + ) + >>> client.inspect_container(container)['HostConfig']['Ulimits'] + [{'Name': 'nproc', 'Hard': 0, 'Soft': 1024}] + + """ + def __init__(self, **kwargs): + name = kwargs.get('name', kwargs.get('Name')) + soft = kwargs.get('soft', kwargs.get('Soft')) + hard = kwargs.get('hard', kwargs.get('Hard')) + if not isinstance(name, str): + raise ValueError("Ulimit.name must be a string") + if soft and not isinstance(soft, int): + raise ValueError("Ulimit.soft must be an integer") + if hard and not isinstance(hard, int): + raise ValueError("Ulimit.hard must be an integer") + super().__init__({ + 'Name': name, + 'Soft': soft, + 'Hard': hard + }) + + @property + def name(self): + return self['Name'] + + @name.setter + def name(self, value): + self['Name'] = value + + @property + def soft(self): + return self.get('Soft') + + @soft.setter + def soft(self, value): + self['Soft'] = value + + @property + def hard(self): + return self.get('Hard') + + @hard.setter + def hard(self, value): + self['Hard'] = value + + +class DeviceRequest(DictType): + """ + Create a device request to be used with + :py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`. + + Args: + + driver (str): Which driver to use for this device. Optional. + count (int): Number or devices to request. Optional. + Set to -1 to request all available devices. + device_ids (list): List of strings for device IDs. Optional. + Set either ``count`` or ``device_ids``. + capabilities (list): List of lists of strings to request + capabilities. Optional. The global list acts like an OR, + and the sub-lists are AND. The driver will try to satisfy + one of the sub-lists. + Available capabilities for the ``nvidia`` driver can be found + `here `_. + options (dict): Driver-specific options. Optional. 
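+
+    Example:
+
+        A sketch requesting a single GPU; the driver name and capability
+        set are typical for an NVIDIA setup and are illustrative only.
+
+        >>> from docker.types import DeviceRequest
+        >>> gpu = DeviceRequest(driver='nvidia', count=1,
+        ...                     capabilities=[['gpu']])
+        >>> hc = client.create_host_config(device_requests=[gpu])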
+ """ + + def __init__(self, **kwargs): + driver = kwargs.get('driver', kwargs.get('Driver')) + count = kwargs.get('count', kwargs.get('Count')) + device_ids = kwargs.get('device_ids', kwargs.get('DeviceIDs')) + capabilities = kwargs.get('capabilities', kwargs.get('Capabilities')) + options = kwargs.get('options', kwargs.get('Options')) + + if driver is None: + driver = '' + elif not isinstance(driver, str): + raise ValueError('DeviceRequest.driver must be a string') + if count is None: + count = 0 + elif not isinstance(count, int): + raise ValueError('DeviceRequest.count must be an integer') + if device_ids is None: + device_ids = [] + elif not isinstance(device_ids, list): + raise ValueError('DeviceRequest.device_ids must be a list') + if capabilities is None: + capabilities = [] + elif not isinstance(capabilities, list): + raise ValueError('DeviceRequest.capabilities must be a list') + if options is None: + options = {} + elif not isinstance(options, dict): + raise ValueError('DeviceRequest.options must be a dict') + + super().__init__({ + 'Driver': driver, + 'Count': count, + 'DeviceIDs': device_ids, + 'Capabilities': capabilities, + 'Options': options + }) + + @property + def driver(self): + return self['Driver'] + + @driver.setter + def driver(self, value): + self['Driver'] = value + + @property + def count(self): + return self['Count'] + + @count.setter + def count(self, value): + self['Count'] = value + + @property + def device_ids(self): + return self['DeviceIDs'] + + @device_ids.setter + def device_ids(self, value): + self['DeviceIDs'] = value + + @property + def capabilities(self): + return self['Capabilities'] + + @capabilities.setter + def capabilities(self, value): + self['Capabilities'] = value + + @property + def options(self): + return self['Options'] + + @options.setter + def options(self, value): + self['Options'] = value + + +class HostConfig(dict): + def __init__(self, version, binds=None, port_bindings=None, + lxc_conf=None, publish_all_ports=False, links=None, + privileged=False, dns=None, dns_search=None, + volumes_from=None, network_mode=None, restart_policy=None, + cap_add=None, cap_drop=None, devices=None, extra_hosts=None, + read_only=None, pid_mode=None, ipc_mode=None, + security_opt=None, ulimits=None, log_config=None, + mem_limit=None, memswap_limit=None, mem_reservation=None, + kernel_memory=None, mem_swappiness=None, cgroup_parent=None, + group_add=None, cpu_quota=None, cpu_period=None, + blkio_weight=None, blkio_weight_device=None, + device_read_bps=None, device_write_bps=None, + device_read_iops=None, device_write_iops=None, + oom_kill_disable=False, shm_size=None, sysctls=None, + tmpfs=None, oom_score_adj=None, dns_opt=None, cpu_shares=None, + cpuset_cpus=None, userns_mode=None, uts_mode=None, + pids_limit=None, isolation=None, auto_remove=False, + storage_opt=None, init=None, init_path=None, + volume_driver=None, cpu_count=None, cpu_percent=None, + nano_cpus=None, cpuset_mems=None, runtime=None, mounts=None, + cpu_rt_period=None, cpu_rt_runtime=None, + device_cgroup_rules=None, device_requests=None, + cgroupns=None): + + if mem_limit is not None: + self['Memory'] = parse_bytes(mem_limit) + + if memswap_limit is not None: + self['MemorySwap'] = parse_bytes(memswap_limit) + + if mem_reservation: + self['MemoryReservation'] = parse_bytes(mem_reservation) + + if kernel_memory: + self['KernelMemory'] = parse_bytes(kernel_memory) + + if mem_swappiness is not None: + if not isinstance(mem_swappiness, int): + raise host_config_type_error( + 'mem_swappiness', 
mem_swappiness, 'int' + ) + + self['MemorySwappiness'] = mem_swappiness + + if shm_size is not None: + if isinstance(shm_size, str): + shm_size = parse_bytes(shm_size) + + self['ShmSize'] = shm_size + + if pid_mode: + if version_lt(version, '1.24') and pid_mode != 'host': + raise host_config_value_error('pid_mode', pid_mode) + self['PidMode'] = pid_mode + + if ipc_mode: + self['IpcMode'] = ipc_mode + + if privileged: + self['Privileged'] = privileged + + if oom_kill_disable: + self['OomKillDisable'] = oom_kill_disable + + if oom_score_adj: + if version_lt(version, '1.22'): + raise host_config_version_error('oom_score_adj', '1.22') + if not isinstance(oom_score_adj, int): + raise host_config_type_error( + 'oom_score_adj', oom_score_adj, 'int' + ) + self['OomScoreAdj'] = oom_score_adj + + if publish_all_ports: + self['PublishAllPorts'] = publish_all_ports + + if read_only is not None: + self['ReadonlyRootfs'] = read_only + + if dns_search: + self['DnsSearch'] = dns_search + + if network_mode == 'host' and port_bindings: + raise host_config_incompatible_error( + 'network_mode', 'host', 'port_bindings' + ) + self['NetworkMode'] = network_mode or 'default' + + if restart_policy: + if not isinstance(restart_policy, dict): + raise host_config_type_error( + 'restart_policy', restart_policy, 'dict' + ) + + self['RestartPolicy'] = restart_policy + + if cap_add: + self['CapAdd'] = cap_add + + if cap_drop: + self['CapDrop'] = cap_drop + + if devices: + self['Devices'] = parse_devices(devices) + + if group_add: + self['GroupAdd'] = [str(grp) for grp in group_add] + + if dns is not None: + self['Dns'] = dns + + if dns_opt is not None: + self['DnsOptions'] = dns_opt + + if security_opt is not None: + if not isinstance(security_opt, list): + raise host_config_type_error( + 'security_opt', security_opt, 'list' + ) + + self['SecurityOpt'] = security_opt + + if sysctls: + if not isinstance(sysctls, dict): + raise host_config_type_error('sysctls', sysctls, 'dict') + self['Sysctls'] = {} + for k, v in sysctls.items(): + self['Sysctls'][k] = str(v) + + if volumes_from is not None: + if isinstance(volumes_from, str): + volumes_from = volumes_from.split(',') + + self['VolumesFrom'] = volumes_from + + if binds is not None: + self['Binds'] = convert_volume_binds(binds) + + if port_bindings is not None: + self['PortBindings'] = convert_port_bindings(port_bindings) + + if extra_hosts is not None: + if isinstance(extra_hosts, dict): + extra_hosts = format_extra_hosts(extra_hosts) + + self['ExtraHosts'] = extra_hosts + + if links is not None: + self['Links'] = normalize_links(links) + + if isinstance(lxc_conf, dict): + formatted = [] + for k, v in lxc_conf.items(): + formatted.append({'Key': k, 'Value': str(v)}) + lxc_conf = formatted + + if lxc_conf is not None: + self['LxcConf'] = lxc_conf + + if cgroup_parent is not None: + self['CgroupParent'] = cgroup_parent + + if ulimits is not None: + if not isinstance(ulimits, list): + raise host_config_type_error('ulimits', ulimits, 'list') + self['Ulimits'] = [] + for lmt in ulimits: + if not isinstance(lmt, Ulimit): + lmt = Ulimit(**lmt) + self['Ulimits'].append(lmt) + + if log_config is not None: + if not isinstance(log_config, LogConfig): + if not isinstance(log_config, dict): + raise host_config_type_error( + 'log_config', log_config, 'LogConfig' + ) + log_config = LogConfig(**log_config) + + self['LogConfig'] = log_config + + if cpu_quota: + if not isinstance(cpu_quota, int): + raise host_config_type_error('cpu_quota', cpu_quota, 'int') + self['CpuQuota'] = cpu_quota + 
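+        # Note: cpu_quota and cpu_period (below) are the kernel CFS
+        # scheduler knobs; quota/period is the allowed CPU fraction, e.g.
+        # a quota of 50000 against the default 100000 period caps the
+        # container at half a CPU.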
+ if cpu_period: + if not isinstance(cpu_period, int): + raise host_config_type_error('cpu_period', cpu_period, 'int') + self['CpuPeriod'] = cpu_period + + if cpu_shares: + if not isinstance(cpu_shares, int): + raise host_config_type_error('cpu_shares', cpu_shares, 'int') + + self['CpuShares'] = cpu_shares + + if cpuset_cpus: + self['CpusetCpus'] = cpuset_cpus + + if cpuset_mems: + if not isinstance(cpuset_mems, str): + raise host_config_type_error( + 'cpuset_mems', cpuset_mems, 'str' + ) + self['CpusetMems'] = cpuset_mems + + if cpu_rt_period: + if version_lt(version, '1.25'): + raise host_config_version_error('cpu_rt_period', '1.25') + + if not isinstance(cpu_rt_period, int): + raise host_config_type_error( + 'cpu_rt_period', cpu_rt_period, 'int' + ) + self['CPURealtimePeriod'] = cpu_rt_period + + if cpu_rt_runtime: + if version_lt(version, '1.25'): + raise host_config_version_error('cpu_rt_runtime', '1.25') + + if not isinstance(cpu_rt_runtime, int): + raise host_config_type_error( + 'cpu_rt_runtime', cpu_rt_runtime, 'int' + ) + self['CPURealtimeRuntime'] = cpu_rt_runtime + + if blkio_weight: + if not isinstance(blkio_weight, int): + raise host_config_type_error( + 'blkio_weight', blkio_weight, 'int' + ) + if version_lt(version, '1.22'): + raise host_config_version_error('blkio_weight', '1.22') + self["BlkioWeight"] = blkio_weight + + if blkio_weight_device: + if not isinstance(blkio_weight_device, list): + raise host_config_type_error( + 'blkio_weight_device', blkio_weight_device, 'list' + ) + if version_lt(version, '1.22'): + raise host_config_version_error('blkio_weight_device', '1.22') + self["BlkioWeightDevice"] = blkio_weight_device + + if device_read_bps: + if not isinstance(device_read_bps, list): + raise host_config_type_error( + 'device_read_bps', device_read_bps, 'list' + ) + if version_lt(version, '1.22'): + raise host_config_version_error('device_read_bps', '1.22') + self["BlkioDeviceReadBps"] = device_read_bps + + if device_write_bps: + if not isinstance(device_write_bps, list): + raise host_config_type_error( + 'device_write_bps', device_write_bps, 'list' + ) + if version_lt(version, '1.22'): + raise host_config_version_error('device_write_bps', '1.22') + self["BlkioDeviceWriteBps"] = device_write_bps + + if device_read_iops: + if not isinstance(device_read_iops, list): + raise host_config_type_error( + 'device_read_iops', device_read_iops, 'list' + ) + if version_lt(version, '1.22'): + raise host_config_version_error('device_read_iops', '1.22') + self["BlkioDeviceReadIOps"] = device_read_iops + + if device_write_iops: + if not isinstance(device_write_iops, list): + raise host_config_type_error( + 'device_write_iops', device_write_iops, 'list' + ) + if version_lt(version, '1.22'): + raise host_config_version_error('device_write_iops', '1.22') + self["BlkioDeviceWriteIOps"] = device_write_iops + + if tmpfs: + if version_lt(version, '1.22'): + raise host_config_version_error('tmpfs', '1.22') + self["Tmpfs"] = convert_tmpfs_mounts(tmpfs) + + if userns_mode: + if version_lt(version, '1.23'): + raise host_config_version_error('userns_mode', '1.23') + + if userns_mode != "host": + raise host_config_value_error("userns_mode", userns_mode) + self['UsernsMode'] = userns_mode + + if uts_mode: + if uts_mode != "host": + raise host_config_value_error("uts_mode", uts_mode) + self['UTSMode'] = uts_mode + + if pids_limit: + if not isinstance(pids_limit, int): + raise host_config_type_error('pids_limit', pids_limit, 'int') + if version_lt(version, '1.23'): + raise 
host_config_version_error('pids_limit', '1.23') + self["PidsLimit"] = pids_limit + + if isolation: + if not isinstance(isolation, str): + raise host_config_type_error('isolation', isolation, 'string') + if version_lt(version, '1.24'): + raise host_config_version_error('isolation', '1.24') + self['Isolation'] = isolation + + if auto_remove: + if version_lt(version, '1.25'): + raise host_config_version_error('auto_remove', '1.25') + self['AutoRemove'] = auto_remove + + if storage_opt is not None: + if version_lt(version, '1.24'): + raise host_config_version_error('storage_opt', '1.24') + self['StorageOpt'] = storage_opt + + if init is not None: + if version_lt(version, '1.25'): + raise host_config_version_error('init', '1.25') + self['Init'] = init + + if init_path is not None: + if version_lt(version, '1.25'): + raise host_config_version_error('init_path', '1.25') + + if version_gte(version, '1.29'): + # https://github.com/moby/moby/pull/32470 + raise host_config_version_error('init_path', '1.29', False) + self['InitPath'] = init_path + + if volume_driver is not None: + self['VolumeDriver'] = volume_driver + + if cpu_count: + if not isinstance(cpu_count, int): + raise host_config_type_error('cpu_count', cpu_count, 'int') + if version_lt(version, '1.25'): + raise host_config_version_error('cpu_count', '1.25') + + self['CpuCount'] = cpu_count + + if cpu_percent: + if not isinstance(cpu_percent, int): + raise host_config_type_error('cpu_percent', cpu_percent, 'int') + if version_lt(version, '1.25'): + raise host_config_version_error('cpu_percent', '1.25') + + self['CpuPercent'] = cpu_percent + + if nano_cpus: + if not isinstance(nano_cpus, int): + raise host_config_type_error('nano_cpus', nano_cpus, 'int') + if version_lt(version, '1.25'): + raise host_config_version_error('nano_cpus', '1.25') + + self['NanoCpus'] = nano_cpus + + if runtime: + if version_lt(version, '1.25'): + raise host_config_version_error('runtime', '1.25') + self['Runtime'] = runtime + + if mounts is not None: + if version_lt(version, '1.30'): + raise host_config_version_error('mounts', '1.30') + self['Mounts'] = mounts + + if device_cgroup_rules is not None: + if version_lt(version, '1.28'): + raise host_config_version_error('device_cgroup_rules', '1.28') + if not isinstance(device_cgroup_rules, list): + raise host_config_type_error( + 'device_cgroup_rules', device_cgroup_rules, 'list' + ) + self['DeviceCgroupRules'] = device_cgroup_rules + + if device_requests is not None: + if version_lt(version, '1.40'): + raise host_config_version_error('device_requests', '1.40') + if not isinstance(device_requests, list): + raise host_config_type_error( + 'device_requests', device_requests, 'list' + ) + self['DeviceRequests'] = [] + for req in device_requests: + if not isinstance(req, DeviceRequest): + req = DeviceRequest(**req) + self['DeviceRequests'].append(req) + + if cgroupns: + self['CgroupnsMode'] = cgroupns + + +def host_config_type_error(param, param_value, expected): + return TypeError( + f'Invalid type for {param} param: expected {expected} ' + f'but found {type(param_value)}' + ) + + +def host_config_version_error(param, version, less_than=True): + operator = '<' if less_than else '>' + return errors.InvalidVersion( + f'{param} param is not supported in API versions {operator} {version}', + ) + +def host_config_value_error(param, param_value): + return ValueError(f'Invalid value for {param} param: {param_value}') + + +def host_config_incompatible_error(param, param_value, incompatible_param): + return 
errors.InvalidArgument( + f'\"{param_value}\" {param} is incompatible with {incompatible_param}' + ) + + +class ContainerConfig(dict): + def __init__( + self, version, image, command, hostname=None, user=None, detach=False, + stdin_open=False, tty=False, ports=None, environment=None, + volumes=None, network_disabled=False, entrypoint=None, + working_dir=None, domainname=None, host_config=None, mac_address=None, + labels=None, stop_signal=None, networking_config=None, + healthcheck=None, stop_timeout=None, runtime=None + ): + + if stop_timeout is not None and version_lt(version, '1.25'): + raise errors.InvalidVersion( + 'stop_timeout was only introduced in API version 1.25' + ) + + if healthcheck is not None: + if version_lt(version, '1.24'): + raise errors.InvalidVersion( + 'Health options were only introduced in API version 1.24' + ) + + if version_lt(version, '1.29') and 'StartPeriod' in healthcheck: + raise errors.InvalidVersion( + 'healthcheck start period was introduced in API ' + 'version 1.29' + ) + + if isinstance(command, str): + command = split_command(command) + + if isinstance(entrypoint, str): + entrypoint = split_command(entrypoint) + + if isinstance(environment, dict): + environment = format_environment(environment) + + if isinstance(labels, list): + labels = {lbl: '' for lbl in labels} + + if isinstance(ports, list): + exposed_ports = {} + for port_definition in ports: + port = port_definition + proto = 'tcp' + if isinstance(port_definition, tuple): + if len(port_definition) == 2: + proto = port_definition[1] + port = port_definition[0] + exposed_ports[f'{port}/{proto}'] = {} + ports = exposed_ports + + if isinstance(volumes, str): + volumes = [volumes, ] + + if isinstance(volumes, list): + volumes_dict = {} + for vol in volumes: + volumes_dict[vol] = {} + volumes = volumes_dict + + if healthcheck and isinstance(healthcheck, dict): + healthcheck = Healthcheck(**healthcheck) + + attach_stdin = False + attach_stdout = False + attach_stderr = False + stdin_once = False + + if not detach: + attach_stdout = True + attach_stderr = True + + if stdin_open: + attach_stdin = True + stdin_once = True + + self.update({ + 'Hostname': hostname, + 'Domainname': domainname, + 'ExposedPorts': ports, + 'User': str(user) if user is not None else None, + 'Tty': tty, + 'OpenStdin': stdin_open, + 'StdinOnce': stdin_once, + 'AttachStdin': attach_stdin, + 'AttachStdout': attach_stdout, + 'AttachStderr': attach_stderr, + 'Env': environment, + 'Cmd': command, + 'Image': image, + 'Volumes': volumes, + 'NetworkDisabled': network_disabled, + 'Entrypoint': entrypoint, + 'WorkingDir': working_dir, + 'HostConfig': host_config, + 'NetworkingConfig': networking_config, + 'MacAddress': mac_address, + 'Labels': labels, + 'StopSignal': stop_signal, + 'Healthcheck': healthcheck, + 'StopTimeout': stop_timeout, + 'Runtime': runtime + }) diff --git a/docker/types/daemon.py b/docker/types/daemon.py new file mode 100644 index 0000000000..04e6ccb2d7 --- /dev/null +++ b/docker/types/daemon.py @@ -0,0 +1,71 @@ +import socket + +import urllib3 + +from ..errors import DockerException + + +class CancellableStream: + """ + Stream wrapper for real-time events, logs, etc. from the server. + + Example: + >>> events = client.events() + >>> for event in events: + ... 
print(event) + >>> # and cancel from another thread + >>> events.close() + """ + + def __init__(self, stream, response): + self._stream = stream + self._response = response + + def __iter__(self): + return self + + def __next__(self): + try: + return next(self._stream) + except urllib3.exceptions.ProtocolError: + raise StopIteration from None + except OSError: + raise StopIteration from None + + next = __next__ + + def close(self): + """ + Closes the event streaming. + """ + + if not self._response.raw.closed: + # find the underlying socket object + # based on api.client._get_raw_response_socket + + sock_fp = self._response.raw._fp.fp + + if hasattr(sock_fp, 'raw'): + sock_raw = sock_fp.raw + + if hasattr(sock_raw, 'sock'): + sock = sock_raw.sock + + elif hasattr(sock_raw, '_sock'): + sock = sock_raw._sock + + elif hasattr(sock_fp, 'channel'): + # We're working with a paramiko (SSH) channel, which doesn't + # support cancelable streams with the current implementation + raise DockerException( + 'Cancellable streams not supported for the SSH protocol' + ) + else: + sock = sock_fp._sock + + if hasattr(urllib3.contrib, 'pyopenssl') and isinstance( + sock, urllib3.contrib.pyopenssl.WrappedSocket): + sock = sock.socket + + sock.shutdown(socket.SHUT_RDWR) + sock.close() diff --git a/docker/types/healthcheck.py b/docker/types/healthcheck.py new file mode 100644 index 0000000000..dfc88a9771 --- /dev/null +++ b/docker/types/healthcheck.py @@ -0,0 +1,88 @@ +from .base import DictType + + +class Healthcheck(DictType): + """ + Defines a healthcheck configuration for a container or service. + + Args: + test (:py:class:`list` or str): Test to perform to determine + container health. Possible values: + + - Empty list: Inherit healthcheck from parent image + - ``["NONE"]``: Disable healthcheck + - ``["CMD", args...]``: exec arguments directly. + - ``["CMD-SHELL", command]``: Run command in the system's + default shell. + + If a string is provided, it will be used as a ``CMD-SHELL`` + command. + interval (int): The time to wait between checks in nanoseconds. It + should be 0 or at least 1000000 (1 ms). + timeout (int): The time to wait before considering the check to + have hung. It should be 0 or at least 1000000 (1 ms). + retries (int): The number of consecutive failures needed to + consider a container as unhealthy. + start_period (int): Start period for the container to + initialize before starting health-retries countdown in + nanoseconds. It should be 0 or at least 1000000 (1 ms). 
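+
+    Example:
+
+        Since every duration is in nanoseconds, scaling from seconds
+        explicitly avoids sending values the engine will reject; the
+        probe command is illustrative.
+
+        >>> from docker.types import Healthcheck
+        >>> SECOND = 10 ** 9
+        >>> healthcheck = Healthcheck(
+        ...     test='curl -f http://localhost/ || exit 1',
+        ...     interval=30 * SECOND,
+        ...     timeout=5 * SECOND,
+        ...     retries=3,
+        ...     start_period=10 * SECOND,
+        ... )
+        >>> healthcheck.test
+        ['CMD-SHELL', 'curl -f http://localhost/ || exit 1']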
+ """ + def __init__(self, **kwargs): + test = kwargs.get('test', kwargs.get('Test')) + if isinstance(test, str): + test = ["CMD-SHELL", test] + + interval = kwargs.get('interval', kwargs.get('Interval')) + timeout = kwargs.get('timeout', kwargs.get('Timeout')) + retries = kwargs.get('retries', kwargs.get('Retries')) + start_period = kwargs.get('start_period', kwargs.get('StartPeriod')) + + super().__init__({ + 'Test': test, + 'Interval': interval, + 'Timeout': timeout, + 'Retries': retries, + 'StartPeriod': start_period + }) + + @property + def test(self): + return self['Test'] + + @test.setter + def test(self, value): + if isinstance(value, str): + value = ["CMD-SHELL", value] + self['Test'] = value + + @property + def interval(self): + return self['Interval'] + + @interval.setter + def interval(self, value): + self['Interval'] = value + + @property + def timeout(self): + return self['Timeout'] + + @timeout.setter + def timeout(self, value): + self['Timeout'] = value + + @property + def retries(self): + return self['Retries'] + + @retries.setter + def retries(self, value): + self['Retries'] = value + + @property + def start_period(self): + return self['StartPeriod'] + + @start_period.setter + def start_period(self, value): + self['StartPeriod'] = value diff --git a/docker/types/networks.py b/docker/types/networks.py new file mode 100644 index 0000000000..ed1ced13ed --- /dev/null +++ b/docker/types/networks.py @@ -0,0 +1,128 @@ +from .. import errors +from ..utils import normalize_links, version_lt + + +class EndpointConfig(dict): + def __init__(self, version, aliases=None, links=None, ipv4_address=None, + ipv6_address=None, link_local_ips=None, driver_opt=None, + mac_address=None): + if version_lt(version, '1.22'): + raise errors.InvalidVersion( + 'Endpoint config is not supported for API version < 1.22' + ) + + if aliases: + self["Aliases"] = aliases + + if links: + self["Links"] = normalize_links(links) + + ipam_config = {} + if ipv4_address: + ipam_config['IPv4Address'] = ipv4_address + + if ipv6_address: + ipam_config['IPv6Address'] = ipv6_address + + if mac_address: + if version_lt(version, '1.25'): + raise errors.InvalidVersion( + 'mac_address is not supported for API version < 1.25' + ) + self['MacAddress'] = mac_address + + if link_local_ips is not None: + if version_lt(version, '1.24'): + raise errors.InvalidVersion( + 'link_local_ips is not supported for API version < 1.24' + ) + ipam_config['LinkLocalIPs'] = link_local_ips + + if ipam_config: + self['IPAMConfig'] = ipam_config + + if driver_opt: + if version_lt(version, '1.32'): + raise errors.InvalidVersion( + 'DriverOpts is not supported for API version < 1.32' + ) + if not isinstance(driver_opt, dict): + raise TypeError('driver_opt must be a dictionary') + self['DriverOpts'] = driver_opt + + +class NetworkingConfig(dict): + def __init__(self, endpoints_config=None): + if endpoints_config: + self["EndpointsConfig"] = endpoints_config + + +class IPAMConfig(dict): + """ + Create an IPAM (IP Address Management) config dictionary to be used with + :py:meth:`~docker.api.network.NetworkApiMixin.create_network`. + + Args: + + driver (str): The IPAM driver to use. Defaults to ``default``. + pool_configs (:py:class:`list`): A list of pool configurations + (:py:class:`~docker.types.IPAMPool`). Defaults to empty list. + options (dict): Driver options as a key-value dictionary. + Defaults to `None`. 
+ + Example: + + >>> ipam_config = docker.types.IPAMConfig(driver='default') + >>> network = client.create_network('network1', ipam=ipam_config) + + """ + def __init__(self, driver='default', pool_configs=None, options=None): + self.update({ + 'Driver': driver, + 'Config': pool_configs or [] + }) + + if options: + if not isinstance(options, dict): + raise TypeError('IPAMConfig options must be a dictionary') + self['Options'] = options + + +class IPAMPool(dict): + """ + Create an IPAM pool config dictionary to be added to the + ``pool_configs`` parameter of + :py:class:`~docker.types.IPAMConfig`. + + Args: + + subnet (str): Custom subnet for this IPAM pool using the CIDR + notation. Defaults to ``None``. + iprange (str): Custom IP range for endpoints in this IPAM pool using + the CIDR notation. Defaults to ``None``. + gateway (str): Custom IP address for the pool's gateway. + aux_addresses (dict): A dictionary of ``key -> ip_address`` + relationships specifying auxiliary addresses that need to be + allocated by the IPAM driver. + + Example: + + >>> ipam_pool = docker.types.IPAMPool( + subnet='124.42.0.0/16', + iprange='124.42.0.0/24', + gateway='124.42.0.254', + aux_addresses={ + 'reserved1': '124.42.1.1' + } + ) + >>> ipam_config = docker.types.IPAMConfig( + pool_configs=[ipam_pool]) + """ + def __init__(self, subnet=None, iprange=None, gateway=None, + aux_addresses=None): + self.update({ + 'Subnet': subnet, + 'IPRange': iprange, + 'Gateway': gateway, + 'AuxiliaryAddresses': aux_addresses + }) diff --git a/docker/types/services.py b/docker/types/services.py new file mode 100644 index 0000000000..69c0c498ea --- /dev/null +++ b/docker/types/services.py @@ -0,0 +1,870 @@ +from .. import errors +from ..constants import IS_WINDOWS_PLATFORM +from ..utils import ( + check_resource, + convert_service_networks, + format_environment, + format_extra_hosts, + parse_bytes, + split_command, +) + + +class TaskTemplate(dict): + """ + Describe the task specification to be used when creating or updating a + service. + + Args: + + container_spec (ContainerSpec): Container settings for containers + started as part of this task. + log_driver (DriverConfig): Log configuration for containers created as + part of the service. + resources (Resources): Resource requirements which apply to each + individual container created as part of the service. + restart_policy (RestartPolicy): Specification for the restart policy + which applies to containers created as part of this service. + placement (Placement): Placement instructions for the scheduler. + If a list is passed instead, it is assumed to be a list of + constraints as part of a :py:class:`Placement` object. + networks (:py:class:`list`): List of network names or IDs or + :py:class:`NetworkAttachmentConfig` to attach the service to. + force_update (int): A counter that triggers an update even if no + relevant parameters have been changed. 
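+
+    Example (a minimal sketch; the image, command and restart condition
+    are illustrative, not defaults):
+
+        >>> container_spec = docker.types.ContainerSpec(
+        ...     image='busybox', command=['sleep', '60']
+        ... )
+        >>> task_template = docker.types.TaskTemplate(
+        ...     container_spec=container_spec,
+        ...     restart_policy=docker.types.RestartPolicy(
+        ...         condition='on-failure'
+        ...     )
+        ... )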
+ """ + + def __init__(self, container_spec, resources=None, restart_policy=None, + placement=None, log_driver=None, networks=None, + force_update=None): + self['ContainerSpec'] = container_spec + if resources: + self['Resources'] = resources + if restart_policy: + self['RestartPolicy'] = restart_policy + if placement: + if isinstance(placement, list): + placement = Placement(constraints=placement) + self['Placement'] = placement + if log_driver: + self['LogDriver'] = log_driver + if networks: + self['Networks'] = convert_service_networks(networks) + + if force_update is not None: + if not isinstance(force_update, int): + raise TypeError('force_update must be an integer') + self['ForceUpdate'] = force_update + + @property + def container_spec(self): + return self.get('ContainerSpec') + + @property + def resources(self): + return self.get('Resources') + + @property + def restart_policy(self): + return self.get('RestartPolicy') + + @property + def placement(self): + return self.get('Placement') + + +class ContainerSpec(dict): + """ + Describes the behavior of containers that are part of a task, and is used + when declaring a :py:class:`~docker.types.TaskTemplate`. + + Args: + + image (string): The image name to use for the container. + command (string or list): The command to be run in the image. + args (:py:class:`list`): Arguments to the command. + hostname (string): The hostname to set on the container. + env (dict): Environment variables. + workdir (string): The working directory for commands to run in. + user (string): The user inside the container. + labels (dict): A map of labels to associate with the service. + mounts (:py:class:`list`): A list of specifications for mounts to be + added to containers created as part of the service. See the + :py:class:`~docker.types.Mount` class for details. + stop_grace_period (int): Amount of time to wait for the container to + terminate before forcefully killing it. + secrets (:py:class:`list`): List of :py:class:`SecretReference` to be + made available inside the containers. + tty (boolean): Whether a pseudo-TTY should be allocated. + groups (:py:class:`list`): A list of additional groups that the + container process will run as. + open_stdin (boolean): Open ``stdin`` + read_only (boolean): Mount the container's root filesystem as read + only. + stop_signal (string): Set signal to stop the service's containers + healthcheck (Healthcheck): Healthcheck + configuration for this service. + hosts (:py:class:`dict`): A set of host to IP mappings to add to + the container's ``hosts`` file. + dns_config (DNSConfig): Specification for DNS + related configurations in resolver configuration file. + configs (:py:class:`list`): List of :py:class:`ConfigReference` that + will be exposed to the service. + privileges (Privileges): Security options for the service's containers. + isolation (string): Isolation technology used by the service's + containers. Only used for Windows containers. + init (boolean): Run an init inside the container that forwards signals + and reaps processes. + cap_add (:py:class:`list`): A list of kernel capabilities to add to the + default set for the container. + cap_drop (:py:class:`list`): A list of kernel capabilities to drop from + the default set for the container. 
+        sysctls (:py:class:`dict`): A dict of sysctl values to add to
+            the container
+    """
+
+    def __init__(self, image, command=None, args=None, hostname=None, env=None,
+                 workdir=None, user=None, labels=None, mounts=None,
+                 stop_grace_period=None, secrets=None, tty=None, groups=None,
+                 open_stdin=None, read_only=None, stop_signal=None,
+                 healthcheck=None, hosts=None, dns_config=None, configs=None,
+                 privileges=None, isolation=None, init=None, cap_add=None,
+                 cap_drop=None, sysctls=None):
+        self['Image'] = image
+
+        if isinstance(command, str):
+            command = split_command(command)
+        self['Command'] = command
+        self['Args'] = args
+
+        if hostname is not None:
+            self['Hostname'] = hostname
+        if env is not None:
+            if isinstance(env, dict):
+                self['Env'] = format_environment(env)
+            else:
+                self['Env'] = env
+        if workdir is not None:
+            self['Dir'] = workdir
+        if user is not None:
+            self['User'] = user
+        if groups is not None:
+            self['Groups'] = groups
+        if stop_signal is not None:
+            self['StopSignal'] = stop_signal
+        if stop_grace_period is not None:
+            self['StopGracePeriod'] = stop_grace_period
+        if labels is not None:
+            self['Labels'] = labels
+        if hosts is not None:
+            self['Hosts'] = format_extra_hosts(hosts, task=True)
+
+        if mounts is not None:
+            parsed_mounts = []
+            for mount in mounts:
+                if isinstance(mount, str):
+                    parsed_mounts.append(Mount.parse_mount_string(mount))
+                else:
+                    # If mount already parsed
+                    parsed_mounts.append(mount)
+            self['Mounts'] = parsed_mounts
+
+        if secrets is not None:
+            if not isinstance(secrets, list):
+                raise TypeError('secrets must be a list')
+            self['Secrets'] = secrets
+
+        if configs is not None:
+            if not isinstance(configs, list):
+                raise TypeError('configs must be a list')
+            self['Configs'] = configs
+
+        if dns_config is not None:
+            self['DNSConfig'] = dns_config
+        if privileges is not None:
+            self['Privileges'] = privileges
+        if healthcheck is not None:
+            self['Healthcheck'] = healthcheck
+
+        if tty is not None:
+            self['TTY'] = tty
+        if open_stdin is not None:
+            self['OpenStdin'] = open_stdin
+        if read_only is not None:
+            self['ReadOnly'] = read_only
+
+        if isolation is not None:
+            self['Isolation'] = isolation
+
+        if init is not None:
+            self['Init'] = init
+
+        if cap_add is not None:
+            if not isinstance(cap_add, list):
+                raise TypeError('cap_add must be a list')
+
+            self['CapabilityAdd'] = cap_add
+
+        if cap_drop is not None:
+            if not isinstance(cap_drop, list):
+                raise TypeError('cap_drop must be a list')
+
+            self['CapabilityDrop'] = cap_drop
+
+        if sysctls is not None:
+            if not isinstance(sysctls, dict):
+                raise TypeError('sysctls must be a dict')
+
+            self['Sysctls'] = sysctls
+
+
+class Mount(dict):
+    """
+    Describes a mounted folder's configuration inside a container. A list of
+    :py:class:`Mount` would be used as part of a
+    :py:class:`~docker.types.ContainerSpec`.
+
+    Args:
+
+        target (string): Container path.
+        source (string): Mount source (e.g. a volume name or a host path).
+        type (string): The mount type (``bind`` / ``volume`` / ``tmpfs`` /
+            ``npipe``). Default: ``volume``.
+        read_only (bool): Whether the mount should be read-only.
+        consistency (string): The consistency requirement for the mount. One of
+            ``default``, ``consistent``, ``cached``, ``delegated``.
+        propagation (string): A propagation mode with the value ``[r]private``,
+            ``[r]shared``, or ``[r]slave``. Only valid for the ``bind`` type.
+        no_copy (bool): False if the volume should be populated with the data
+            from the target. Default: ``False``. Only valid for the ``volume``
+            type.
+        labels (dict): User-defined name and labels for the volume. Only valid
+            for the ``volume`` type.
+        driver_config (DriverConfig): Volume driver configuration. Only valid
+            for the ``volume`` type.
+        subpath (str): Path inside a volume to mount instead of the volume
+            root.
+        tmpfs_size (int or string): The size for the tmpfs mount in bytes.
+        tmpfs_mode (int): The permission mode for the tmpfs mount.
+    """
+
+    def __init__(self, target, source, type='volume', read_only=False,
+                 consistency=None, propagation=None, no_copy=False,
+                 labels=None, driver_config=None, tmpfs_size=None,
+                 tmpfs_mode=None, subpath=None):
+        self['Target'] = target
+        self['Source'] = source
+        if type not in ('bind', 'volume', 'tmpfs', 'npipe'):
+            raise errors.InvalidArgument(
+                f'Unsupported mount type: "{type}"'
+            )
+        self['Type'] = type
+        self['ReadOnly'] = read_only
+
+        if consistency:
+            self['Consistency'] = consistency
+
+        if type == 'bind':
+            if propagation is not None:
+                self['BindOptions'] = {
+                    'Propagation': propagation
+                }
+            if any([labels, driver_config, no_copy, tmpfs_size, tmpfs_mode, subpath]):
+                raise errors.InvalidArgument(
+                    'Incompatible options have been provided for the bind '
+                    'type mount.'
+                )
+        elif type == 'volume':
+            volume_opts = {}
+            if no_copy:
+                volume_opts['NoCopy'] = True
+            if labels:
+                volume_opts['Labels'] = labels
+            if driver_config:
+                volume_opts['DriverConfig'] = driver_config
+            if subpath:
+                volume_opts['Subpath'] = subpath
+            if volume_opts:
+                self['VolumeOptions'] = volume_opts
+            if any([propagation, tmpfs_size, tmpfs_mode]):
+                raise errors.InvalidArgument(
+                    'Incompatible options have been provided for the volume '
+                    'type mount.'
+                )
+        elif type == 'tmpfs':
+            tmpfs_opts = {}
+            if tmpfs_mode:
+                if not isinstance(tmpfs_mode, int):
+                    raise errors.InvalidArgument(
+                        'tmpfs_mode must be an integer'
+                    )
+                tmpfs_opts['Mode'] = tmpfs_mode
+            if tmpfs_size:
+                tmpfs_opts['SizeBytes'] = parse_bytes(tmpfs_size)
+            if tmpfs_opts:
+                self['TmpfsOptions'] = tmpfs_opts
+            if any([propagation, labels, driver_config, no_copy]):
+                raise errors.InvalidArgument(
+                    'Incompatible options have been provided for the tmpfs '
+                    'type mount.'
+                )
+
+    @classmethod
+    def parse_mount_string(cls, string):
+        parts = string.split(':')
+        if len(parts) > 3:
+            raise errors.InvalidArgument(
+                f'Invalid mount format "{string}"'
+            )
+        if len(parts) == 1:
+            return cls(target=parts[0], source=None)
+        else:
+            target = parts[1]
+            source = parts[0]
+            mount_type = 'volume'
+            if source.startswith('/') or (
+                IS_WINDOWS_PLATFORM and source[0].isalpha() and
+                source[1] == ':'
+            ):
+                # FIXME: That windows condition will fail earlier since we
+                # split on ':'. We should look into doing a smarter split
+                # if we detect we are on Windows.
+                mount_type = 'bind'
+            read_only = not (len(parts) == 2 or parts[2] == 'rw')
+            return cls(target, source, read_only=read_only, type=mount_type)
+
+
+class Resources(dict):
+    """
+    Configures resource allocation for containers when made part of a
+    :py:class:`~docker.types.ContainerSpec`.
+
+    Args:
+
+        cpu_limit (int): CPU limit in units of 10^9 CPU shares.
+        mem_limit (int): Memory limit in Bytes.
+        cpu_reservation (int): CPU reservation in units of 10^9 CPU shares.
+        mem_reservation (int): Memory reservation in Bytes.
+        generic_resources (dict or :py:class:`list`): Node level generic
+            resources, for example a GPU, using the following format:
+            ``{ resource_name: resource_value }``. Alternatively, a list of
+            resource specifications as defined by the Engine API.
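+
+    Example (an illustrative sketch; ``cpu_limit`` is given in units of
+    10^9, so ``1500000000`` requests 1.5 CPUs, and the GPU count is a
+    made-up value):
+
+        >>> resources = docker.types.Resources(
+        ...     cpu_limit=1500000000,
+        ...     mem_limit=512 * 1024 * 1024,
+        ...     generic_resources={'GPU': 2}
+        ... )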
+ """ + + def __init__(self, cpu_limit=None, mem_limit=None, cpu_reservation=None, + mem_reservation=None, generic_resources=None): + limits = {} + reservation = {} + if cpu_limit is not None: + limits['NanoCPUs'] = cpu_limit + if mem_limit is not None: + limits['MemoryBytes'] = mem_limit + if cpu_reservation is not None: + reservation['NanoCPUs'] = cpu_reservation + if mem_reservation is not None: + reservation['MemoryBytes'] = mem_reservation + if generic_resources is not None: + reservation['GenericResources'] = ( + _convert_generic_resources_dict(generic_resources) + ) + if limits: + self['Limits'] = limits + if reservation: + self['Reservations'] = reservation + + +def _convert_generic_resources_dict(generic_resources): + if isinstance(generic_resources, list): + return generic_resources + if not isinstance(generic_resources, dict): + raise errors.InvalidArgument( + 'generic_resources must be a dict or a list ' + f'(found {type(generic_resources)})' + ) + resources = [] + for kind, value in generic_resources.items(): + resource_type = None + if isinstance(value, int): + resource_type = 'DiscreteResourceSpec' + elif isinstance(value, str): + resource_type = 'NamedResourceSpec' + else: + kv = {kind: value} + raise errors.InvalidArgument( + f'Unsupported generic resource reservation type: {kv}' + ) + resources.append({ + resource_type: {'Kind': kind, 'Value': value} + }) + return resources + + +class UpdateConfig(dict): + """ + + Used to specify the way container updates should be performed by a service. + + Args: + + parallelism (int): Maximum number of tasks to be updated in one + iteration (0 means unlimited parallelism). Default: 0. + delay (int): Amount of time between updates, in nanoseconds. + failure_action (string): Action to take if an updated task fails to + run, or stops running during the update. Acceptable values are + ``continue``, ``pause``, as well as ``rollback`` since API v1.28. + Default: ``continue`` + monitor (int): Amount of time to monitor each updated task for + failures, in nanoseconds. + max_failure_ratio (float): The fraction of tasks that may fail during + an update before the failure action is invoked, specified as a + floating point number between 0 and 1. Default: 0 + order (string): Specifies the order of operations when rolling out an + updated task. Either ``start-first`` or ``stop-first`` are accepted. 
+ """ + + def __init__(self, parallelism=0, delay=None, failure_action='continue', + monitor=None, max_failure_ratio=None, order=None): + self['Parallelism'] = parallelism + if delay is not None: + self['Delay'] = delay + if failure_action not in ('pause', 'continue', 'rollback'): + raise errors.InvalidArgument( + 'failure_action must be one of `pause`, `continue`, `rollback`' + ) + self['FailureAction'] = failure_action + + if monitor is not None: + if not isinstance(monitor, int): + raise TypeError('monitor must be an integer') + self['Monitor'] = monitor + + if max_failure_ratio is not None: + if not isinstance(max_failure_ratio, (float, int)): + raise TypeError('max_failure_ratio must be a float') + if max_failure_ratio > 1 or max_failure_ratio < 0: + raise errors.InvalidArgument( + 'max_failure_ratio must be a number between 0 and 1' + ) + self['MaxFailureRatio'] = max_failure_ratio + + if order is not None: + if order not in ('start-first', 'stop-first'): + raise errors.InvalidArgument( + 'order must be either `start-first` or `stop-first`' + ) + self['Order'] = order + + +class RollbackConfig(UpdateConfig): + """ + Used to specify the way container rollbacks should be performed by a + service + + Args: + parallelism (int): Maximum number of tasks to be rolled back in one + iteration (0 means unlimited parallelism). Default: 0 + delay (int): Amount of time between rollbacks, in nanoseconds. + failure_action (string): Action to take if a rolled back task fails to + run, or stops running during the rollback. Acceptable values are + ``continue``, ``pause`` or ``rollback``. + Default: ``continue`` + monitor (int): Amount of time to monitor each rolled back task for + failures, in nanoseconds. + max_failure_ratio (float): The fraction of tasks that may fail during + a rollback before the failure action is invoked, specified as a + floating point number between 0 and 1. Default: 0 + order (string): Specifies the order of operations when rolling out a + rolled back task. Either ``start-first`` or ``stop-first`` are + accepted. + """ + pass + + +class RestartConditionTypesEnum: + _values = ( + 'none', + 'on-failure', + 'any', + ) + NONE, ON_FAILURE, ANY = _values + + +class RestartPolicy(dict): + """ + Used when creating a :py:class:`~docker.types.ContainerSpec`, + dictates whether a container should restart after stopping or failing. + + Args: + + condition (string): Condition for restart (``none``, ``on-failure``, + or ``any``). Default: `none`. + delay (int): Delay between restart attempts. Default: 0 + max_attempts (int): Maximum attempts to restart a given container + before giving up. Default value is 0, which is ignored. + window (int): Time window used to evaluate the restart policy. Default + value is 0, which is unbounded. + """ + + condition_types = RestartConditionTypesEnum + + def __init__(self, condition=RestartConditionTypesEnum.NONE, delay=0, + max_attempts=0, window=0): + if condition not in self.condition_types._values: + raise TypeError( + f'Invalid RestartPolicy condition {condition}' + ) + + self['Condition'] = condition + self['Delay'] = delay + self['MaxAttempts'] = max_attempts + self['Window'] = window + + +class DriverConfig(dict): + """ + Indicates which driver to use, as well as its configuration. Can be used + as ``log_driver`` in a :py:class:`~docker.types.ContainerSpec`, + for the `driver_config` in a volume :py:class:`~docker.types.Mount`, or + as the driver object in + :py:meth:`create_secret`. + + Args: + + name (string): Name of the driver to use. 
+        options (dict): Driver-specific options. Default: ``None``.
+    """
+
+    def __init__(self, name, options=None):
+        self['Name'] = name
+        if options:
+            self['Options'] = options
+
+
+class EndpointSpec(dict):
+    """
+    Describes properties to access and load-balance a service.
+
+    Args:
+
+        mode (string): The mode of resolution to use for internal load
+            balancing between tasks (``'vip'`` or ``'dnsrr'``). Defaults to
+            ``'vip'`` if not provided.
+        ports (dict): Exposed ports that this service is accessible on from the
+            outside, in the form of ``{ published_port: target_port }`` or
+            ``{ published_port: <port_config_tuple> }``. Port config tuple
+            format is ``(target_port [, protocol [, publish_mode]])``.
+            Ports can only be provided if the ``vip`` resolution mode is used.
+    """
+
+    def __init__(self, mode=None, ports=None):
+        if ports:
+            self['Ports'] = convert_service_ports(ports)
+        if mode:
+            self['Mode'] = mode
+
+
+def convert_service_ports(ports):
+    if isinstance(ports, list):
+        return ports
+    if not isinstance(ports, dict):
+        raise TypeError(
+            'Invalid type for ports, expected dict or list'
+        )
+
+    result = []
+    for k, v in ports.items():
+        port_spec = {
+            'Protocol': 'tcp',
+            'PublishedPort': k
+        }
+
+        if isinstance(v, tuple):
+            port_spec['TargetPort'] = v[0]
+            if len(v) >= 2 and v[1] is not None:
+                port_spec['Protocol'] = v[1]
+            if len(v) == 3:
+                port_spec['PublishMode'] = v[2]
+            if len(v) > 3:
+                raise ValueError(
+                    'Service port configuration can have at most 3 elements: '
+                    '(target_port, protocol, mode)'
+                )
+        else:
+            port_spec['TargetPort'] = v
+
+        result.append(port_spec)
+    return result
+
+
+class ServiceMode(dict):
+    """
+    Indicate whether a service or a job should be deployed as a replicated
+    or global service, and associated parameters
+
+    Args:
+        mode (string): Can be either ``replicated``, ``global``,
+            ``replicated-job`` or ``global-job``
+        replicas (int): Number of replicas. For replicated services only.
+        concurrency (int): Number of concurrent jobs. For replicated job
+            services only.
+    """
+
+    def __init__(self, mode, replicas=None, concurrency=None):
+        replicated_modes = ('replicated', 'replicated-job')
+        supported_modes = replicated_modes + ('global', 'global-job')
+
+        if mode not in supported_modes:
+            raise errors.InvalidArgument(
+                'mode must be either "replicated", "global", "replicated-job"'
+                ' or "global-job"'
+            )
+
+        if mode not in replicated_modes:
+            if replicas is not None:
+                raise errors.InvalidArgument(
+                    'replicas can only be used for "replicated" or'
+                    ' "replicated-job" mode'
+                )
+
+            if concurrency is not None:
+                raise errors.InvalidArgument(
+                    'concurrency can only be used for "replicated-job" mode'
+                )
+
+        service_mode = self._convert_mode(mode)
+        self.mode = service_mode
+        self[service_mode] = {}
+
+        if replicas is not None:
+            if mode == 'replicated':
+                self[service_mode]['Replicas'] = replicas
+
+            if mode == 'replicated-job':
+                self[service_mode]['MaxConcurrent'] = concurrency or 1
+                self[service_mode]['TotalCompletions'] = replicas
+
+    @staticmethod
+    def _convert_mode(original_mode):
+        if original_mode == 'global-job':
+            return 'GlobalJob'
+
+        if original_mode == 'replicated-job':
+            return 'ReplicatedJob'
+
+        return original_mode
+
+    @property
+    def replicas(self):
+        if 'replicated' in self:
+            return self['replicated'].get('Replicas')
+
+        if 'ReplicatedJob' in self:
+            return self['ReplicatedJob'].get('TotalCompletions')
+
+        return None
+
+
+class SecretReference(dict):
+    """
+    Secret reference to be used as part of a :py:class:`ContainerSpec`.
+ Describes how a secret is made accessible inside the service's + containers. + + Args: + secret_id (string): Secret's ID + secret_name (string): Secret's name as defined at its creation. + filename (string): Name of the file containing the secret. Defaults + to the secret's name if not specified. + uid (string): UID of the secret file's owner. Default: 0 + gid (string): GID of the secret file's group. Default: 0 + mode (int): File access mode inside the container. Default: 0o444 + """ + @check_resource('secret_id') + def __init__(self, secret_id, secret_name, filename=None, uid=None, + gid=None, mode=0o444): + self['SecretName'] = secret_name + self['SecretID'] = secret_id + self['File'] = { + 'Name': filename or secret_name, + 'UID': uid or '0', + 'GID': gid or '0', + 'Mode': mode + } + + +class ConfigReference(dict): + """ + Config reference to be used as part of a :py:class:`ContainerSpec`. + Describes how a config is made accessible inside the service's + containers. + + Args: + config_id (string): Config's ID + config_name (string): Config's name as defined at its creation. + filename (string): Name of the file containing the config. Defaults + to the config's name if not specified. + uid (string): UID of the config file's owner. Default: 0 + gid (string): GID of the config file's group. Default: 0 + mode (int): File access mode inside the container. Default: 0o444 + """ + @check_resource('config_id') + def __init__(self, config_id, config_name, filename=None, uid=None, + gid=None, mode=0o444): + self['ConfigName'] = config_name + self['ConfigID'] = config_id + self['File'] = { + 'Name': filename or config_name, + 'UID': uid or '0', + 'GID': gid or '0', + 'Mode': mode + } + + +class Placement(dict): + """ + Placement constraints to be used as part of a :py:class:`TaskTemplate` + + Args: + constraints (:py:class:`list` of str): A list of constraints + preferences (:py:class:`list` of tuple): Preferences provide a way + to make the scheduler aware of factors such as topology. They + are provided in order from highest to lowest precedence and + are expressed as ``(strategy, descriptor)`` tuples. See + :py:class:`PlacementPreference` for details. + maxreplicas (int): Maximum number of replicas per node + platforms (:py:class:`list` of tuple): A list of platforms + expressed as ``(arch, os)`` tuples + """ + + def __init__(self, constraints=None, preferences=None, platforms=None, + maxreplicas=None): + if constraints is not None: + self['Constraints'] = constraints + if preferences is not None: + self['Preferences'] = [] + for pref in preferences: + if isinstance(pref, tuple): + pref = PlacementPreference(*pref) + self['Preferences'].append(pref) + if maxreplicas is not None: + self['MaxReplicas'] = maxreplicas + if platforms: + self['Platforms'] = [] + for plat in platforms: + self['Platforms'].append({ + 'Architecture': plat[0], 'OS': plat[1] + }) + + +class PlacementPreference(dict): + """ + Placement preference to be used as an element in the list of + preferences for :py:class:`Placement` objects. + + Args: + strategy (string): The placement strategy to implement. Currently, + the only supported strategy is ``spread``. + descriptor (string): A label descriptor. For the spread strategy, + the scheduler will try to spread tasks evenly over groups of + nodes identified by this label. + """ + + def __init__(self, strategy, descriptor): + if strategy != 'spread': + raise errors.InvalidArgument( + f'PlacementPreference strategy value is invalid ({strategy}): ' + 'must be "spread".' 
+ ) + self['Spread'] = {'SpreadDescriptor': descriptor} + + +class DNSConfig(dict): + """ + Specification for DNS related configurations in resolver configuration + file (``resolv.conf``). Part of a :py:class:`ContainerSpec` definition. + + Args: + nameservers (:py:class:`list`): The IP addresses of the name + servers. + search (:py:class:`list`): A search list for host-name lookup. + options (:py:class:`list`): A list of internal resolver variables + to be modified (e.g., ``debug``, ``ndots:3``, etc.). + """ + + def __init__(self, nameservers=None, search=None, options=None): + self['Nameservers'] = nameservers + self['Search'] = search + self['Options'] = options + + +class Privileges(dict): + r""" + Security options for a service's containers. + Part of a :py:class:`ContainerSpec` definition. + + Args: + credentialspec_file (str): Load credential spec from this file. + The file is read by the daemon, and must be present in the + CredentialSpecs subdirectory in the docker data directory, + which defaults to ``C:\ProgramData\Docker\`` on Windows. + Can not be combined with credentialspec_registry. + + credentialspec_registry (str): Load credential spec from this value + in the Windows registry. The specified registry value must be + located in: ``HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion + \Virtualization\Containers\CredentialSpecs``. + Can not be combined with credentialspec_file. + + selinux_disable (boolean): Disable SELinux + selinux_user (string): SELinux user label + selinux_role (string): SELinux role label + selinux_type (string): SELinux type label + selinux_level (string): SELinux level label + """ + + def __init__(self, credentialspec_file=None, credentialspec_registry=None, + selinux_disable=None, selinux_user=None, selinux_role=None, + selinux_type=None, selinux_level=None): + credential_spec = {} + if credentialspec_registry is not None: + credential_spec['Registry'] = credentialspec_registry + if credentialspec_file is not None: + credential_spec['File'] = credentialspec_file + + if len(credential_spec) > 1: + raise errors.InvalidArgument( + 'credentialspec_file and credentialspec_registry are mutually' + ' exclusive' + ) + + selinux_context = { + 'Disable': selinux_disable, + 'User': selinux_user, + 'Role': selinux_role, + 'Type': selinux_type, + 'Level': selinux_level, + } + + if len(credential_spec) > 0: + self['CredentialSpec'] = credential_spec + + if len(selinux_context) > 0: + self['SELinuxContext'] = selinux_context + + +class NetworkAttachmentConfig(dict): + """ + Network attachment options for a service. + + Args: + target (str): The target network for attachment. + Can be a network name or ID. + aliases (:py:class:`list`): A list of discoverable alternate names + for the service. + options (:py:class:`dict`): Driver attachment options for the + network target. + """ + + def __init__(self, target, aliases=None, options=None): + self['Target'] = target + self['Aliases'] = aliases + self['DriverOpts'] = options diff --git a/docker/types/swarm.py b/docker/types/swarm.py new file mode 100644 index 0000000000..9687a82d82 --- /dev/null +++ b/docker/types/swarm.py @@ -0,0 +1,119 @@ +from ..errors import InvalidVersion +from ..utils import version_lt + + +class SwarmSpec(dict): + """ + Describe a Swarm's configuration and options. Use + :py:meth:`~docker.api.swarm.SwarmApiMixin.create_swarm_spec` + to instantiate. 
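+
+    Example (a sketch assuming ``client`` is an already-connected
+    :py:class:`docker.APIClient`; the values shown are arbitrary):
+
+        >>> spec = client.create_swarm_spec(
+        ...     snapshot_interval=5000, log_entries_for_slow_followers=1200
+        ... )
+        >>> client.init_swarm(
+        ...     advertise_addr='eth0', listen_addr='0.0.0.0:5000',
+        ...     force_new_cluster=False, swarm_spec=spec
+        ... )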
+ """ + def __init__(self, version, task_history_retention_limit=None, + snapshot_interval=None, keep_old_snapshots=None, + log_entries_for_slow_followers=None, heartbeat_tick=None, + election_tick=None, dispatcher_heartbeat_period=None, + node_cert_expiry=None, external_cas=None, name=None, + labels=None, signing_ca_cert=None, signing_ca_key=None, + ca_force_rotate=None, autolock_managers=None, + log_driver=None): + if task_history_retention_limit is not None: + self['Orchestration'] = { + 'TaskHistoryRetentionLimit': task_history_retention_limit + } + if any([snapshot_interval, + keep_old_snapshots, + log_entries_for_slow_followers, + heartbeat_tick, + election_tick]): + self['Raft'] = { + 'SnapshotInterval': snapshot_interval, + 'KeepOldSnapshots': keep_old_snapshots, + 'LogEntriesForSlowFollowers': log_entries_for_slow_followers, + 'HeartbeatTick': heartbeat_tick, + 'ElectionTick': election_tick + } + + if dispatcher_heartbeat_period: + self['Dispatcher'] = { + 'HeartbeatPeriod': dispatcher_heartbeat_period + } + + ca_config = {} + if node_cert_expiry is not None: + ca_config['NodeCertExpiry'] = node_cert_expiry + if external_cas: + if version_lt(version, '1.25'): + if len(external_cas) > 1: + raise InvalidVersion( + 'Support for multiple external CAs is not available ' + 'for API version < 1.25' + ) + ca_config['ExternalCA'] = external_cas[0] + else: + ca_config['ExternalCAs'] = external_cas + if signing_ca_key: + if version_lt(version, '1.30'): + raise InvalidVersion( + 'signing_ca_key is not supported in API version < 1.30' + ) + ca_config['SigningCAKey'] = signing_ca_key + if signing_ca_cert: + if version_lt(version, '1.30'): + raise InvalidVersion( + 'signing_ca_cert is not supported in API version < 1.30' + ) + ca_config['SigningCACert'] = signing_ca_cert + if ca_force_rotate is not None: + if version_lt(version, '1.30'): + raise InvalidVersion( + 'force_rotate is not supported in API version < 1.30' + ) + ca_config['ForceRotate'] = ca_force_rotate + if ca_config: + self['CAConfig'] = ca_config + + if autolock_managers is not None: + if version_lt(version, '1.25'): + raise InvalidVersion( + 'autolock_managers is not supported in API version < 1.25' + ) + + self['EncryptionConfig'] = {'AutoLockManagers': autolock_managers} + + if log_driver is not None: + if version_lt(version, '1.25'): + raise InvalidVersion( + 'log_driver is not supported in API version < 1.25' + ) + + self['TaskDefaults'] = {'LogDriver': log_driver} + + if name is not None: + self['Name'] = name + if labels is not None: + self['Labels'] = labels + + +class SwarmExternalCA(dict): + """ + Configuration for forwarding signing requests to an external + certificate authority. + + Args: + url (string): URL where certificate signing requests should be + sent. + protocol (string): Protocol for communication with the external CA. + options (dict): An object with key/value pairs that are interpreted + as protocol-specific options for the external CA driver. + ca_cert (string): The root CA certificate (in PEM format) this + external CA uses to issue TLS certificates (assumed to be to + the current swarm root CA certificate if not provided). 
+ + + + """ + def __init__(self, url, protocol=None, options=None, ca_cert=None): + self['URL'] = url + self['Protocol'] = protocol + self['Options'] = options + self['CACert'] = ca_cert diff --git a/docker/unixconn/__init__.py b/docker/unixconn/__init__.py deleted file mode 100644 index 53711fc6d8..0000000000 --- a/docker/unixconn/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .unixconn import UnixAdapter # flake8: noqa diff --git a/docker/unixconn/unixconn.py b/docker/unixconn/unixconn.py deleted file mode 100644 index 551bd29456..0000000000 --- a/docker/unixconn/unixconn.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright 2013 dotCloud inc. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import six -import requests.adapters -import socket - -if six.PY3: - import http.client as httplib -else: - import httplib - -try: - import requests.packages.urllib3 as urllib3 -except ImportError: - import urllib3 - -RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer - - -class UnixHTTPConnection(httplib.HTTPConnection, object): - def __init__(self, base_url, unix_socket, timeout=60): - httplib.HTTPConnection.__init__(self, 'localhost', timeout=timeout) - self.base_url = base_url - self.unix_socket = unix_socket - self.timeout = timeout - - def connect(self): - sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - sock.settimeout(self.timeout) - sock.connect(self.unix_socket) - self.sock = sock - - -class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool): - def __init__(self, base_url, socket_path, timeout=60): - urllib3.connectionpool.HTTPConnectionPool.__init__( - self, 'localhost', timeout=timeout - ) - self.base_url = base_url - self.socket_path = socket_path - self.timeout = timeout - - def _new_conn(self): - return UnixHTTPConnection(self.base_url, self.socket_path, - self.timeout) - - -class UnixAdapter(requests.adapters.HTTPAdapter): - def __init__(self, socket_url, timeout=60): - socket_path = socket_url.replace('http+unix://', '') - if not socket_path.startswith('/'): - socket_path = '/' + socket_path - self.socket_path = socket_path - self.timeout = timeout - self.pools = RecentlyUsedContainer(10, - dispose_func=lambda p: p.close()) - super(UnixAdapter, self).__init__() - - def get_connection(self, url, proxies=None): - with self.pools.lock: - pool = self.pools.get(url) - if pool: - return pool - - pool = UnixHTTPConnectionPool(url, - self.socket_path, - self.timeout) - self.pools[url] = pool - - return pool - - def close(self): - self.pools.clear() diff --git a/docker/utils/__init__.py b/docker/utils/__init__.py index 81cc8a68c2..c086a9f073 100644 --- a/docker/utils/__init__.py +++ b/docker/utils/__init__.py @@ -1,9 +1,28 @@ + +from .build import create_archive, exclude_paths, match_tag, mkbuildcontext, tar +from .decorators import check_resource, minimum_version, update_headers from .utils import ( - compare_version, convert_port_bindings, convert_volume_binds, - mkbuildcontext, tar, parse_repository_tag, parse_host, - kwargs_from_env, convert_filters, 
create_host_config, - create_container_config, parse_bytes, ping_registry -) # flake8: noqa + compare_version, + convert_filters, + convert_port_bindings, + convert_service_networks, + convert_volume_binds, + create_host_config, + create_ipam_config, + create_ipam_pool, + datetime_to_timestamp, + decode_json_header, + format_environment, + format_extra_hosts, + kwargs_from_env, + normalize_links, + parse_bytes, + parse_devices, + parse_env_file, + parse_host, + parse_repository_tag, + split_command, + version_gte, + version_lt, +) -from .types import Ulimit, LogConfig # flake8: noqa -from .decorators import check_resource #flake8: noqa diff --git a/docker/utils/build.py b/docker/utils/build.py new file mode 100644 index 0000000000..b841391044 --- /dev/null +++ b/docker/utils/build.py @@ -0,0 +1,260 @@ +import io +import os +import re +import tarfile +import tempfile + +from ..constants import IS_WINDOWS_PLATFORM +from .fnmatch import fnmatch + +_SEP = re.compile('/|\\\\') if IS_WINDOWS_PLATFORM else re.compile('/') +_TAG = re.compile( + r"^[a-z0-9]+((\.|_|__|-+)[a-z0-9]+)*" + r"(?::[0-9]+)?(/[a-z0-9]+((\.|_|__|-+)[a-z0-9]+)*)*" + r"(:[a-zA-Z0-9_][a-zA-Z0-9._-]{0,127})?$" +) + + +def match_tag(tag: str) -> bool: + return bool(_TAG.match(tag)) + + +def tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False): + root = os.path.abspath(path) + exclude = exclude or [] + dockerfile = dockerfile or (None, None) + extra_files = [] + if dockerfile[1] is not None: + dockerignore_contents = '\n'.join( + (exclude or ['.dockerignore']) + [dockerfile[0]] + ) + extra_files = [ + ('.dockerignore', dockerignore_contents), + dockerfile, + ] + return create_archive( + files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile[0])), + root=root, fileobj=fileobj, gzip=gzip, extra_files=extra_files + ) + + +def exclude_paths(root, patterns, dockerfile=None): + """ + Given a root directory path and a list of .dockerignore patterns, return + an iterator of all paths (both regular files and directories) in the root + directory that do *not* match any of the patterns. + + All paths returned are relative to the root. + """ + + if dockerfile is None: + dockerfile = 'Dockerfile' + + patterns.append(f"!{dockerfile}") + pm = PatternMatcher(patterns) + return set(pm.walk(root)) + + +def build_file_list(root): + files = [] + for dirname, dirnames, fnames in os.walk(root): + for filename in fnames + dirnames: + longpath = os.path.join(dirname, filename) + files.append( + longpath.replace(root, '', 1).lstrip('/') + ) + + return files + + +def create_archive(root, files=None, fileobj=None, gzip=False, + extra_files=None): + extra_files = extra_files or [] + if not fileobj: + fileobj = tempfile.NamedTemporaryFile() + t = tarfile.open(mode='w:gz' if gzip else 'w', fileobj=fileobj) + if files is None: + files = build_file_list(root) + extra_names = {e[0] for e in extra_files} + for path in files: + if path in extra_names: + # Extra files override context files with the same name + continue + full_path = os.path.join(root, path) + + i = t.gettarinfo(full_path, arcname=path) + if i is None: + # This happens when we encounter a socket file. We can safely + # ignore it and proceed. + continue + + # Workaround https://bugs.python.org/issue32713 + if i.mtime < 0 or i.mtime > 8**11 - 1: + i.mtime = int(i.mtime) + + if IS_WINDOWS_PLATFORM: + # Windows doesn't keep track of the execute bit, so we make files + # and directories executable by default. 
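+            # (Mask the mode down to rwxr-xr-x, then force the three
+            # execute bits on.)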
+ i.mode = i.mode & 0o755 | 0o111 + + if i.isfile(): + try: + with open(full_path, 'rb') as f: + t.addfile(i, f) + except OSError as oe: + raise OSError( + f'Can not read file in context: {full_path}' + ) from oe + else: + # Directories, FIFOs, symlinks... don't need to be read. + t.addfile(i, None) + + for name, contents in extra_files: + info = tarfile.TarInfo(name) + contents_encoded = contents.encode('utf-8') + info.size = len(contents_encoded) + t.addfile(info, io.BytesIO(contents_encoded)) + + t.close() + fileobj.seek(0) + return fileobj + + +def mkbuildcontext(dockerfile): + f = tempfile.NamedTemporaryFile() + t = tarfile.open(mode='w', fileobj=f) + if isinstance(dockerfile, io.StringIO): + dfinfo = tarfile.TarInfo('Dockerfile') + raise TypeError('Please use io.BytesIO to create in-memory ' + 'Dockerfiles with Python 3') + elif isinstance(dockerfile, io.BytesIO): + dfinfo = tarfile.TarInfo('Dockerfile') + dfinfo.size = len(dockerfile.getvalue()) + dockerfile.seek(0) + else: + dfinfo = t.gettarinfo(fileobj=dockerfile, arcname='Dockerfile') + t.addfile(dfinfo, dockerfile) + t.close() + f.seek(0) + return f + + +def split_path(p): + return [pt for pt in re.split(_SEP, p) if pt and pt != '.'] + + +def normalize_slashes(p): + if IS_WINDOWS_PLATFORM: + return '/'.join(split_path(p)) + return p + + +def walk(root, patterns, default=True): + pm = PatternMatcher(patterns) + return pm.walk(root) + + +# Heavily based on +# https://github.com/moby/moby/blob/master/pkg/fileutils/fileutils.go +class PatternMatcher: + def __init__(self, patterns): + self.patterns = list(filter( + lambda p: p.dirs, [Pattern(p) for p in patterns] + )) + self.patterns.append(Pattern('!.dockerignore')) + + def matches(self, filepath): + matched = False + parent_path = os.path.dirname(filepath) + parent_path_dirs = split_path(parent_path) + + for pattern in self.patterns: + negative = pattern.exclusion + match = pattern.match(filepath) + if not match and parent_path != '': + if len(pattern.dirs) <= len(parent_path_dirs): + match = pattern.match( + os.path.sep.join(parent_path_dirs[:len(pattern.dirs)]) + ) + + if match: + matched = not negative + + return matched + + def walk(self, root): + def rec_walk(current_dir): + for f in os.listdir(current_dir): + fpath = os.path.join( + os.path.relpath(current_dir, root), f + ) + if fpath.startswith(f".{os.path.sep}"): + fpath = fpath[2:] + match = self.matches(fpath) + if not match: + yield fpath + + cur = os.path.join(root, fpath) + if not os.path.isdir(cur) or os.path.islink(cur): + continue + + if match: + # If we want to skip this file and it's a directory + # then we should first check to see if there's an + # excludes pattern (e.g. !dir/file) that starts with this + # dir. If so then we can't skip this dir. + skip = True + + for pat in self.patterns: + if not pat.exclusion: + continue + if pat.cleaned_pattern.startswith( + normalize_slashes(fpath)): + skip = False + break + if skip: + continue + yield from rec_walk(cur) + + return rec_walk(root) + + +class Pattern: + def __init__(self, pattern_str): + self.exclusion = False + if pattern_str.startswith('!'): + self.exclusion = True + pattern_str = pattern_str[1:] + + self.dirs = self.normalize(pattern_str) + self.cleaned_pattern = '/'.join(self.dirs) + + @classmethod + def normalize(cls, p): + + # Remove trailing spaces + p = p.strip() + + # Leading and trailing slashes are not relevant. Yes, + # "foo.py/" must exclude the "foo.py" regular file. "." 
+ # components are not relevant either, even if the whole + # pattern is only ".", as the Docker reference states: "For + # historical reasons, the pattern . is ignored." + # ".." component must be cleared with the potential previous + # component, regardless of whether it exists: "A preprocessing + # step [...] eliminates . and .. elements using Go's + # filepath.". + i = 0 + split = split_path(p) + while i < len(split): + if split[i] == '..': + del split[i] + if i > 0: + del split[i - 1] + i -= 1 + else: + i += 1 + return split + + def match(self, filepath): + return fnmatch(normalize_slashes(filepath), self.cleaned_pattern) diff --git a/docker/utils/config.py b/docker/utils/config.py new file mode 100644 index 0000000000..8e24959a5d --- /dev/null +++ b/docker/utils/config.py @@ -0,0 +1,66 @@ +import json +import logging +import os + +from ..constants import IS_WINDOWS_PLATFORM + +DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json') +LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg' + +log = logging.getLogger(__name__) + + +def find_config_file(config_path=None): + paths = list(filter(None, [ + config_path, # 1 + config_path_from_environment(), # 2 + os.path.join(home_dir(), DOCKER_CONFIG_FILENAME), # 3 + os.path.join(home_dir(), LEGACY_DOCKER_CONFIG_FILENAME), # 4 + ])) + + log.debug(f"Trying paths: {repr(paths)}") + + for path in paths: + if os.path.exists(path): + log.debug(f"Found file at path: {path}") + return path + + log.debug("No config file found") + + return None + + +def config_path_from_environment(): + config_dir = os.environ.get('DOCKER_CONFIG') + if not config_dir: + return None + return os.path.join(config_dir, os.path.basename(DOCKER_CONFIG_FILENAME)) + + +def home_dir(): + """ + Get the user's home directory, using the same logic as the Docker Engine + client - use %USERPROFILE% on Windows, $HOME/getuid on POSIX. + """ + if IS_WINDOWS_PLATFORM: + return os.environ.get('USERPROFILE', '') + else: + return os.path.expanduser('~') + + +def load_general_config(config_path=None): + config_file = find_config_file(config_path) + + if not config_file: + return {} + + try: + with open(config_file) as f: + return json.load(f) + except (OSError, ValueError) as e: + # In the case of a legacy `.dockercfg` file, we won't + # be able to load any JSON data. + log.debug(e) + + log.debug("All parsing attempts failed - returning empty config") + return {} diff --git a/docker/utils/decorators.py b/docker/utils/decorators.py index 3c42fe4b9f..5aab98cd46 100644 --- a/docker/utils/decorators.py +++ b/docker/utils/decorators.py @@ -1,21 +1,45 @@ import functools from .. import errors +from . 
import utils -def check_resource(f): - @functools.wraps(f) - def wrapped(self, resource_id=None, *args, **kwargs): - if resource_id is None: - if kwargs.get('container'): - resource_id = kwargs.pop('container') - elif kwargs.get('image'): - resource_id = kwargs.pop('image') - if isinstance(resource_id, dict): - resource_id = resource_id.get('Id') - if not resource_id: - raise errors.NullResource( - 'image or container param is undefined' - ) - return f(self, resource_id, *args, **kwargs) - return wrapped +def check_resource(resource_name): + def decorator(f): + @functools.wraps(f) + def wrapped(self, resource_id=None, *args, **kwargs): + if resource_id is None and kwargs.get(resource_name): + resource_id = kwargs.pop(resource_name) + if isinstance(resource_id, dict): + resource_id = resource_id.get('Id', resource_id.get('ID')) + if not resource_id: + raise errors.NullResource( + 'Resource ID was not provided' + ) + return f(self, resource_id, *args, **kwargs) + return wrapped + return decorator + + +def minimum_version(version): + def decorator(f): + @functools.wraps(f) + def wrapper(self, *args, **kwargs): + if utils.version_lt(self._version, version): + raise errors.InvalidVersion( + f'{f.__name__} is not available for version < {version}', + ) + return f(self, *args, **kwargs) + return wrapper + return decorator + + +def update_headers(f): + def inner(self, *args, **kwargs): + if 'HttpHeaders' in self._general_configs: + if not kwargs.get('headers'): + kwargs['headers'] = self._general_configs['HttpHeaders'] + else: + kwargs['headers'].update(self._general_configs['HttpHeaders']) + return f(self, *args, **kwargs) + return inner diff --git a/docker/utils/fnmatch.py b/docker/utils/fnmatch.py new file mode 100644 index 0000000000..be745381e4 --- /dev/null +++ b/docker/utils/fnmatch.py @@ -0,0 +1,115 @@ +"""Filename matching with shell patterns. + +fnmatch(FILENAME, PATTERN) matches according to the local convention. +fnmatchcase(FILENAME, PATTERN) always takes case in account. + +The functions operate by translating the pattern into a regular +expression. They cache the compiled regular expressions for speed. + +The function translate(PATTERN) returns a regular expression +corresponding to PATTERN. (It does not compile it.) +""" + +import re + +__all__ = ["fnmatch", "fnmatchcase", "translate"] + +_cache = {} +_MAXCACHE = 100 + + +def _purge(): + """Clear the pattern cache""" + _cache.clear() + + +def fnmatch(name, pat): + """Test whether FILENAME matches PATTERN. + + Patterns are Unix shell style: + + * matches everything + ? matches any single character + [seq] matches any character in seq + [!seq] matches any char not in seq + + An initial period in FILENAME is not special. + Both FILENAME and PATTERN are first case-normalized + if the operating system requires it. + If you don't want this, use fnmatchcase(FILENAME, PATTERN). + """ + + name = name.lower() + pat = pat.lower() + return fnmatchcase(name, pat) + + +def fnmatchcase(name, pat): + """Test whether FILENAME matches PATTERN, including case. + This is a version of fnmatch() which doesn't case-normalize + its arguments. + """ + + try: + re_pat = _cache[pat] + except KeyError: + res = translate(pat) + if len(_cache) >= _MAXCACHE: + _cache.clear() + _cache[pat] = re_pat = re.compile(res) + return re_pat.match(name) is not None + + +def translate(pat): + """Translate a shell PATTERN to a regular expression. + + There is no way to quote meta-characters. 
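+
+    For example (illustrative doctests; the returned pattern is anchored
+    at both ends):
+
+        >>> translate('*.py')
+        '^[^/]*\\.py$'
+        >>> translate('**/*.py')
+        '^(.*/)?[^/]*\\.py$'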
+ """ + i, n = 0, len(pat) + res = '^' + while i < n: + c = pat[i] + i = i + 1 + if c == '*': + if i < n and pat[i] == '*': + # is some flavor of "**" + i = i + 1 + # Treat **/ as ** so eat the "/" + if i < n and pat[i] == '/': + i = i + 1 + if i >= n: + # is "**EOF" - to align with .gitignore just accept all + res = f"{res}.*" + else: + # is "**" + # Note that this allows for any # of /'s (even 0) because + # the .* will eat everything, even /'s + res = f"{res}(.*/)?" + else: + # is "*" so map it to anything but "/" + res = f"{res}[^/]*" + elif c == '?': + # "?" is any char except "/" + res = f"{res}[^/]" + elif c == '[': + j = i + if j < n and pat[j] == '!': + j = j + 1 + if j < n and pat[j] == ']': + j = j + 1 + while j < n and pat[j] != ']': + j = j + 1 + if j >= n: + res = f"{res}\\[" + else: + stuff = pat[i:j].replace('\\', '\\\\') + i = j + 1 + if stuff[0] == '!': + stuff = f"^{stuff[1:]}" + elif stuff[0] == '^': + stuff = f"\\{stuff}" + res = f'{res}[{stuff}]' + else: + res = res + re.escape(c) + + return f"{res}$" diff --git a/docker/utils/json_stream.py b/docker/utils/json_stream.py new file mode 100644 index 0000000000..41d25920ce --- /dev/null +++ b/docker/utils/json_stream.py @@ -0,0 +1,74 @@ +import json +import json.decoder + +from ..errors import StreamParseError + +json_decoder = json.JSONDecoder() + + +def stream_as_text(stream): + """ + Given a stream of bytes or text, if any of the items in the stream + are bytes convert them to text. + This function can be removed once we return text streams + instead of byte streams. + """ + for data in stream: + if not isinstance(data, str): + data = data.decode('utf-8', 'replace') + yield data + + +def json_splitter(buffer): + """Attempt to parse a json object from a buffer. If there is at least one + object, return it and the rest of the buffer, otherwise return None. + """ + buffer = buffer.strip() + try: + obj, index = json_decoder.raw_decode(buffer) + rest = buffer[json.decoder.WHITESPACE.match(buffer, index).end():] + return obj, rest + except ValueError: + return None + + +def json_stream(stream): + """Given a stream of text, return a stream of json objects. + This handles streams which are inconsistently buffered (some entries may + be newline delimited, and others are not). + """ + return split_buffer(stream, json_splitter, json_decoder.decode) + + +def line_splitter(buffer, separator='\n'): + index = buffer.find(str(separator)) + if index == -1: + return None + return buffer[:index + 1], buffer[index + 1:] + + +def split_buffer(stream, splitter=None, decoder=lambda a: a): + """Given a generator which yields strings and a splitter function, + joins all input, splits on the separator and yields each chunk. + Unlike string.split(), each chunk includes the trailing + separator, except for the last one if none was found on the end + of the input. + """ + splitter = splitter or line_splitter + buffered = '' + + for data in stream_as_text(stream): + buffered += data + while True: + buffer_split = splitter(buffered) + if buffer_split is None: + break + + item, buffered = buffer_split + yield item + + if buffered: + try: + yield decoder(buffered) + except Exception as e: + raise StreamParseError(e) from e diff --git a/docker/utils/ports.py b/docker/utils/ports.py new file mode 100644 index 0000000000..9fd6e8f6b8 --- /dev/null +++ b/docker/utils/ports.py @@ -0,0 +1,83 @@ +import re + +PORT_SPEC = re.compile( + "^" # Match full string + "(" # External part + r"(\[?(?P[a-fA-F\d.:]+)\]?:)?" 
+    r"(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:"  # External range
+    ")?"
+    r"(?P<int>[\d]+)(-(?P<int_end>[\d]+))?"  # Internal range
+    "(?P<proto>/(udp|tcp|sctp))?"  # Protocol
+    "$"  # Match full string
+)
+
+
+def add_port_mapping(port_bindings, internal_port, external):
+    if internal_port in port_bindings:
+        port_bindings[internal_port].append(external)
+    else:
+        port_bindings[internal_port] = [external]
+
+
+def add_port(port_bindings, internal_port_range, external_range):
+    if external_range is None:
+        for internal_port in internal_port_range:
+            add_port_mapping(port_bindings, internal_port, None)
+    else:
+        ports = zip(internal_port_range, external_range)
+        for internal_port, external_port in ports:
+            add_port_mapping(port_bindings, internal_port, external_port)
+
+
+def build_port_bindings(ports):
+    port_bindings = {}
+    for port in ports:
+        internal_port_range, external_range = split_port(port)
+        add_port(port_bindings, internal_port_range, external_range)
+    return port_bindings
+
+
+def _raise_invalid_port(port):
+    raise ValueError('Invalid port "%s", should be '
+                     '[[remote_ip:]remote_port[-remote_port]:]'
+                     'port[/protocol]' % port)
+
+
+def port_range(start, end, proto, randomly_available_port=False):
+    if not start:
+        return start
+    if not end:
+        return [start + proto]
+    if randomly_available_port:
+        return [f"{start}-{end}{proto}"]
+    return [str(port) + proto for port in range(int(start), int(end) + 1)]
+
+
+def split_port(port):
+    if hasattr(port, 'legacy_repr'):
+        # This is the worst hack, but it prevents a bug in Compose 1.14.0
+        # https://github.com/docker/docker-py/issues/1668
+        # TODO: remove once fixed in Compose stable
+        port = port.legacy_repr()
+    port = str(port)
+    match = PORT_SPEC.match(port)
+    if match is None:
+        _raise_invalid_port(port)
+    parts = match.groupdict()
+
+    host = parts['host']
+    proto = parts['proto'] or ''
+    internal = port_range(parts['int'], parts['int_end'], proto)
+    external = port_range(
+        parts['ext'], parts['ext_end'], '', len(internal) == 1)
+
+    if host is None:
+        if external is not None and len(internal) != len(external):
+            raise ValueError('Port ranges don\'t match in length')
+        return internal, external
+    else:
+        if not external:
+            external = [None] * len(internal)
+        elif len(internal) != len(external):
+            raise ValueError('Port ranges don\'t match in length')
+        return internal, [(host, ext_port) for ext_port in external]
diff --git a/docker/utils/ports/__init__.py b/docker/utils/ports/__init__.py
deleted file mode 100644
index 1dbfa3a709..0000000000
--- a/docker/utils/ports/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from .ports import (
-    split_port,
-    build_port_bindings
-) # flake8: noqa
diff --git a/docker/utils/ports/ports.py b/docker/utils/ports/ports.py
deleted file mode 100644
index 6a0a862a20..0000000000
--- a/docker/utils/ports/ports.py
+++ /dev/null
@@ -1,84 +0,0 @@
-
-
-def add_port_mapping(port_bindings, internal_port, external):
-    if internal_port in port_bindings:
-        port_bindings[internal_port].append(external)
-    else:
-        port_bindings[internal_port] = [external]
-
-
-def add_port(port_bindings, internal_port_range, external_range):
-    if external_range is None:
-        for internal_port in internal_port_range:
-            add_port_mapping(port_bindings, internal_port, None)
-    else:
-        ports = zip(internal_port_range, external_range)
-        for internal_port, external_port in ports:
-            add_port_mapping(port_bindings, internal_port, external_port)
-
-
-def build_port_bindings(ports):
-    port_bindings = {}
-    for port in ports:
-        internal_port_range, external_range =
split_port(port) - add_port(port_bindings, internal_port_range, external_range) - return port_bindings - - -def to_port_range(port): - if not port: - return None - - protocol = "" - if "/" in port: - parts = port.split("/") - if len(parts) != 2: - raise ValueError('Invalid port "%s", should be ' - '[[remote_ip:]remote_port[-remote_port]:]' - 'port[/protocol]' % port) - port, protocol = parts - protocol = "/" + protocol - - parts = str(port).split('-') - - if len(parts) == 1: - return ["%s%s" % (port, protocol)] - - if len(parts) == 2: - full_port_range = range(int(parts[0]), int(parts[1]) + 1) - return ["%s%s" % (p, protocol) for p in full_port_range] - - raise ValueError('Invalid port range "%s", should be ' - 'port or startport-endport' % port) - - -def split_port(port): - parts = str(port).split(':') - if not 1 <= len(parts) <= 3: - raise ValueError('Invalid port "%s", should be ' - '[[remote_ip:]remote_port:]port[/protocol]' % port) - - if len(parts) == 1: - internal_port, = parts - return to_port_range(internal_port), None - if len(parts) == 2: - external_port, internal_port = parts - - internal_range = to_port_range(internal_port) - external_range = to_port_range(external_port) - if len(internal_range) != len(external_range): - raise ValueError('Port ranges don\'t match in length') - - return internal_range, external_range - - external_ip, external_port, internal_port = parts - internal_range = to_port_range(internal_port) - external_range = to_port_range(external_port) - if not external_range: - external_range = [None] * len(internal_range) - - if len(internal_range) != len(external_range): - raise ValueError('Port ranges don\'t match in length') - - return internal_range, [(external_ip, ex_port or None) - for ex_port in external_range] diff --git a/docker/utils/proxy.py b/docker/utils/proxy.py new file mode 100644 index 0000000000..e7164b6cea --- /dev/null +++ b/docker/utils/proxy.py @@ -0,0 +1,77 @@ +from .utils import format_environment + + +class ProxyConfig(dict): + ''' + Hold the client's proxy configuration + ''' + @property + def http(self): + return self.get('http') + + @property + def https(self): + return self.get('https') + + @property + def ftp(self): + return self.get('ftp') + + @property + def no_proxy(self): + return self.get('no_proxy') + + @staticmethod + def from_dict(config): + ''' + Instantiate a new ProxyConfig from a dictionary that represents a + client configuration, as described in `the documentation`_. + + .. _the documentation: + https://docs.docker.com/network/proxy/#configure-the-docker-client + ''' + return ProxyConfig( + http=config.get('httpProxy'), + https=config.get('httpsProxy'), + ftp=config.get('ftpProxy'), + no_proxy=config.get('noProxy'), + ) + + def get_environment(self): + ''' + Return a dictionary representing the environment variables used to + set the proxy settings. + ''' + env = {} + if self.http: + env['http_proxy'] = env['HTTP_PROXY'] = self.http + if self.https: + env['https_proxy'] = env['HTTPS_PROXY'] = self.https + if self.ftp: + env['ftp_proxy'] = env['FTP_PROXY'] = self.ftp + if self.no_proxy: + env['no_proxy'] = env['NO_PROXY'] = self.no_proxy + return env + + def inject_proxy_environment(self, environment): + ''' + Given a list of strings representing environment variables, prepend the + environment variables corresponding to the proxy settings. 
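+
+        For example (an illustrative sketch; the proxy URL is made up):
+
+            >>> cfg = ProxyConfig(http='http://proxy.example:3128')
+            >>> cfg.inject_proxy_environment(['FOO=bar'])
+            ['http_proxy=http://proxy.example:3128', 'HTTP_PROXY=http://proxy.example:3128', 'FOO=bar']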
+ ''' + if not self: + return environment + + proxy_env = format_environment(self.get_environment()) + if not environment: + return proxy_env + # It is important to prepend our variables, because we want the + # variables defined in "environment" to take precedence. + return proxy_env + environment + + def __str__(self): + return ( + 'ProxyConfig(' + f'http={self.http}, https={self.https}, ' + f'ftp={self.ftp}, no_proxy={self.no_proxy}' + ')' + ) diff --git a/docker/utils/socket.py b/docker/utils/socket.py new file mode 100644 index 0000000000..c7cb584d4f --- /dev/null +++ b/docker/utils/socket.py @@ -0,0 +1,187 @@ +import errno +import os +import select +import socket as pysocket +import struct + +try: + from ..transport import NpipeSocket +except ImportError: + NpipeSocket = type(None) + + +STDOUT = 1 +STDERR = 2 + + +class SocketError(Exception): + pass + + +# NpipeSockets have their own error types +# pywintypes.error: (109, 'ReadFile', 'The pipe has been ended.') +NPIPE_ENDED = 109 + + +def read(socket, n=4096): + """ + Reads at most n bytes from socket + """ + + recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK) + + if not isinstance(socket, NpipeSocket): + if not hasattr(select, "poll"): + # Limited to 1024 + select.select([socket], [], []) + else: + poll = select.poll() + poll.register(socket, select.POLLIN | select.POLLPRI) + poll.poll() + + try: + if hasattr(socket, 'recv'): + return socket.recv(n) + if isinstance(socket, pysocket.SocketIO): + return socket.read(n) + return os.read(socket.fileno(), n) + except OSError as e: + if e.errno not in recoverable_errors: + raise + except Exception as e: + is_pipe_ended = (isinstance(socket, NpipeSocket) and + len(e.args) > 0 and + e.args[0] == NPIPE_ENDED) + if is_pipe_ended: + # npipes don't support duplex sockets, so we interpret + # a PIPE_ENDED error as a close operation (0-length read). + return '' + raise + + +def read_exactly(socket, n): + """ + Reads exactly n bytes from socket + Raises SocketError if there isn't enough data + """ + data = b"" + while len(data) < n: + next_data = read(socket, n - len(data)) + if not next_data: + raise SocketError("Unexpected EOF") + data += next_data + return data + + +def next_frame_header(socket): + """ + Returns the stream and size of the next frame of data waiting to be read + from socket, according to the protocol defined here: + + https://docs.docker.com/engine/api/v1.24/#attach-to-a-container + """ + try: + data = read_exactly(socket, 8) + except SocketError: + return (-1, -1) + + stream, actual = struct.unpack('>BxxxL', data) + return (stream, actual) + + +def frames_iter(socket, tty): + """ + Return a generator of frames read from socket. A frame is a tuple where + the first item is the stream number and the second item is a chunk of data. + + If the tty setting is enabled, the streams are multiplexed into the stdout + stream. + """ + if tty: + return ((STDOUT, frame) for frame in frames_iter_tty(socket)) + else: + return frames_iter_no_tty(socket) + + +def frames_iter_no_tty(socket): + """ + Returns a generator of data read from the socket when the tty setting is + not enabled. 
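+
+    Each item yielded is a ``(stream, data)`` tuple, where ``stream`` is
+    ``STDOUT`` (1) or ``STDERR`` (2) as reported by the frame header, and
+    ``data`` is a chunk of that frame's payload.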
+ """ + while True: + (stream, n) = next_frame_header(socket) + if n < 0: + break + while n > 0: + result = read(socket, n) + if result is None: + continue + data_length = len(result) + if data_length == 0: + # We have reached EOF + return + n -= data_length + yield (stream, result) + + +def frames_iter_tty(socket): + """ + Return a generator of data read from the socket when the tty setting is + enabled. + """ + while True: + result = read(socket) + if len(result) == 0: + # We have reached EOF + return + yield result + + +def consume_socket_output(frames, demux=False): + """ + Iterate through frames read from the socket and return the result. + + Args: + + demux (bool): + If False, stdout and stderr are multiplexed, and the result is the + concatenation of all the frames. If True, the streams are + demultiplexed, and the result is a 2-tuple where each item is the + concatenation of frames belonging to the same stream. + """ + if demux is False: + # If the streams are multiplexed, the generator returns strings, that + # we just need to concatenate. + return b"".join(frames) + + # If the streams are demultiplexed, the generator yields tuples + # (stdout, stderr) + out = [None, None] + for frame in frames: + # It is guaranteed that for each frame, one and only one stream + # is not None. + assert frame != (None, None) + if frame[0] is not None: + if out[0] is None: + out[0] = frame[0] + else: + out[0] += frame[0] + else: + if out[1] is None: + out[1] = frame[1] + else: + out[1] += frame[1] + return tuple(out) + + +def demux_adaptor(stream_id, data): + """ + Utility to demultiplex stdout and stderr when reading frames from the + socket. + """ + if stream_id == STDOUT: + return (data, None) + elif stream_id == STDERR: + return (None, data) + else: + raise ValueError(f'{stream_id} is not a valid stream') diff --git a/docker/utils/types.py b/docker/utils/types.py deleted file mode 100644 index d742fd0a5a..0000000000 --- a/docker/utils/types.py +++ /dev/null @@ -1,100 +0,0 @@ -import six - - -class LogConfigTypesEnum(object): - _values = ( - 'json-file', - 'syslog', - 'none' - ) - JSON, SYSLOG, NONE = _values - - -class DictType(dict): - def __init__(self, init): - for k, v in six.iteritems(init): - self[k] = v - - -class LogConfig(DictType): - types = LogConfigTypesEnum - - def __init__(self, **kwargs): - type_ = kwargs.get('type', kwargs.get('Type')) - config = kwargs.get('config', kwargs.get('Config')) - if type_ not in self.types._values: - raise ValueError("LogConfig.type must be one of ({0})".format( - ', '.join(self.types._values) - )) - if config and not isinstance(config, dict): - raise ValueError("LogConfig.config must be a dictionary") - - super(LogConfig, self).__init__({ - 'Type': type_, - 'Config': config or {} - }) - - @property - def type(self): - return self['Type'] - - @type.setter - def type(self, value): - if value not in self.types._values: - raise ValueError("LogConfig.type must be one of {0}".format( - ', '.join(self.types._values) - )) - self['Type'] = value - - @property - def config(self): - return self['Config'] - - def set_config_value(self, key, value): - self.config[key] = value - - def unset_config(self, key): - if key in self.config: - del self.config[key] - - -class Ulimit(DictType): - def __init__(self, **kwargs): - name = kwargs.get('name', kwargs.get('Name')) - soft = kwargs.get('soft', kwargs.get('Soft')) - hard = kwargs.get('hard', kwargs.get('Hard')) - if not isinstance(name, six.string_types): - raise ValueError("Ulimit.name must be a string") - if soft and 
not isinstance(soft, int): - raise ValueError("Ulimit.soft must be an integer") - if hard and not isinstance(hard, int): - raise ValueError("Ulimit.hard must be an integer") - super(Ulimit, self).__init__({ - 'Name': name, - 'Soft': soft, - 'Hard': hard - }) - - @property - def name(self): - return self['Name'] - - @name.setter - def name(self, value): - self['Name'] = value - - @property - def soft(self): - return self.get('Soft') - - @soft.setter - def soft(self, value): - self['Soft'] = value - - @property - def hard(self): - return self.get('Hard') - - @hard.setter - def hard(self, value): - self['Hard'] = value diff --git a/docker/utils/utils.py b/docker/utils/utils.py index 175a7e0ff6..f36a3afb89 100644 --- a/docker/utils/utils.py +++ b/docker/utils/utils.py @@ -1,102 +1,51 @@ -# Copyright 2013 dotCloud inc. +import base64 +import collections +import json +import os +import os.path +import shlex +import string +from datetime import datetime, timezone +from functools import lru_cache +from itertools import zip_longest +from urllib.parse import urlparse, urlunparse -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at +from .. import errors +from ..constants import ( + BYTE_UNITS, + DEFAULT_HTTP_HOST, + DEFAULT_NPIPE, + DEFAULT_UNIX_SOCKET, +) +from ..tls import TLSConfig -# http://www.apache.org/licenses/LICENSE-2.0 +URLComponents = collections.namedtuple( + 'URLComponents', + 'scheme netloc url params query fragment', +) -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import io -import os -import os.path -import json -import shlex -import tarfile -import tempfile -from distutils.version import StrictVersion -from fnmatch import fnmatch -from datetime import datetime +def create_ipam_pool(*args, **kwargs): + raise errors.DeprecatedMethod( + 'utils.create_ipam_pool has been removed. Please use a ' + 'docker.types.IPAMPool object instead.' + ) -import requests -import six -from .. import errors -from .. 
import tls -from .types import Ulimit, LogConfig - - -DEFAULT_HTTP_HOST = "127.0.0.1" -DEFAULT_UNIX_SOCKET = "http+unix://var/run/docker.sock" -BYTE_UNITS = { - 'b': 1, - 'k': 1024, - 'm': 1024 * 1024, - 'g': 1024 * 1024 * 1024 -} - - -def mkbuildcontext(dockerfile): - f = tempfile.NamedTemporaryFile() - t = tarfile.open(mode='w', fileobj=f) - if isinstance(dockerfile, io.StringIO): - dfinfo = tarfile.TarInfo('Dockerfile') - if six.PY3: - raise TypeError('Please use io.BytesIO to create in-memory ' - 'Dockerfiles with Python 3') - else: - dfinfo.size = len(dockerfile.getvalue()) - dockerfile.seek(0) - elif isinstance(dockerfile, io.BytesIO): - dfinfo = tarfile.TarInfo('Dockerfile') - dfinfo.size = len(dockerfile.getvalue()) - dockerfile.seek(0) - else: - dfinfo = t.gettarinfo(fileobj=dockerfile, arcname='Dockerfile') - t.addfile(dfinfo, dockerfile) - t.close() - f.seek(0) - return f - - -def fnmatch_any(relpath, patterns): - return any([fnmatch(relpath, pattern) for pattern in patterns]) - - -def tar(path, exclude=None): - f = tempfile.NamedTemporaryFile() - t = tarfile.open(mode='w', fileobj=f) - for dirpath, dirnames, filenames in os.walk(path): - relpath = os.path.relpath(dirpath, path) - if relpath == '.': - relpath = '' - if exclude is None: - fnames = filenames - else: - dirnames[:] = [d for d in dirnames - if not fnmatch_any(os.path.join(relpath, d), - exclude)] - fnames = [name for name in filenames - if not fnmatch_any(os.path.join(relpath, name), - exclude)] - dirnames.sort() - for name in sorted(fnames): - arcname = os.path.join(relpath, name) - t.add(os.path.join(path, arcname), arcname=arcname) - for name in dirnames: - arcname = os.path.join(relpath, name) - t.add(os.path.join(path, arcname), - arcname=arcname, recursive=False) - t.close() - f.seek(0) - return f +def create_ipam_config(*args, **kwargs): + raise errors.DeprecatedMethod( + 'utils.create_ipam_config has been removed. Please use a ' + 'docker.types.IPAMConfig object instead.' + ) + +def decode_json_header(header): + data = base64.b64decode(header) + data = data.decode('utf-8') + return json.loads(data) + +@lru_cache(maxsize=None) def compare_version(v1, v2): """Compare docker versions @@ -109,32 +58,28 @@ def compare_version(v1, v2): >>> compare_version(v2, v2) 0 """ - s1 = StrictVersion(v1) - s2 = StrictVersion(v2) - if s1 == s2: + if v1 == v2: return 0 - elif s1 > s2: - return -1 - else: - return 1 + # Split into `sys.version_info` like tuples. + s1 = tuple(int(p) for p in v1.split('.')) + s2 = tuple(int(p) for p in v2.split('.')) + # Compare each component, padding with 0 if necessary. 
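+    # e.g. comparing '1.21' with '1.21.1' compares (1, 21, 0) against
+    # (1, 21, 1), so '1.21' is treated as the older version.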
+ for c1, c2 in zip_longest(s1, s2, fillvalue=0): + if c1 == c2: + continue + elif c1 > c2: + return -1 + else: + return 1 + return 0 -def ping_registry(url): - return ping(url + '/v2/', [401]) or ping(url + '/v1/_ping') +def version_lt(v1, v2): + return compare_version(v1, v2) > 0 -def ping(url, valid_4xx_statuses=None): - try: - res = requests.get(url, timeout=3) - except Exception: - return False - else: - # We don't send yet auth headers - # and a v2 registry will respond with status 401 - return ( - res.status_code < 400 or - (valid_4xx_statuses and res.status_code in valid_4xx_statuses) - ) +def version_gte(v1, v2): + return not version_lt(v1, v2) def _convert_port_binding(binding): @@ -143,7 +88,7 @@ def _convert_port_binding(binding): if len(binding) == 2: result['HostPort'] = binding[1] result['HostIp'] = binding[0] - elif isinstance(binding[0], six.string_types): + elif isinstance(binding[0], str): result['HostIp'] = binding[0] else: result['HostPort'] = binding[0] @@ -167,10 +112,10 @@ def _convert_port_binding(binding): def convert_port_bindings(port_bindings): result = {} - for k, v in six.iteritems(port_bindings): + for k, v in iter(port_bindings.items()): key = str(k) if '/' not in key: - key = key + '/tcp' + key += '/tcp' if isinstance(v, list): result[key] = [_convert_port_binding(binding) for binding in v] else: @@ -184,13 +129,19 @@ def convert_volume_binds(binds): result = [] for k, v in binds.items(): + if isinstance(k, bytes): + k = k.decode('utf-8') + if isinstance(v, dict): if 'ro' in v and 'mode' in v: raise ValueError( - 'Binding cannot contain both "ro" and "mode": {}' - .format(repr(v)) + f'Binding cannot contain both "ro" and "mode": {v!r}' ) + bind = v['bind'] + if isinstance(bind, bytes): + bind = bind.decode('utf-8') + if 'ro' in v: mode = 'ro' if v['ro'] else 'rw' elif 'mode' in v: @@ -198,94 +149,189 @@ def convert_volume_binds(binds): else: mode = 'rw' - result.append('{0}:{1}:{2}'.format( - k, v['bind'], mode - )) + # NOTE: this is only relevant for Linux hosts + # (doesn't apply in Docker Desktop) + propagation_modes = [ + 'rshared', + 'shared', + 'rslave', + 'slave', + 'rprivate', + 'private', + ] + if 'propagation' in v and v['propagation'] in propagation_modes: + if mode: + mode = f"{mode},{v['propagation']}" + else: + mode = v['propagation'] + + result.append( + f'{k}:{bind}:{mode}' + ) else: - result.append('{0}:{1}:rw'.format(k, v)) + if isinstance(v, bytes): + v = v.decode('utf-8') + result.append( + f'{k}:{v}:rw' + ) return result -def parse_repository_tag(repo): - column_index = repo.rfind(':') - if column_index < 0: - return repo, None - tag = repo[column_index + 1:] - slash_index = tag.find('/') - if slash_index < 0: - return repo[:column_index], tag +def convert_tmpfs_mounts(tmpfs): + if isinstance(tmpfs, dict): + return tmpfs - return repo, None + if not isinstance(tmpfs, list): + raise ValueError( + 'Expected tmpfs value to be either a list or a dict, ' + f'found: {type(tmpfs).__name__}' + ) + result = {} + for mount in tmpfs: + if isinstance(mount, str): + if ":" in mount: + name, options = mount.split(":", 1) + else: + name = mount + options = "" -# Based on utils.go:ParseHost http://tinyurl.com/nkahcfh -# fd:// protocol unsupported (for obvious reasons) -# Added support for http and https -# Protocol translation: tcp -> http, unix -> http+unix -def parse_host(addr): - proto = "http+unix" - host = DEFAULT_HTTP_HOST - port = None + else: + raise ValueError( + "Expected item in tmpfs list to be a string, " + f"found: 
{type(mount).__name__}" + ) + + result[name] = options + return result + + +def convert_service_networks(networks): + if not networks: + return networks + if not isinstance(networks, list): + raise TypeError('networks parameter must be a list.') + + result = [] + for n in networks: + if isinstance(n, str): + n = {'Target': n} + result.append(n) + return result + + +def parse_repository_tag(repo_name): + parts = repo_name.rsplit('@', 1) + if len(parts) == 2: + return tuple(parts) + parts = repo_name.rsplit(':', 1) + if len(parts) == 2 and '/' not in parts[1]: + return tuple(parts) + return repo_name, None + + +def parse_host(addr, is_win32=False, tls=False): + # Sensible defaults + if not addr and is_win32: + return DEFAULT_NPIPE if not addr or addr.strip() == 'unix://': return DEFAULT_UNIX_SOCKET addr = addr.strip() - if addr.startswith('http://'): - addr = addr.replace('http://', 'tcp://') - if addr.startswith('http+unix://'): - addr = addr.replace('http+unix://', 'unix://') - if addr == 'tcp://': + parsed_url = urlparse(addr) + proto = parsed_url.scheme + if not proto or any(x not in f"{string.ascii_letters}+" for x in proto): + # https://bugs.python.org/issue754016 + parsed_url = urlparse(f"//{addr}", 'tcp') + proto = 'tcp' + + if proto == 'fd': + raise errors.DockerException('fd protocol is not implemented') + + # These protos are valid aliases for our library but not for the + # official spec + if proto == 'http' or proto == 'https': + tls = proto == 'https' + proto = 'tcp' + elif proto == 'http+unix': + proto = 'unix' + + if proto not in ('tcp', 'unix', 'npipe', 'ssh'): raise errors.DockerException( - "Invalid bind address format: {0}".format(addr)) - elif addr.startswith('unix://'): - addr = addr[7:] - elif addr.startswith('tcp://'): - proto = "http" - addr = addr[6:] - elif addr.startswith('https://'): - proto = "https" - addr = addr[8:] - elif addr.startswith('fd://'): - raise errors.DockerException("fd protocol is not implemented") - else: - if "://" in addr: - raise errors.DockerException( - "Invalid bind address protocol: {0}".format(addr) - ) - proto = "http" + f"Invalid bind address protocol: {addr}" + ) - if proto != "http+unix" and ":" in addr: - host_parts = addr.split(':') - if len(host_parts) != 2: - raise errors.DockerException( - "Invalid bind address format: {0}".format(addr) - ) - if host_parts[0]: - host = host_parts[0] + if proto == 'tcp' and not parsed_url.netloc: + # "tcp://" is exceptionally disallowed by convention; + # omitting a hostname for other protocols is fine + raise errors.DockerException( + f'Invalid bind address format: {addr}' + ) - try: - port = int(host_parts[1]) - except Exception: - raise errors.DockerException( - "Invalid port: %s", addr - ) + if any([ + parsed_url.params, parsed_url.query, parsed_url.fragment, + parsed_url.password + ]): + raise errors.DockerException( + f'Invalid bind address format: {addr}' + ) - elif proto in ("http", "https") and ':' not in addr: + if parsed_url.path and proto == 'ssh': raise errors.DockerException( - "Bind address needs a port: {0}".format(addr)) + f'Invalid bind address format: no path allowed for this protocol: {addr}' + ) else: - host = addr + path = parsed_url.path + if proto == 'unix' and parsed_url.hostname is not None: + # For legacy reasons, we consider unix://path + # to be valid and equivalent to unix:///path + path = f"{parsed_url.hostname}/{path}" + + netloc = parsed_url.netloc + if proto in ('tcp', 'ssh'): + port = parsed_url.port or 0 + if port <= 0: + if proto != 'ssh': + raise 
errors.DockerException( + f'Invalid bind address format: port is required: {addr}' + ) + port = 22 + netloc = f'{parsed_url.netloc}:{port}' + + if not parsed_url.hostname: + netloc = f'{DEFAULT_HTTP_HOST}:{port}' - if proto == "http+unix": - return "{0}://{1}".format(proto, host) - return "{0}://{1}:{2}".format(proto, host, port) + # Rewrite schemes to fit library internals (requests adapters) + if proto == 'tcp': + proto = f"http{'s' if tls else ''}" + elif proto == 'unix': + proto = 'http+unix' + + if proto in ('http+unix', 'npipe'): + return f"{proto}://{path}".rstrip('/') + + return urlunparse(URLComponents( + scheme=proto, + netloc=netloc, + url=path, + params='', + query='', + fragment='', + )).rstrip('/') def parse_devices(devices): device_list = [] for device in devices: - device_mapping = device.split(":") + if isinstance(device, dict): + device_list.append(device) + continue + if not isinstance(device, str): + raise errors.DockerException( + f'Invalid device type {type(device)}' + ) + device_mapping = device.split(':') if device_mapping: path_on_host = device_mapping[0] if len(device_mapping) > 1: @@ -296,346 +342,176 @@ def parse_devices(devices): permissions = device_mapping[2] else: permissions = 'rwm' - device_list.append({"PathOnHost": path_on_host, - "PathInContainer": path_in_container, - "CgroupPermissions": permissions}) + device_list.append({ + 'PathOnHost': path_on_host, + 'PathInContainer': path_in_container, + 'CgroupPermissions': permissions + }) return device_list -def kwargs_from_env(ssl_version=None, assert_hostname=None): - host = os.environ.get('DOCKER_HOST') - cert_path = os.environ.get('DOCKER_CERT_PATH') - tls_verify = os.environ.get('DOCKER_TLS_VERIFY') +def kwargs_from_env(environment=None): + if not environment: + environment = os.environ + host = environment.get('DOCKER_HOST') + + # empty string for cert path is the same as unset. + cert_path = environment.get('DOCKER_CERT_PATH') or None + + # empty string for tls verify counts as "false". + # Any value or 'unset' counts as true. 
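+    # e.g. DOCKER_TLS_VERIFY='' disables verification, while any non-empty
+    # value (even '0' or 'false') enables it; an unset variable also
+    # counts as false here.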
+ tls_verify = environment.get('DOCKER_TLS_VERIFY') + if tls_verify == '': + tls_verify = False + else: + tls_verify = tls_verify is not None + enable_tls = cert_path or tls_verify params = {} + if host: - params['base_url'] = (host.replace('tcp://', 'https://') - if tls_verify else host) - if tls_verify and cert_path: - params['tls'] = tls.TLSConfig( - client_cert=(os.path.join(cert_path, 'cert.pem'), - os.path.join(cert_path, 'key.pem')), - ca_cert=os.path.join(cert_path, 'ca.pem'), - verify=True, - ssl_version=ssl_version, - assert_hostname=assert_hostname) + params['base_url'] = host + + if not enable_tls: + return params + + if not cert_path: + cert_path = os.path.join(os.path.expanduser('~'), '.docker') + + params['tls'] = TLSConfig( + client_cert=(os.path.join(cert_path, 'cert.pem'), + os.path.join(cert_path, 'key.pem')), + ca_cert=os.path.join(cert_path, 'ca.pem'), + verify=tls_verify, + ) + return params def convert_filters(filters): result = {} - for k, v in six.iteritems(filters): + for k, v in iter(filters.items()): if isinstance(v, bool): v = 'true' if v else 'false' if not isinstance(v, list): v = [v, ] - result[k] = v + result[k] = [ + str(item) if not isinstance(item, str) else item + for item in v + ] return json.dumps(result) -def datetime_to_timestamp(dt=datetime.now()): - """Convert a datetime in local timezone to a unix timestamp""" - delta = dt - datetime.fromtimestamp(0) +def datetime_to_timestamp(dt): + """Convert a datetime to a Unix timestamp""" + delta = dt.astimezone(timezone.utc) - datetime(1970, 1, 1, tzinfo=timezone.utc) return delta.seconds + delta.days * 24 * 3600 def parse_bytes(s): + if isinstance(s, (int, float,)): + return s if len(s) == 0: - s = 0 - else: - if s[-2:-1].isalpha() and s[-1].isalpha(): - if (s[-1] == "b" or s[-1] == "B"): - s = s[:-1] - units = BYTE_UNITS - suffix = s[-1].lower() - - # Check if the variable is a string representation of an int - # without a units part. Assuming that the units are bytes. - if suffix.isdigit(): - digits_part = s - suffix = 'b' - else: - digits_part = s[:-1] - - if suffix in units.keys() or suffix.isdigit(): - try: - digits = int(digits_part) - except ValueError: - message = ('Failed converting the string value for' - 'memory ({0}) to a number.') - formatted_message = message.format(digits_part) - raise errors.DockerException(formatted_message) - - s = digits * units[suffix] - else: - message = ('The specified value for memory' - ' ({0}) should specify the units. The postfix' - ' should be one of the `b` `k` `m` `g`' - ' characters') - raise errors.DockerException(message.format(s)) + return 0 - return s + if s[-2:-1].isalpha() and s[-1].isalpha(): + if s[-1] == "b" or s[-1] == "B": + s = s[:-1] + units = BYTE_UNITS + suffix = s[-1].lower() + + # Check if the variable is a string representation of an int + # without a units part. Assuming that the units are bytes. + if suffix.isdigit(): + digits_part = s + suffix = 'b' + else: + digits_part = s[:-1] + if suffix in units.keys() or suffix.isdigit(): + try: + digits = float(digits_part) + except ValueError as ve: + raise errors.DockerException( + 'Failed converting the string value for memory ' + f'({digits_part}) to an integer.' 
+ ) from ve -def create_host_config( - binds=None, port_bindings=None, lxc_conf=None, - publish_all_ports=False, links=None, privileged=False, - dns=None, dns_search=None, volumes_from=None, network_mode=None, - restart_policy=None, cap_add=None, cap_drop=None, devices=None, - extra_hosts=None, read_only=None, pid_mode=None, ipc_mode=None, - security_opt=None, ulimits=None, log_config=None, mem_limit=None, - memswap_limit=None -): - host_config = {} - - if mem_limit is not None: - if isinstance(mem_limit, six.string_types): - mem_limit = parse_bytes(mem_limit) - host_config['Memory'] = mem_limit - - if memswap_limit is not None: - if isinstance(memswap_limit, six.string_types): - memswap_limit = parse_bytes(memswap_limit) - host_config['MemorySwap'] = memswap_limit - - if pid_mode not in (None, 'host'): + # Reconvert to long for the final result + s = int(digits * units[suffix]) + else: raise errors.DockerException( - 'Invalid value for pid param: {0}'.format(pid_mode) + f'The specified value for memory ({s}) should specify the units. ' + 'The postfix should be one of the `b` `k` `m` `g` characters' ) - elif pid_mode: - host_config['PidMode'] = pid_mode - if ipc_mode: - host_config['IpcMode'] = ipc_mode - - if privileged: - host_config['Privileged'] = privileged - - if publish_all_ports: - host_config['PublishAllPorts'] = publish_all_ports - - if read_only is not None: - host_config['ReadonlyRootfs'] = read_only - - if dns_search: - host_config['DnsSearch'] = dns_search - - if network_mode: - host_config['NetworkMode'] = network_mode + return s - if restart_policy: - host_config['RestartPolicy'] = restart_policy - if cap_add: - host_config['CapAdd'] = cap_add +def normalize_links(links): + if isinstance(links, dict): + links = iter(links.items()) - if cap_drop: - host_config['CapDrop'] = cap_drop + return [f'{k}:{v}' if v else k for k, v in sorted(links)] - if devices: - host_config['Devices'] = parse_devices(devices) - if dns is not None: - host_config['Dns'] = dns +def parse_env_file(env_file): + """ + Reads a line-separated environment file. + The format of each line should be "key=value". 
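+
+    For example (hypothetical contents), a file with the two lines
+    ``# db settings`` and ``DB_HOST=localhost`` yields
+    ``{'DB_HOST': 'localhost'}``: lines starting with ``#`` are skipped,
+    and everything after the first ``=`` becomes the value.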
+ """ + environment = {} - if security_opt is not None: - if not isinstance(security_opt, list): - raise errors.DockerException( - 'Invalid type for security_opt param: expected list but found' - ' {0}'.format(type(security_opt)) - ) - host_config['SecurityOpt'] = security_opt + with open(env_file) as f: + for line in f: - if volumes_from is not None: - if isinstance(volumes_from, six.string_types): - volumes_from = volumes_from.split(',') - host_config['VolumesFrom'] = volumes_from + if line[0] == '#': + continue - if binds is not None: - host_config['Binds'] = convert_volume_binds(binds) + line = line.strip() + if not line: + continue - if port_bindings is not None: - host_config['PortBindings'] = convert_port_bindings( - port_bindings - ) + parse_line = line.split('=', 1) + if len(parse_line) == 2: + k, v = parse_line + environment[k] = v + else: + raise errors.DockerException( + f'Invalid line in environment file {env_file}:\n{line}') - if extra_hosts is not None: - if isinstance(extra_hosts, dict): - extra_hosts = [ - '{0}:{1}'.format(k, v) - for k, v in sorted(six.iteritems(extra_hosts)) - ] + return environment - host_config['ExtraHosts'] = extra_hosts - if links is not None: - if isinstance(links, dict): - links = six.iteritems(links) +def split_command(command): + return shlex.split(command) - formatted_links = [ - '{0}:{1}'.format(k, v) for k, v in sorted(links) - ] - host_config['Links'] = formatted_links +def format_environment(environment): + def format_env(key, value): + if value is None: + return key + if isinstance(value, bytes): + value = value.decode('utf-8') - if isinstance(lxc_conf, dict): - formatted = [] - for k, v in six.iteritems(lxc_conf): - formatted.append({'Key': k, 'Value': str(v)}) - lxc_conf = formatted + return f'{key}={value}' + return [format_env(*var) for var in iter(environment.items())] - if lxc_conf is not None: - host_config['LxcConf'] = lxc_conf - if ulimits is not None: - if not isinstance(ulimits, list): - raise errors.DockerException( - 'Invalid type for ulimits param: expected list but found' - ' {0}'.format(type(ulimits)) - ) - host_config['Ulimits'] = [] - for l in ulimits: - if not isinstance(l, Ulimit): - l = Ulimit(**l) - host_config['Ulimits'].append(l) - - if log_config is not None: - if not isinstance(log_config, LogConfig): - if not isinstance(log_config, dict): - raise errors.DockerException( - 'Invalid type for log_config param: expected LogConfig but' - ' found {0}'.format(type(log_config)) - ) - log_config = LogConfig(**log_config) - host_config['LogConfig'] = log_config - - return host_config - - -def create_container_config( - version, image, command, hostname=None, user=None, detach=False, - stdin_open=False, tty=False, mem_limit=None, ports=None, environment=None, - dns=None, volumes=None, volumes_from=None, network_disabled=False, - entrypoint=None, cpu_shares=None, working_dir=None, domainname=None, - memswap_limit=None, cpuset=None, host_config=None, mac_address=None, - labels=None, volume_driver=None -): - if isinstance(command, six.string_types): - command = shlex.split(str(command)) - if isinstance(environment, dict): - environment = [ - six.text_type('{0}={1}').format(k, v) - for k, v in six.iteritems(environment) +def format_extra_hosts(extra_hosts, task=False): + # Use format dictated by Swarm API if container is part of a task + if task: + return [ + f'{v} {k}' for k, v in sorted(iter(extra_hosts.items())) ] - if labels is not None and compare_version('1.18', version) < 0: - raise errors.InvalidVersion( - 'labels were 
only introduced in API version 1.18' - ) + return [ + f'{k}:{v}' for k, v in sorted(iter(extra_hosts.items())) + ] - if compare_version('1.19', version) < 0: - if volume_driver is not None: - raise errors.InvalidVersion( - 'Volume drivers were only introduced in API version 1.19' - ) - mem_limit = mem_limit if mem_limit is not None else 0 - memswap_limit = memswap_limit if memswap_limit is not None else 0 - else: - if mem_limit is not None: - raise errors.InvalidVersion( - 'mem_limit has been moved to host_config in API version 1.19' - ) - if memswap_limit is not None: - raise errors.InvalidVersion( - 'memswap_limit has been moved to host_config in API ' - 'version 1.19' - ) - - if isinstance(labels, list): - labels = dict((lbl, six.text_type('')) for lbl in labels) - - if isinstance(mem_limit, six.string_types): - mem_limit = parse_bytes(mem_limit) - if isinstance(memswap_limit, six.string_types): - memswap_limit = parse_bytes(memswap_limit) - - if isinstance(ports, list): - exposed_ports = {} - for port_definition in ports: - port = port_definition - proto = 'tcp' - if isinstance(port_definition, tuple): - if len(port_definition) == 2: - proto = port_definition[1] - port = port_definition[0] - exposed_ports['{0}/{1}'.format(port, proto)] = {} - ports = exposed_ports - - if isinstance(volumes, six.string_types): - volumes = [volumes, ] - - if isinstance(volumes, list): - volumes_dict = {} - for vol in volumes: - volumes_dict[vol] = {} - volumes = volumes_dict - - if volumes_from: - if not isinstance(volumes_from, six.string_types): - volumes_from = ','.join(volumes_from) - else: - # Force None, an empty list or dict causes client.start to fail - volumes_from = None - - attach_stdin = False - attach_stdout = False - attach_stderr = False - stdin_once = False - - if not detach: - attach_stdout = True - attach_stderr = True - - if stdin_open: - attach_stdin = True - stdin_once = True - - if compare_version('1.10', version) >= 0: - message = ('{0!r} parameter has no effect on create_container().' - ' It has been moved to start()') - if dns is not None: - raise errors.InvalidVersion(message.format('dns')) - if volumes_from is not None: - raise errors.InvalidVersion(message.format('volumes_from')) - - return { - 'Hostname': hostname, - 'Domainname': domainname, - 'ExposedPorts': ports, - 'User': user, - 'Tty': tty, - 'OpenStdin': stdin_open, - 'StdinOnce': stdin_once, - 'Memory': mem_limit, - 'AttachStdin': attach_stdin, - 'AttachStdout': attach_stdout, - 'AttachStderr': attach_stderr, - 'Env': environment, - 'Cmd': command, - 'Dns': dns, - 'Image': image, - 'Volumes': volumes, - 'VolumesFrom': volumes_from, - 'NetworkDisabled': network_disabled, - 'Entrypoint': entrypoint, - 'CpuShares': cpu_shares, - 'Cpuset': cpuset, - 'CpusetCpus': cpuset, - 'WorkingDir': working_dir, - 'MemorySwap': memswap_limit, - 'HostConfig': host_config, - 'MacAddress': mac_address, - 'Labels': labels, - 'VolumeDriver': volume_driver, - } +def create_host_config(self, *args, **kwargs): + raise errors.DeprecatedMethod( + 'utils.create_host_config has been removed. Please use a ' + 'docker.types.HostConfig object instead.' 
+ ) diff --git a/docker/version.py b/docker/version.py index 88859a6c00..72b12b84df 100644 --- a/docker/version.py +++ b/docker/version.py @@ -1,2 +1,8 @@ -version = "1.3.0-dev" -version_info = tuple([int(d) for d in version.split("-")[0].split(".")]) +try: + from ._version import __version__ +except ImportError: + from importlib.metadata import PackageNotFoundError, version + try: + __version__ = version('docker') + except PackageNotFoundError: + __version__ = '0.0.0' diff --git a/docs-requirements.txt b/docs-requirements.txt deleted file mode 100644 index abc8d72db6..0000000000 --- a/docs-requirements.txt +++ /dev/null @@ -1 +0,0 @@ -mkdocs==0.9 diff --git a/docs/_static/custom.css b/docs/_static/custom.css new file mode 100644 index 0000000000..76c74e10d5 --- /dev/null +++ b/docs/_static/custom.css @@ -0,0 +1,12 @@ +dl.hide-signature > dt { + display: none; +} + +dl.field-list > dt { + /* prevent code blocks from forcing wrapping on the "Parameters" header */ + word-break: initial; +} + +code.literal{ + hyphens: none; +} diff --git a/docs/_templates/page.html b/docs/_templates/page.html new file mode 100644 index 0000000000..cf0264cf73 --- /dev/null +++ b/docs/_templates/page.html @@ -0,0 +1,2 @@ +{% extends "!page.html" %} +{% set css_files = css_files + ["_static/custom.css"] %} diff --git a/docs/api.md b/docs/api.md deleted file mode 100644 index 5a3b3222a6..0000000000 --- a/docs/api.md +++ /dev/null @@ -1,850 +0,0 @@ -# Client API - -To instantiate a `Client` class that will allow you to communicate with a -Docker daemon, simply do: - -```python -from docker import Client -c = Client(base_url='unix://var/run/docker.sock') -``` - -**Params**: - -* base_url (str): Refers to the protocol+hostname+port where the Docker server -is hosted. -* version (str): The version of the API the client will use. Specify `'auto'` - to use the API version provided by the server. -* timeout (int): The HTTP request timeout, in seconds. -* tls (bool or [TLSConfig](tls.md#TLSConfig)): Equivalent CLI options: `docker --tls ...` - -**** - -## attach - -The `.logs()` function is a wrapper around this method, which you can use -instead if you want to fetch/stream container output without first retrieving -the entire backlog. - -**Params**: - -* container (str): The container to attach to -* stdout (bool): Get STDOUT -* stderr (bool): Get STDERR -* stream (bool): Return an interator -* logs (bool): Get all previous output - -**Returns** (generator or str): The logs or output for the image - -## build - -Similar to the `docker build` command. Either `path` or `fileobj` needs to be -set. `path` can be a local path (to a directory containing a Dockerfile) or a -remote URL. `fileobj` must be a readable file-like object to a Dockerfile. - -If you have a tar file for the Docker build context (including a Dockerfile) -already, pass a readable file-like object to `fileobj` and also pass -`custom_context=True`. If the stream is compressed also, set `encoding` to the -correct value (e.g `gzip`). - -**Params**: - -* path (str): Path to the directory containing the Dockerfile -* tag (str): A tag to add to the final image -* quiet (bool): Whether to return the status -* fileobj: A file object to use as the Dockerfile. (Or a file-like object) -* nocache (bool): Don't use the cache when set to `True` -* rm (bool): Remove intermediate containers. 
The `docker build` command now - defaults to ``--rm=true``, but we have kept the old default of `False` - to preserve backward compatibility -* stream (bool): *Deprecated for API version > 1.8 (always True)*. - Return a blocking generator you can iterate over to retrieve build output as - it happens -* timeout (int): HTTP timeout -* custom_context (bool): Optional if using `fileobj` -* encoding (str): The encoding for a stream. Set to `gzip` for compressing -* pull (bool): Downloads any updates to the FROM image in Dockerfiles -* forcerm (bool): Always remove intermediate containers, even after unsuccessful builds -* dockerfile (str): path within the build context to the Dockerfile -* container_limits (dict): A dictionary of limits applied to each container - created by the build process. Valid keys: - - memory (int): set memory limit for build - - memswap (int): Total memory (memory + swap), -1 to disable swap - - cpushares (int): CPU shares (relative weight) - - cpusetcpus (str): CPUs in which to allow exection, e.g., `"0-3"`, `"0,1"` -* decode (bool): If set to `True`, the returned stream will be decoded into - dicts on the fly. Default `False`. - -**Returns** (generator): A generator for the build output - -```python ->>> from io import BytesIO ->>> from docker import Client ->>> dockerfile = ''' -... # Shared Volume -... FROM busybox:buildroot-2014.02 -... MAINTAINER first last, first.last@yourdomain.com -... VOLUME /data -... CMD ["/bin/sh"] -... ''' ->>> f = BytesIO(dockerfile.encode('utf-8')) ->>> cli = Client(base_url='tcp://127.0.0.1:2375') ->>> response = [line for line in cli.build( -... fileobj=f, rm=True, tag='yourname/volume' -... )] ->>> response -['{"stream":" ---\\u003e a9eb17255234\\n"}', -'{"stream":"Step 1 : MAINTAINER first last, first.last@yourdomain.com\\n"}', -'{"stream":" ---\\u003e Running in 08787d0ee8b1\\n"}', -'{"stream":" ---\\u003e 23e5e66a4494\\n"}', -'{"stream":"Removing intermediate container 08787d0ee8b1\\n"}', -'{"stream":"Step 2 : VOLUME /data\\n"}', -'{"stream":" ---\\u003e Running in abdc1e6896c6\\n"}', -'{"stream":" ---\\u003e 713bca62012e\\n"}', -'{"stream":"Removing intermediate container abdc1e6896c6\\n"}', -'{"stream":"Step 3 : CMD [\\"/bin/sh\\"]\\n"}', -'{"stream":" ---\\u003e Running in dba30f2a1a7e\\n"}', -'{"stream":" ---\\u003e 032b8b2855fc\\n"}', -'{"stream":"Removing intermediate container dba30f2a1a7e\\n"}', -'{"stream":"Successfully built 032b8b2855fc\\n"}'] -``` - -**Raises:** [TypeError]( -https://docs.python.org/3.4/library/exceptions.html#TypeError) if `path` nor -`fileobj` are specified - -## commit - -Identical to the `docker commit` command. - -**Params**: - -* container (str): The image hash of the container -* repository (str): The repository to push the image to -* tag (str): The tag to push -* message (str): A commit message -* author (str): The name of the author -* conf (dict): The configuraton for the container. See the [Docker remote api]( -https://docs.docker.com/reference/api/docker_remote_api/) for full details. - -## containers - -List containers. Identical to the `docker ps` command. - -**Params**: - -* quiet (bool): Only display numeric Ids -* all (bool): Show all containers. Only running containers are shown by default -* trunc (bool): Truncate output -* latest (bool): Show only the latest created container, include non-running -ones. 
-* since (str): Show only containers created since Id or Name, include -non-running ones -* before (str): Show only container created before Id or Name, include -non-running ones -* limit (int): Show `limit` last created containers, include non-running ones -* size (bool): Display sizes -* filters (dict): Filters to be processed on the image list. Available filters: - - `exited` (int): Only containers with specified exit code - - `status` (str): One of `restarting`, `running`, `paused`, `exited` - - `label` (str): format either `"key"` or `"key=value"` - -**Returns** (dict): The system's containers - -```python ->>> from docker import Client ->>> cli = Client(base_url='tcp://127.0.0.1:2375') ->>> cli.containers() -[{'Command': '/bin/sleep 30', - 'Created': 1412574844, - 'Id': '6e276c9e6e5759e12a6a9214efec6439f80b4f37618e1a6547f28a3da34db07a', - 'Image': 'busybox:buildroot-2014.02', - 'Names': ['/grave_mayer'], - 'Ports': [], - 'Status': 'Up 1 seconds'}] -``` - -## copy -Identical to the `docker cp` command. Get files/folders from the container. - -**Params**: - -* container (str): The container to copy from -* resource (str): The path within the container - -**Returns** (str): The contents of the file as a string - -## create_container - -Creates a container that can then be `.start()` ed. Parameters are similar to -those for the `docker run` command except it doesn't support the attach -options (`-a`). - -See [Port bindings](port-bindings.md) and [Using volumes](volumes.md) for more -information on how to create port bindings and volume mappings. - -The `mem_limit` variable accepts float values (which represent the memory limit -of the created container in bytes) or a string with a units identification char -('100000b', '1000k', '128m', '1g'). If a string is specified without a units -character, bytes are assumed as an intended unit. - -`volumes_from` and `dns` arguments raise [TypeError]( -https://docs.python.org/3.4/library/exceptions.html#TypeError) exception if -they are used against v1.10 and above of the Docker remote API. Those -arguments should be passed as part of the `host_config` dictionary. - -**Params**: - -* image (str): The image to run -* command (str or list): The command to be run in the container -* hostname (str): Optional hostname for the container -* user (str or int): Username or UID -* detach (bool): Detached mode: run container in the background and print new -container Id -* stdin_open (bool): Keep STDIN open even if not attached -* tty (bool): Allocate a pseudo-TTY -* mem_limit (float or str): Memory limit (format: [number][optional unit], -where unit = b, k, m, or g) -* ports (list of ints): A list of port numbers -* environment (dict or list): A dictionary or a list of strings in the -following format `["PASSWORD=xxx"]` or `{"PASSWORD": "xxx"}`. -* dns (list): DNS name servers -* volumes (str or list): -* volumes_from (str or list): List of container names or Ids to get volumes -from. Optionally a single string joining container id's with commas -* network_disabled (bool): Disable networking -* name (str): A name for the container -* entrypoint (str or list): An entrypoint -* cpu_shares (int or float): CPU shares (relative weight) -* working_dir (str): Path to the working directory -* domainname (str or list): Set custom DNS search domains -* memswap_limit (int): -* host_config (dict): A [HostConfig](hostconfig.md) dictionary -* mac_address (str): The Mac Address to assign the container -* labels (dict or list): A dictionary of name-value labels (e.g. 
`{"label1": "value1", "label2": "value2"}`) or a list of names of labels to set with empty values (e.g. `["label1", "label2"]`) -* volume_driver (str): The name of a volume driver/plugin. - -**Returns** (dict): A dictionary with an image 'Id' key and a 'Warnings' key. - -```python ->>> from docker import Client ->>> cli = Client(base_url='tcp://127.0.0.1:2375') ->>> container = cli.create_container(image='busybox:latest', command='/bin/sleep 30') ->>> print(container) -{'Id': '8a61192da2b3bb2d922875585e29b74ec0dc4e0117fcbf84c962204e97564cd7', - 'Warnings': None} -``` - -## diff - -Inspect changes on a container's filesystem - -**Params**: - -* container (str): The container to diff - -**Returns** (str): - -## events - -Identical to the `docker events` command: get real time events from the server. The `events` -function return a blocking generator you can iterate over to retrieve events as they happen. - -**Params**: - -* since (datetime or int): get events from this point -* until (datetime or int): get events until this point -* filters (dict): filter the events by event time, container or image -* decode (bool): If set to true, stream will be decoded into dicts on the - fly. False by default. - -**Returns** (generator): - -```python -{u'status': u'start', - u'from': u'image/with:tag', - u'id': u'container-id', - u'time': 1423339459} -``` - -## execute - -This command is deprecated for docker-py >= 1.2.0 ; use `exec_create` and -`exec_start` instead. - -## exec_create - -Sets up an exec instance in a running container. - -**Params**: - -* container (str): Target container where exec instance will be created -* cmd (str or list): Command to be executed -* stdout (bool): Attach to stdout of the exec command if true. Default: True -* stderr (bool): Attach to stderr of the exec command if true. Default: True -* tty (bool): Allocate a pseudo-TTY. Default: False - -**Returns** (dict): A dictionary with an exec 'Id' key. - - -## exec_inspect - -Return low-level information about an exec command. - -**Params**: - -* exec_id (str): ID of the exec instance - -**Returns** (dict): Dictionary of values returned by the endpoint. - - -## exec_resize - -Resize the tty session used by the specified exec command. - -**Params**: - -* exec_id (str): ID of the exec instance -* height (int): Height of tty session -* width (int): Width of tty session - -## exec_start - -Start a previously set up exec instance. - -**Params**: - -* exec_id (str): ID of the exec instance -* detach (bool): If true, detach from the exec command. Default: False -* tty (bool): Allocate a pseudo-TTY. Default: False -* stream (bool): Stream response data - -**Returns** (generator or str): If `stream=True`, a generator yielding response -chunks. A string containing response data otherwise. - -## export - -Export the contents of a filesystem as a tar archive to STDOUT - -**Params**: - -* container (str): The container to export - -**Returns** (str): The filesystem tar archive as a str - -## get_image - -Get an image from the docker daemon. Similar to the `docker save` command. - -**Params**: - -* image (str): Image name to get - -**Returns** (urllib3.response.HTTPResponse object): The response from the docker daemon - -An example of how to get (save) an image to a file. 
-```python ->>> from docker import Client ->>> cli = Client(base_url='unix://var/run/docker.sock') ->>> image = cli.get_image(“fedora:latest”) ->>> image_tar = open(‘/tmp/fedora-latest.tar’,’w’) ->>> image_tar.write(image.data) ->>> image_tar.close() -``` - -## history - -Show the history of an image - -**Params**: - -* image (str): The image to show history for - -**Returns** (str): The history of the image - -## images - -List images. Identical to the `docker images` command. - -**Params**: - -* name (str): Only show images belonging to the repository `name` -* quiet (bool): Only show numeric Ids. Returns a list -* all (bool): Show all images (by default filter out the intermediate image -layers) -* filters (dict): Filters to be processed on the image list. Available filters: - - `dangling` (bool) - - `label` (str): format either `"key"` or `"key=value"` - -**Returns** (dict or list): A list if `quiet=True`, otherwise a dict. - -```python -[{'Created': 1401926735, -'Id': 'a9eb172552348a9a49180694790b33a1097f546456d041b6e82e4d7716ddb721', -'ParentId': '120e218dd395ec314e7b6249f39d2853911b3d6def6ea164ae05722649f34b16', -'RepoTags': ['busybox:buildroot-2014.02', 'busybox:latest'], -'Size': 0, -'VirtualSize': 2433303}, -... -``` - -## import_image - -Similar to the `docker import` command. - -If `src` is a string or unicode string, it will first be treated as a path to -a tarball on the local system. If there is an error reading from that file, -src will be treated as a URL instead to fetch the image from. You can also pass -an open file handle as 'src', in which case the data will be read from that -file. - -If `src` is unset but `image` is set, the `image` paramater will be taken as -the name of an existing image to import from. - -**Params**: - -* src (str or file): Path to tarfile, URL, or file-like object -* repository (str): The repository to create -* tag (str): The tag to apply -* image (str): Use another image like the `FROM` Dockerfile parameter - -## import_image_from_data - -Like `.import_image()`, but allows importing in-memory bytes data. - -**Params**: - -* data (bytes collection): Bytes collection containing valid tar data -* repository (str): The repository to create -* tag (str): The tag to apply - -## import_image_from_file - -Like `.import_image()`, but only supports importing from a tar file on -disk. If the file doesn't exist it will raise `IOError`. - -**Params**: - -* filename (str): Full path to a tar file. -* repository (str): The repository to create -* tag (str): The tag to apply - -## import_image_from_url - -Like `.import_image()`, but only supports importing from a URL. - -**Params**: - -* url (str): A URL pointing to a tar file. -* repository (str): The repository to create -* tag (str): The tag to apply - -## import_image_from_image - -Like `.import_image()`, but only supports importing from another image, -like the `FROM` Dockerfile parameter. - -**Params**: - -* image (str): Image name to import from -* repository (str): The repository to create -* tag (str): The tag to apply - -## info - -Display system-wide information. Identical to the `docker info` command. 
- -**Returns** (dict): The info as a dict - -``` ->>> from docker import Client ->>> cli = Client(base_url='tcp://127.0.0.1:2375') ->>> cli.info() -{'Containers': 3, - 'Debug': 1, - 'Driver': 'aufs', - 'DriverStatus': [['Root Dir', '/mnt/sda1/var/lib/docker/aufs'], - ['Dirs', '225']], - 'ExecutionDriver': 'native-0.2', - 'IPv4Forwarding': 1, - 'Images': 219, - 'IndexServerAddress': 'https://index.docker.io/v1/', - 'InitPath': '/usr/local/bin/docker', - 'InitSha1': '', - 'KernelVersion': '3.16.1-tinycore64', - 'MemoryLimit': 1, - 'NEventsListener': 0, - 'NFd': 11, - 'NGoroutines': 12, - 'OperatingSystem': 'Boot2Docker 1.2.0 (TCL 5.3);', - 'SwapLimit': 1} -``` - -## insert -*DEPRECATED* - -## inspect_container - -Identical to the `docker inspect` command, but only for containers. - -**Params**: - -* container (str): The container to inspect - -**Returns** (dict): Nearly the same output as `docker inspect`, just as a -single dict - -## inspect_image - -Identical to the `docker inspect` command, but only for images - -**Params**: - -* image_id (str): The image to inspect - -**Returns** (dict): Nearly the same output as `docker inspect`, just as a -single dict - -## kill - -Kill a container or send a signal to a container - -**Params**: - -* container (str): The container to kill -* signal (str or int): The singal to send. Defaults to `SIGKILL` - -## login - -Nearly identical to the `docker login` command, but non-interactive. - -**Params**: - -* username (str): The registry username -* password (str): The plaintext password -* email (str): The email for the registry account -* registry (str): URL to the registry. Ex:`https://index.docker.io/v1/` -* reauth (bool): Whether refresh existing authentication on the docker server. -* dockercfg_path (str): Use a custom path for the .dockercfg file - (default `$HOME/.dockercfg`) - -**Returns** (dict): The response from the login request - -## logs - -Identical to the `docker logs` command. The `stream` parameter makes the `logs` -function return a blocking generator you can iterate over to retrieve log -output as it happens. - -**Params**: - -* container (str): The container to get logs from -* stdout (bool): Get STDOUT -* stderr (bool): Get STDERR -* stream (bool): Stream the response -* timestamps (bool): Show timestamps -* tail (str or int): Output specified number of lines at the end of logs: `"all"` or `number`. Default `"all"` - -**Returns** (generator or str): - -## pause - -Pauses all processes within a container. - -**Params**: - -* container (str): The container to pause - - -## ping - -Hits the `/_ping` endpoint of the remote API and returns the result. An -exception will be raised if the endpoint isn't responding. - -**Returns** (bool) - -## port -Lookup the public-facing port that is NAT-ed to `private_port`. Identical to -the `docker port` command. - -**Params**: - -* container (str): The container to look up -* private_port (int): The private port to inspect - -**Returns** (list of dict): The mapping for the host ports - -```bash -$ docker run -d -p 80:80 ubuntu:14.04 /bin/sleep 30 -7174d6347063a83f412fad6124c99cffd25ffe1a0807eb4b7f9cec76ac8cb43b -``` -```python ->>> cli.port('7174d6347063', 80) -[{'HostIp': '0.0.0.0', 'HostPort': '80'}] -``` - -## pull - -Identical to the `docker pull` command. 
- -**Params**: - -* repository (str): The repository to pull -* tag (str): The tag to pull -* stream (bool): Stream the output as a generator -* insecure_registry (bool): Use an insecure registry -* auth_config (dict): Override the credentials that Client.login has set for this request - `auth_config` should contain the `username` and `password` keys to be valid. - -**Returns** (generator or str): The output - -```python ->>> from docker import Client ->>> cli = Client(base_url='tcp://127.0.0.1:2375') ->>> for line in cli.pull('busybox', stream=True): -... print(json.dumps(json.loads(line), indent=4)) -{ - "status": "Pulling image (latest) from busybox", - "progressDetail": {}, - "id": "e72ac664f4f0" -} -{ - "status": "Pulling image (latest) from busybox, endpoint: ...", - "progressDetail": {}, - "id": "e72ac664f4f0" -} -``` - -## push - -Push an image or a repository to the registry. Identical to the `docker push` -command - -**Params**: - -* repository (str): The repository to push to -* tag (str): An optional tag to push -* stream (bool): Stream the output as a blocking generator -* insecure_registry (bool): Use `http://` to connect to the registry - -**Returns** (generator or str): The output of the upload - -```python ->>> from docker import Client ->>> cli = Client(base_url='tcp://127.0.0.1:2375') ->>> response = [line for line in cli.push('yourname/app', stream=True)] ->>> response -['{"status":"Pushing repository yourname/app (1 tags)"}\\n', - '{"status":"Pushing","progressDetail":{},"id":"511136ea3c5a"}\\n', - '{"status":"Image already pushed, skipping","progressDetail":{}, - "id":"511136ea3c5a"}\\n', - ... - '{"status":"Pushing tag for rev [918af568e6e5] on { - https://cdn-registry-1.docker.io/v1/repositories/ - yourname/app/tags/latest}"}\\n'] -``` - -## remove_container - -Remove a container. Similar to the `docker rm` command. - -**Params**: - -* container (str): The container to remove -* v (bool): Remove the volumes associated with the container -* link (bool): Remove the specified link and not the underlying container -* force (bool): Force the removal of a running container (uses SIGKILL) - -## remove_image - -Remove an image. Similar to the `docker rmi` command. - -**Params**: - -* image (str): The image to remove -* force (bool): Force removal of the image -* noprune (bool): Do not delete untagged parents - -## rename - -Rename a container. Similar to the `docker rename` command. - -**Params**: - -* container (str): ID of the container to rename -* name (str): New name for the container - -## restart - -Restart a container. Similar to the `docker restart` command. - -If `container` a dict, the `Id` key is used. - -**Params**: - -* container (str or dict): The container to restart -* timeout (int): Number of seconds to try to stop for before killing the -container. Once killed it will then be restarted. Default is 10 seconds. - -## search -Identical to the `docker search` command. - -**Params**: - -* term (str): A term to search for - -**Returns** (list of dicts): The response of the search - -```python ->>> from docker import Client ->>> cli = Client(base_url='tcp://127.0.0.1:2375') ->>> response = cli.search('nginx') ->>> response[:2] -[{'description': 'Official build of Nginx.', - 'is_official': True, - 'is_trusted': False, - 'name': 'nginx', - 'star_count': 266}, - {'description': 'Trusted automated Nginx (http://nginx.org/) ...', - 'is_official': False, - 'is_trusted': True, - 'name': 'dockerfile/nginx', - 'star_count': 60}, - ... 
-``` - -## start - -Similar to the `docker start` command, but doesn't support attach options. Use -`.logs()` to recover `stdout`/`stderr`. - -**Deprecation warning:** For API version > 1.15, it is highly recommended to - provide host config options in the - [`host_config` parameter of `create_container`](#create_container) - -```python ->>> from docker import Client ->>> cli = Client(base_url='tcp://127.0.0.1:2375') ->>> container = cli.create_container( -... image='busybox:latest', -... command='/bin/sleep 30') ->>> response = cli.start(container=container.get('Id')) ->>> print(response) -None -``` - -## stats - -The Docker API parallel to the `docker stats` command. -This will stream statistics for a specific container. - -**Params**: - -* container (str): The container to start -* decode (bool): If set to true, stream will be decoded into dicts on the - fly. False by default. - -```python ->>> from docker import Client ->>> cli = Client(base_url='tcp://127.0.0.1:2375') ->>> stats_obj = cli.stats('elasticsearch') ->>> for stat in stats_obj: ->>> print(stat) -{"read":"2015-02-11T21:47:30.49388286+02:00","network":{"rx_bytes":666052,"rx_packets":4409 ... -... -... -... -``` - -## stop - -Stops a container. Similar to the `docker stop` command. - -**Params**: - -* container (str): The container to stop -* timeout (int): Timeout in seconds to wait for the container to stop before -sending a `SIGKILL` - -## tag - -Tag an image into a repository. Identical to the `docker tag` command. - -**Params**: - -* image (str): The image to tag -* repository (str): The repository to set for the tag -* tag (str): The tag name -* force (bool): Force - -**Returns** (bool): True if successful - -## top -Display the running processes of a container - -**Params**: - -* container (str): The container to inspect - -**Returns** (str): The output of the top - -```python ->>> from docker import Client ->>> cli = Client(base_url='tcp://127.0.0.1:2375') ->>> cli.create_container('busybox:latest', '/bin/sleep 30', name='sleeper') ->>> cli.start('sleeper') ->>> cli.top('sleeper') -{'Processes': [['952', 'root', '/bin/sleep 30']], - 'Titles': ['PID', 'USER', 'COMMAND']} -``` - -## unpause - -Unpauses all processes within a container. - -**Params**: - -* container (str): The container to unpause - -## version -Nearly identical to the `docker version` command. - -**Returns** (dict): The server version information - -```python ->>> from docker import Client ->>> cli = Client(base_url='tcp://127.0.0.1:2375') ->>> cli.version() -{ - "KernelVersion": "3.16.4-tinycore64", - "Arch": "amd64", - "ApiVersion": "1.15", - "Version": "1.3.0", - "GitCommit": "c78088f", - "Os": "linux", - "GoVersion": "go1.3.3" -} -``` - - -## wait -Identical to the `docker wait` command. Block until a container stops, then -print its exit code. Returns the value `-1` if no `StatusCode` is returned by -the API. - -If `container` a dict, the `Id` key is used. - -**Params**: - -* container (str or dict): The container to wait on -* timeout (int): Request timeout - -**Returns** (int): The exit code of the container - - - diff --git a/docs/api.rst b/docs/api.rst new file mode 100644 index 0000000000..bd0466143d --- /dev/null +++ b/docs/api.rst @@ -0,0 +1,158 @@ +Low-level API +============= + +The main object-orientated API is built on top of :py:class:`APIClient`. Each method on :py:class:`APIClient` maps one-to-one with a REST API endpoint, and returns the response that the API responds with. + +It's possible to use :py:class:`APIClient` directly. 
Some basic things (e.g. running a container) consist of several API calls and are complex to do with the low-level API, but it's useful if you need extra flexibility and power. + +.. py:module:: docker.api + +.. autoclass:: docker.api.client.APIClient + +Configs +------- + +.. py:module:: docker.api.config + +.. rst-class:: hide-signature +.. autoclass:: ConfigApiMixin + :members: + :undoc-members: + +Containers +---------- + +.. py:module:: docker.api.container + +.. rst-class:: hide-signature +.. autoclass:: ContainerApiMixin + :members: + :undoc-members: + +Images +------ + +.. py:module:: docker.api.image + +.. rst-class:: hide-signature +.. autoclass:: ImageApiMixin + :members: + :undoc-members: + +Building images +--------------- + +.. py:module:: docker.api.build + +.. rst-class:: hide-signature +.. autoclass:: BuildApiMixin + :members: + :undoc-members: + +Networks +-------- + +.. rst-class:: hide-signature +.. autoclass:: docker.api.network.NetworkApiMixin + :members: + :undoc-members: + +Volumes +------- + +.. py:module:: docker.api.volume + +.. rst-class:: hide-signature +.. autoclass:: VolumeApiMixin + :members: + :undoc-members: + +Executing commands in containers +-------------------------------- + +.. py:module:: docker.api.exec_api + +.. rst-class:: hide-signature +.. autoclass:: ExecApiMixin + :members: + :undoc-members: + +Swarms +------ + +.. py:module:: docker.api.swarm + +.. rst-class:: hide-signature +.. autoclass:: SwarmApiMixin + :members: + :undoc-members: + +Services +-------- + +.. py:module:: docker.api.service + +.. rst-class:: hide-signature +.. autoclass:: ServiceApiMixin + :members: + :undoc-members: + +Plugins +------- + +.. py:module:: docker.api.plugin + +.. rst-class:: hide-signature +.. autoclass:: PluginApiMixin + :members: + :undoc-members: + +Secrets +------- + +.. py:module:: docker.api.secret + +.. rst-class:: hide-signature +.. autoclass:: SecretApiMixin + :members: + :undoc-members: + +The Docker daemon +----------------- + +.. py:module:: docker.api.daemon + +.. rst-class:: hide-signature +.. autoclass:: DaemonApiMixin + :members: + :undoc-members: + +Configuration types +------------------- + +.. py:module:: docker.types + +.. autoclass:: ConfigReference +.. autoclass:: ContainerSpec +.. autoclass:: DNSConfig +.. autoclass:: DriverConfig +.. autoclass:: EndpointSpec +.. autoclass:: Healthcheck +.. autoclass:: IPAMConfig +.. autoclass:: IPAMPool +.. autoclass:: LogConfig +.. autoclass:: Mount +.. autoclass:: NetworkAttachmentConfig +.. autoclass:: Placement +.. autoclass:: PlacementPreference +.. autoclass:: Privileges +.. autoclass:: Resources +.. autoclass:: RestartPolicy +.. autoclass:: RollbackConfig +.. autoclass:: SecretReference +.. autoclass:: ServiceMode +.. autoclass:: SwarmExternalCA +.. autoclass:: SwarmSpec(*args, **kwargs) +.. autoclass:: TaskTemplate +.. autoclass:: Ulimit +.. autoclass:: UpdateConfig diff --git a/docs/boot2docker.md b/docs/boot2docker.md deleted file mode 100644 index 43aa558d21..0000000000 --- a/docs/boot2docker.md +++ /dev/null @@ -1,38 +0,0 @@ -# Using with Boot2docker - -For usage with boot2docker, there is a helper function in the utils package named `kwargs_from_env`, it will pass any environment variables from Boot2docker to the Client. 
- -First run boot2docker in your shell: -```bash -$ eval "$(boot2docker shellinit)" -Writing /Users/you/.boot2docker/certs/boot2docker-vm/ca.pem -Writing /Users/you/.boot2docker/certs/boot2docker-vm/cert.pem -Writing /Users/you/.boot2docker/certs/boot2docker-vm/key.pem -``` - -You can then instantiate `docker.Client` like this: -```python -from docker.client import Client -from docker.utils import kwargs_from_env - -client = Client(**kwargs_from_env()) -print client.version() -``` - -If you're encountering the following error: -`SSLError: hostname '192.168.59.103' doesn't match 'boot2docker'`, you can: - -1. Add an entry to your /etc/hosts file matching boot2docker to the daemon's IP -1. disable hostname validation (but please consider the security implications - in doing this) - -```python -from docker.client import Client -from docker.utils import kwargs_from_env - -kwargs = kwargs_from_env() -kwargs['tls'].assert_hostname = False - -client = Client(**kwargs) -print client.version() -``` \ No newline at end of file diff --git a/docs/change-log.md b/docs/change-log.md new file mode 100644 index 0000000000..ebbdb71301 --- /dev/null +++ b/docs/change-log.md @@ -0,0 +1,2242 @@ +Changelog +========== + +7.1.0 +----- +### Upgrade Notes +- Bumped minimum engine API version to 1.24 +- Bumped default engine API version to 1.44 (Moby 25.0) + +### Bugfixes +- Fixed issue with tag parsing when the registry address includes ports that resulted in `invalid tag format` errors +- Fixed issue preventing creating new configs (`ConfigCollection`), which failed with a `KeyError` due to the `name` field +- Fixed an issue due to an update in the [requests](https://github.com/psf/requests) package breaking `docker-py` by applying the [suggested fix](https://github.com/psf/requests/pull/6710) + +### Miscellaneous +- Documentation improvements +- Updated Ruff (linter) and fixed minor linting issues +- Packaging/CI updates + - Started using hatch for packaging (https://github.com/pypa/hatch) + - Updated `setup-python` github action +- Updated tests + - Stopped checking for deprecated container and image related fields (`Container` and `ContainerConfig`) + - Updated tests that check `NetworkSettings.Networks..Aliases` due to engine changes + +7.0.0 +----- +### Upgrade Notes +- Removed SSL version (`ssl_version`) and explicit hostname check (`assert_hostname`) options + - `assert_hostname` has not been used since Python 3.6 and was removed in 3.12 + - Python 3.7+ supports TLSv1.3 by default +- Websocket support is no longer included by default + - Use `pip install docker[websockets]` to include `websocket-client` dependency + - By default, `docker-py` hijacks the TCP connection and does not use Websockets + - Websocket client is only required to use `attach_socket(container, ws=True)` +- Python 3.7 no longer officially supported (reached end-of-life June 2023) + +### Features +- Python 3.12 support +- Full `networking_config` support for `containers.create()` + - Replaces `network_driver_opt` (added in 6.1.0) +- Add `health()` property to container that returns status (e.g. `unhealthy`) +- Add `pause` option to `container.commit()` +- Add support for bind mount propagation (e.g. 
`rshared`, `private`)
+- Add `filters`, `keep_storage`, and `all` parameters to `prune_builds()` (requires API v1.39+)
+
+### Bugfixes
+- Consistently return `docker.errors.NotFound` on 404 responses
+- Validate tag format before image push
+
+### Miscellaneous
+- Upgraded urllib3 version in `requirements.txt` (used for development/tests)
+- Documentation typo fixes & formatting improvements
+- Fixed integration test compatibility for newer Moby engine versions
+- Switch to [ruff](https://github.com/astral-sh/ruff) for linting
+
+6.1.3
+-----
+#### Bugfixes
+- Fix compatibility with [`eventlet/eventlet`](https://github.com/eventlet/eventlet)
+
+6.1.2
+-----
+
+#### Bugfixes
+- Fix for socket timeouts on long `docker exec` calls
+
+6.1.1
+-----
+
+#### Bugfixes
+- Fix `containers.stats()` hanging with `stream=True`
+- Correct return type in docs for `containers.diff()` method
+
+
+6.1.0
+-----
+
+### Upgrade Notes
+- Errors are no longer returned during client initialization if the credential helper cannot be found. A warning will be emitted instead, and an error is returned if the credential helper is used.
+
+### Features
+- Python 3.11 support
+- Use `poll()` instead of `select()` on non-Windows platforms
+- New API fields
+  - `network_driver_opt` on container run / create
+  - `one-shot` on container stats
+  - `status` on services list
+
+### Bugfixes
+- Support for requests 2.29.0+ and urllib3 2.x
+- Do not strip characters from volume names
+- Fix connection leak on container.exec_* operations
+- Fix errors closing named pipes on Windows
+
+6.0.1
+-----
+
+### Bugfixes
+- Fix for `The pipe has been ended` errors on Windows
+- Support floats for container log filtering by timestamp (`since` / `until`)
+
+6.0.0
+-----
+
+### Upgrade Notes
+- Minimum supported Python version is 3.7+
+- When installing with pip, the `docker[tls]` extra is deprecated and a no-op, use `docker` for same functionality (TLS support is always available now)
+- Native Python SSH client (used by default / `use_ssh_client=False`) will now reject unknown host keys with `paramiko.ssh_exception.SSHException`
+- Short IDs are now 12 characters instead of 10 characters (same as Docker CLI)
+
+### Features
+- Python 3.10 support
+- Automatically negotiate most secure TLS version
+- Add `platform` (e.g. 
`linux/amd64`, `darwin/arm64`) to container create & run +- Add support for `GlobalJob` and `ReplicatedJobs` for Swarm +- Add `remove()` method on `Image` +- Add `force` param to `disable()` on `Plugin` + +### Bugfixes +- Fix install issues on Windows related to `pywin32` +- Do not accept unknown SSH host keys in native Python SSH mode +- Use 12 character short IDs for consistency with Docker CLI +- Ignore trailing whitespace in `.dockerignore` files +- Fix IPv6 host parsing when explicit port specified +- Fix `ProxyCommand` option for SSH connections +- Do not spawn extra subshell when launching external SSH client +- Improve exception semantics to preserve context +- Documentation improvements (formatting, examples, typos, missing params) + +### Miscellaneous +- Upgrade dependencies in `requirements.txt` to latest versions +- Remove extraneous transitive dependencies +- Eliminate usages of deprecated functions/methods +- Test suite reliability improvements +- GitHub Actions workflows for linting, unit tests, integration tests, and + publishing releases + +5.0.3 +----- + +[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/76?closed=1) + +### Features +- Add `cap_add` and `cap_drop` parameters to service create and ContainerSpec +- Add `templating` parameter to config create + +### Bugfixes +- Fix getting a read timeout for logs/attach with a tty and slow output + +### Miscellaneous +- Fix documentation examples + +5.0.2 +----- + +[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/75?closed=1) + +### Bugfixes +- Fix `disable_buffering` regression + +5.0.1 +----- + +[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/74?closed=1) + +### Bugfixes +- Bring back support for ssh identity file +- Cleanup remaining python-2 dependencies +- Fix image save example in docs + +### Miscellaneous +- Bump urllib3 to 1.26.5 +- Bump requests to 2.26.0 + +5.0.0 +----- + +[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/70?closed=1) + +### Breaking changes +- Remove support for Python 2.7 +- Make Python 3.6 the minimum version supported + +### Features +- Add `limit` parameter to image search endpoint + +### Bugfixes +- Fix `KeyError` exception on secret create +- Verify TLS keys loaded from docker contexts +- Update PORT_SPEC regex to allow square brackets for IPv6 addresses +- Fix containers and images documentation examples + +4.4.4 +----- + +[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/73?closed=1) + +### Bugfixes +- Remove `LD_LIBRARY_PATH` and `SSL_CERT_FILE` environment variables when shelling out to the ssh client + +4.4.3 +----- + +[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/72?closed=1) + +### Features +- Add support for docker.types.Placement.MaxReplicas + +### Bugfixes +- Fix SSH port parsing when shelling out to the ssh client + +4.4.2 +----- + +[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/71?closed=1) + +### Bugfixes +- Fix SSH connection bug where the hostname was incorrectly trimmed and the error was hidden +- Fix docs example + +### Miscellaneous +- Add Python3.8 and 3.9 in setup.py classifier list + +4.4.1 +----- + +[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/69?closed=1) + +### 
Bugfixes
+- Avoid setting unsupported parameter for subprocess.Popen on Windows
+- Replace use of deprecated "filter" argument in "docker/api/image"
+
+4.4.0
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/67?closed=1)
+
+### Features
+- Add an alternative SSH connection to the paramiko one, based on shelling out to the SSH client, similar to the behaviour of the Docker CLI
+- Default image tag to `latest` on `pull`
+
+### Bugfixes
+- Fix plugin model upgrade
+- Fix examples URL in ulimits
+
+### Miscellaneous
+- Improve exception messages for server and client errors
+- Bump cryptography from 2.3 to 3.2
+
+4.3.1
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/68?closed=1)
+
+### Miscellaneous
+- Set default API version to `auto`
+- Fix conversion to bytes for `float`
+- Support OpenSSH `identityfile` option
+
+4.3.0
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/64?closed=1)
+
+### Features
+- Add `DeviceRequest` type to expose host resources such as GPUs
+- Add support for `DriverOpts` in EndpointConfig
+- Disable compression by default when using the container.get_archive method
+
+### Miscellaneous
+- Update default API version to v1.39
+- Update test engine version to 19.03.12
+
+4.2.2
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/66?closed=1)
+
+### Bugfixes
+
+- Fix context load for non-docker endpoints
+
+4.2.1
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/65?closed=1)
+
+### Features
+
+- Add option on when to use `tls` on the Context constructor
+- Make the context orchestrator field optional
+
+4.2.0
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/63?closed=1)
+
+### Bugfixes
+
+- Fix `win32pipe.WaitNamedPipe` throwing an exception in Windows containers
+- Use `Hostname`, `Username`, `Port` and `ProxyCommand` settings from `.ssh/config` when on SSH
+- Set host key policy for ssh transport to `paramiko.WarningPolicy()`
+- Set logging level of `paramiko` to warn
+
+### Features
+
+- Add support for docker contexts through `docker.ContextAPI`
+
+4.1.0
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/61?closed=1)
+
+### Bugfixes
+
+- Correct `INDEX_URL` logic in build.py `_set_auth_headers`
+- Fix for empty auth keys in config.json
+
+### Features
+
+- Add `NetworkAttachmentConfig` for service create/update
+
+### Miscellaneous
+
+- Bump pytest to 4.3.1
+- Adjust `--platform` tests for changes in docker engine
+- Update credentials-helpers to v0.6.3
+
+4.0.2
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/62?closed=1)
+
+### Bugfixes
+
+- Unified the way `HealthCheck` is created/configured
+
+### Miscellaneous
+
+- Bumped version of websocket-client
+
+4.0.1
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/60?closed=1)
+
+### Bugfixes
+
+- Fixed an obsolete import in the `credentials` subpackage that caused import errors in Python 3.7
+
+### Miscellaneous
+
+- Docs building has been repaired
+
+4.0.0
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/57?closed=1)
+
+### Breaking changes
+
+- Support for Python 3.3 and Python 3.4 has been dropped
+- `APIClient.update_service`, `APIClient.init_swarm`, and `DockerClient.swarm.init` now return a `dict` from the API's response body
+- In `APIClient.build` and `DockerClient.images.build`, the `use_config_proxy` parameter now defaults to `True`
+- `init_path` is no longer a valid parameter for `HostConfig`
+
+### Features
+
+- It is now possible to provide `SCTP` ports for port mappings
+- `ContainerSpec`s now support the `init` parameter
+- `DockerClient.swarm.init` and `APIClient.init_swarm` now support the `data_path_addr` parameter
+- `APIClient.update_swarm` and `DockerClient.swarm.update` now support the `rotate_manager_unlock_key` parameter
+- `APIClient.update_service` returns the API's response body as a `dict`
+- `APIClient.init_swarm` and `DockerClient.swarm.init` now return the API's response body as a `dict`
+
+### Bugfixes
+
+- Fixed `PlacementPreference` instances to produce a valid API type
+- Fixed a bug where not setting a value for `buildargs` in `build` could cause the library to attempt accessing attributes of a `None` value
+- Fixed a bug where setting the `volume_driver` parameter in `DockerClient.containers.create` would result in an error
+- `APIClient.inspect_distribution` now correctly sets the authentication headers on the request, allowing it to be used with private repositories. This change also applies to `DockerClient.get_registry_data`
+
+3.7.2
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/59?closed=1)
+
+### Bugfixes
+
+* Fixed `base_url` handling in utils.py so the TCP protocol is kept, deferring any protocol change to `parse_host` and leaving `base_url` with its original value
+* XFAIL test_attach_stream_and_cancel on TLS
+
+3.7.1
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/58?closed=1)
+
+### Bugfixes
+
+* Set a different default number of connection pools (now 9) for SSH
+* Added a `BaseHTTPAdapter` with a `close` method to ensure that connection pools are cleaned up on `close()`
+* Made `SSHHTTPAdapter` reopen a closed connection when needed, like the other adapters
+
+3.7.0
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/56?closed=1)
+
+### Features
+
+* Added support for multiplexed streams (for `attach` and `exec_start`). Learn more at https://docker-py.readthedocs.io/en/stable/user_guides/multiplex.html
+* Added the `use_config_proxy` parameter to the following methods: `APIClient.build`, `APIClient.create_container`, `DockerClient.images.build` and `DockerClient.containers.run` (`False` by default). **This parameter will become `True` by default in the 4.0.0 release.**
+* Placement preferences for Swarm services are better validated on the client and documentation has been updated accordingly
+
+### Bugfixes
+
+* Fixed a bug where credential stores weren't queried for relevant registry credentials with certain variations of the `config.json` file.
+* `DockerClient.swarm.init` now returns a boolean value as advertised.
+
+3.6.0
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/55?closed=1)
+
+### Features
+
+* Added support for connecting to the Docker Engine over SSH. 
Additional + dependencies for this feature can be installed with + `pip install "docker[ssh]"` +* Added support for the `named` parameter in `Image.save`, which may be + used to ensure the resulting tarball retains the image's name on save. + +### Bugfixes + +* Fixed a bug where builds on Windows with a context path using the `\\?\` + prefix would fail with some relative Dockerfile paths. +* Fixed an issue where pulls made with the `DockerClient` would fail when + setting the `stream` parameter to `True`. + +### Miscellaneous + +* The minimum requirement for the `requests` dependency has been bumped + to 2.20.0 + +3.5.1 +----- + +[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/54?closed=1) + +### Miscellaneous + +* Bumped version of `pyOpenSSL` in `requirements.txt` and `setup.py` to prevent + installation of a vulnerable version + +* Docs fixes + +3.5.0 +----- + +[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/53?closed=1) + +### Deprecation warning + +* Support for Python 3.3 will be dropped in the 4.0.0 release + +### Features + +* Updated dependencies to ensure support for Python 3.7 environments +* Added support for the `uts_mode` parameter in `HostConfig` +* The `UpdateConfig` constructor now allows `rollback` as a valid + value for `failure_action` +* Added support for `rollback_config` in `APIClient.create_service`, + `APIClient.update_service`, `DockerClient.services.create` and + `Service.update`. + +### Bugfixes + +* Credential helpers are now properly leveraged by the `build` method +* Fixed a bug that caused placement preferences to be ignored when provided + to `DockerClient.services.create` +* Fixed a bug that caused a `user` value of `0` to be ignored in + `APIClient.create_container` and `DockerClient.containers.create` + +3.4.1 +----- + +[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/52?closed=1) + +### Bugfixes + +* Fixed a bug that caused auth values in config files written using one of the + legacy formats to be ignored +* Fixed issues with handling of double-wildcard `**` patterns in + `.dockerignore` files + +3.4.0 +----- + +[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/51?closed=1) + +### Features + +* The `APIClient` and `DockerClient` constructors now accept a `credstore_env` + parameter. When set, values in this dictionary are added to the environment + when executing the credential store process. 
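+
+As a minimal sketch of the new `credstore_env` parameter (the variable shown
+is only an example; which variables matter depends on your credential helper,
+e.g. `PASSWORD_STORE_DIR` for the `pass` helper):
+
+```python
+import docker
+
+# Entries in credstore_env are added to the environment of the
+# credential helper process whenever credentials are looked up.
+client = docker.DockerClient(
+    base_url='unix://var/run/docker.sock',
+    credstore_env={'PASSWORD_STORE_DIR': '/path/to/store'},
+)
+```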
+
+### Bugfixes
+
+* `DockerClient.networks.prune` now properly returns the operation's result
+* Fixed a bug that caused custom Dockerfile paths in a subfolder of the build context to be invalidated, preventing these builds from working
+* The `plugin_privileges` method can now be called for plugins that require authentication to access
+* Fixed a bug that caused attempts to read a data stream over an unsecured TCP socket to crash on Windows clients
+* Fixed a bug where the `read_only` parameter was ignored when creating a service using the `DockerClient`
+* Fixed an issue where `Service.scale` would not properly update the service's mode, causing the operation to fail silently
+
+3.3.0
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/49?closed=1)
+
+### Features
+
+* Added support for `prune_builds` in `APIClient` and `DockerClient.images`
+* Added support for the `ignore_removed` parameter in `DockerClient.containers.list`
+
+### Bugfixes
+
+* Fixed an issue that caused builds to fail when an in-context Dockerfile would be specified using its absolute path
+* Installation with pip 10.0.0 and above no longer fails
+* Connection timeout for `stop` and `restart` now gets properly adjusted to allow for the operation to finish in the specified time
+* Improved docker credential store support on Windows
+
+3.2.1
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/50?closed=1)
+
+### Bugfixes
+
+* Fixed a bug with builds not properly identifying Dockerfile paths relative to the build context
+* Fixed an issue where builds would raise a `ValueError` when attempting to build with a Dockerfile on a different Windows drive.
+
+3.2.0
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/45?closed=1)
+
+### Features
+
+* Generators returned by `attach()`, `logs()` and `events()` now have a `cancel()` method to let consumers stop the iteration client-side.
+* `build()` methods can now handle Dockerfiles supplied outside of the build context.
+* Added `sparse` argument to `DockerClient.containers.list()`
+* Added `isolation` parameter to `build()` methods. 
+* Added `close()` method to `DockerClient` +* Added `APIClient.inspect_distribution()` method and + `DockerClient.images.get_registry_data()` + * The latter returns an instance of the new `RegistryData` class + +3.1.4 +----- + +[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/48?closed=1) + +### Bugfixes + +* Fixed a bug where build contexts containing directory symlinks would produce + invalid tar archives + +3.1.3 +----- + +### Bugfixes + +* Regenerated invalid wheel package + +3.1.2 +----- + +[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/47?closed=1) + +### Bugfixes + +* Fixed a bug that led to a Dockerfile not being included in the build context + in some situations when the Dockerfile's path was prefixed with `./` + +3.1.1 +----- + +[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/46?closed=1) + +### Bugfixes + +* Fixed a bug that caused costly DNS lookups on Mac OSX when connecting to the + engine through UNIX socket +* Fixed a bug that caused `.dockerignore` comments to be read as exclusion + patterns + +3.1.0 +----- + +[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/44?closed=1) + +### Features + +* Added support for `device_cgroup_rules` in host config +* Added support for `generic_resources` when creating a `Resources` + object. +* Added support for a configurable `chunk_size` parameter in `export`, + `get_archive` and `get_image` (`Image.save`) +* Added a `force_update` method to the `Service` class. +* In `Service.update`, when the `force_update` parameter is set to `True`, + the current `force_update` counter is incremented by one in the update + request. + +### Bugfixes + +* Fixed a bug where authentication through `login()` was being ignored if the + SDK was configured to use a credential store. +* Fixed a bug where download methods would use an absurdly small chunk size, + leading to slow data retrieval +* Fixed a bug where using `DockerClient.images.pull` to pull an image by digest + would lead to an exception being raised. +* `.dockerignore` rules should now be respected as defined by the spec, + including respect for last-line precedence and proper handling of absolute + paths +* The `pass` credential store is now properly supported. + +3.0.1 +----- + +[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/43?closed=1) + +### Bugfixes + +* Fixed a bug where `APIClient.login` didn't populate the `_auth_configs` + dictionary properly, causing subsequent `pull` and `push` operations to fail +* Fixed a bug where some build context files were incorrectly recognized as + being inaccessible. +* Fixed a bug where files with a negative mtime value would + cause errors when included in a build context + +3.0.0 +----- + +[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/39?closed=1) + +### Breaking changes + +* Support for API version < 1.21 has been removed. +* The following methods have been removed: + * `APIClient.copy` has been removed. Users should use `APIClient.get_archive` + instead. + * `APIClient.insert` has been removed. Users may use `APIClient.put_archive` + combined with `APIClient.commit` to replicate the method's behavior. + * `utils.ping_registry` and `utils.ping` have been removed. 
+* The following parameters have been removed:
+  * `stream` in `APIClient.build`
+  * `cpu_shares`, `cpuset`, `dns`, `mem_limit`, `memswap_limit`, `volume_driver`, `volumes_from` in `APIClient.create_container`. These are all replaced by their equivalent in `create_host_config`
+  * `insecure_registry` in `APIClient.login`, `APIClient.pull`, `APIClient.push`, `DockerClient.images.push` and `DockerClient.images.pull`
+  * `viz` in `APIClient.images`
+* The following parameters have been renamed:
+  * `endpoint_config` in `APIClient.create_service` and `APIClient.update_service` is now `endpoint_spec`
+  * `name` in `DockerClient.images.pull` is now `repository`
+* The return value for the following methods has changed:
+  * `APIClient.wait` and `Container.wait` now return a `dict` representing the API's response instead of returning the status code directly.
+  * `DockerClient.images.load` now returns a list of `Image` objects for the images that were loaded, instead of a log stream.
+  * `Container.exec_run` now returns a tuple of (exit_code, output) instead of just the output.
+  * `DockerClient.images.build` now returns a tuple of (image, build_logs) instead of just the image object.
+  * `APIClient.export`, `APIClient.get_archive` and `APIClient.get_image` now return generators streaming the raw binary data from the server's response.
+  * When no tag is provided, `DockerClient.images.pull` now returns a list of `Image`s associated with the pulled repository instead of just the `latest` image.
+
+### Features
+
+* The Docker Python SDK is now officially supported on Python 3.6
+* Added `scale` method to the `Service` model; this method is a shorthand that calls `update_service` with the required number of replicas
+* Added support for the `platform` parameter in `APIClient.build`, `DockerClient.images.build`, `APIClient.pull` and `DockerClient.images.pull`
+* Added support for the `until` parameter in `APIClient.logs` and `Container.logs`
+* Added support for the `workdir` argument in `APIClient.exec_create` and `Container.exec_run`
+* Added support for the `condition` argument in `APIClient.wait` and `Container.wait`
+* Users can now specify a publish mode for ports in `EndpointSpec` using the `{published_port: (target_port, protocol, publish_mode)}` syntax.
+* Added support for the `isolation` parameter in `ContainerSpec`, `DockerClient.services.create` and `Service.update`
+* `APIClient.attach_socket` and `APIClient.exec_create` now allow specifying a `detach_keys` combination. If unspecified, the value from the `config.json` file will be used
+* TLS connections now default to using the TLSv1.2 protocol when available
+
+
+### Bugfixes
+
+* Fixed a bug where whitespace-only lines in `.dockerignore` would break builds on Windows
+* Fixed a bug where broken symlinks inside a build context would cause the build to fail
+* Fixed a bug where specifying volumes with Windows drives would cause incorrect parsing in `DockerClient.containers.run`
+* Fixed a bug where the `networks` data provided to `create_service` and `update_service` would be sent incorrectly to the Engine with API < 1.25
+* Pulling all tags from a repository with no `latest` tag using the `DockerClient` will no longer raise a `NotFound` exception
+
+2.7.0
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/41?closed=1)
+
+### Features
+
+* Added `unlock_swarm` and `get_unlock_key` methods to the `APIClient`.
+  * Added `unlock` and `get_unlock_key` to `DockerClient.swarm`.
+* Added a `greedy` parameter to `DockerClient.networks.list`, yielding additional details about the listed networks.
+* Added `cpu_rt_runtime` and `cpu_rt_period` as parameters to `APIClient.create_host_config` and `DockerClient.containers.run`.
+* Added the `order` argument to `UpdateConfig`.
+* Added `fetch_current_spec` to `APIClient.update_service` and `Service.update` that will retrieve the current configuration of the service and merge it with the provided parameters to determine the new configuration.
+
+### Bugfixes
+
+* Fixed a bug where the `build` method tried to include inaccessible files in the context, leading to obscure errors during the build phase (inaccessible files inside the context now raise an `IOError` instead).
+* Fixed a bug where the `build` method would try to read from FIFOs present inside the build context, causing it to hang.
+* `APIClient.stop` will no longer override the `stop_timeout` value present in the container's configuration.
+* Fixed a bug preventing removal of networks with names containing a space.
+* Fixed a bug where `DockerClient.containers.run` would crash if the `auto_remove` parameter was set to `True`.
+* Changed the default value of `listen_addr` in `join_swarm` to match the one in `init_swarm`.
+* Fixed a bug where handling HTTP errors with no body would cause an unexpected exception to be thrown while generating an `APIError` object.
+
+2.6.1
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/40?closed=1)
+
+### Bugfixes
+
+* Fixed a bug on Python 3 installations preventing the use of the `attach` and `exec_run` methods.
+
+
+2.6.0
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/38?closed=1)
+
+### Features
+
+* Added support for `mounts` in `APIClient.create_host_config` and `DockerClient.containers.run`
+* Added support for `consistency`, `tmpfs_size` and `tmpfs_mode` when creating mount objects.
+* `Mount` objects now support the `tmpfs` and `npipe` types.
+* Added support for `extra_hosts` in the `build` methods.
+* Added support for the configs API (see the sketch below):
+  * In `APIClient`: `create_config`, `inspect_config`, `remove_config`, `configs`
+  * In `DockerClient`: `configs.create`, `configs.get`, `configs.list` and the `Config` model.
+  * Added `configs` parameter to `ContainerSpec`. Each item in the `configs` list must be a `docker.types.ConfigReference` instance.
+* Added support for the following parameters when creating a `ContainerSpec` object: `groups`, `open_stdin`, `read_only`, `stop_signal`, `healthcheck`, `hosts`, `dns_config`, `configs`, `privileges`.
+* Added the following configuration classes to `docker.types`: `ConfigReference`, `DNSConfig`, `Privileges`, `SwarmExternalCA`.
+* Added support for `driver` in `APIClient.create_secret` and `DockerClient.secrets.create`.
+* Added support for `scope` in `APIClient.inspect_network` and `APIClient.create_network`, and their `DockerClient` equivalent.
+* Added support for the following parameters to `create_swarm_spec`: `external_cas`, `labels`, `signing_ca_cert`, `signing_ca_key`, `ca_force_rotate`, `autolock_managers`, `log_driver`. These additions also apply to `DockerClient.swarm.init`.
+* Added support for `insert_defaults` in `APIClient.inspect_service` and `DockerClient.services.get`.
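+
+As a minimal sketch of the configs API (requires a swarm-mode engine; the
+names, data and filename here are illustrative):
+
+```python
+import docker
+from docker.types import ConfigReference
+
+client = docker.from_env()
+
+# Create a config, then reference it from a service's ContainerSpec.
+config = client.configs.create(name='app-settings', data=b'key=value')
+service = client.services.create(
+    image='busybox',
+    command='sleep 300',
+    configs=[ConfigReference(
+        config_id=config.id,
+        config_name='app-settings',
+        filename='settings.conf',  # target file name inside the container
+    )],
+)
+```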
+ +### Bugfixes + +* Fixed a bug where reading a 0-length frame in log streams would incorrectly + interrupt streaming. +* Fixed a bug where the `id` member on `Swarm` objects wasn't being populated. +* Fixed a bug that would cause some data at the beginning of an upgraded + connection stream (`attach`, `exec_run`) to disappear. + +2.5.1 +----- + +[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/37?closed=1) + +### Bugfixes + +* Fixed a bug where patterns ending with `**` in `.dockerignore` would + raise an exception +* Fixed a bug where using `attach` with the `stream` argument set to `False` + would raise an exception + +2.5.0 +----- + +[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/34?closed=1) + +### Features + +* Added support for the `squash` parameter in `APIClient.build` and + `DockerClient.images.build`. +* When using API version 1.23 or above, `load_image` will now return a + generator of progress as JSON `dict`s. +* `remove_image` now returns the content of the API's response. + + +### Bugfixes + +* Fixed an issue where the `auto_remove` parameter in + `DockerClient.containers.run` was not taken into account. +* Fixed a bug where `.dockerignore` patterns starting with a slash + were ignored. +* Fixed an issue with the handling of `**` patterns in `.dockerignore` +* Fixed a bug where building `FROM` a private Docker Hub image when not + using a cred store would fail. +* Fixed a bug where calling `create_service` or `update_service` with + `task_template` as a `dict` would raise an exception. +* Fixed the handling of TTY-enabled containers in `attach` and `exec_run`. +* `DockerClient.containers.run` will no longer attempt to stream logs if the + log driver doesn't support the operation. + +### Miscellaneous + +* Added extra requirements for better TLS support on some platforms. + These can be installed or required through the `docker[tls]` notation. + +2.4.2 +----- + +[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/36?closed=1) + +### Bugfixes + +* Fixed a bug where the `split_port` utility would raise an exception when + passed a non-string argument. + +2.4.0 +----- + +[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/33?closed=1) + +### Features + +* Added support for the `target` and `network_mode` parameters in + `APIClient.build` and `DockerClient.images.build`. +* Added support for the `runtime` parameter in `APIClient.create_container` + and `DockerClient.containers.run`. +* Added support for the `ingress` parameter in `APIClient.create_network` and + `DockerClient.networks.create`. +* Added support for `placement` configuration in `docker.types.TaskTemplate`. +* Added support for `tty` configuration in `docker.types.ContainerSpec`. +* Added support for `start_period` configuration in `docker.types.Healthcheck`. +* The `credHelpers` section in Docker's configuration file is now recognized. +* Port specifications including IPv6 endpoints are now supported. + +### Bugfixes + +* Fixed a bug where instantiating a `DockerClient` using `docker.from_env` + wouldn't correctly set the default timeout value. +* Fixed a bug where `DockerClient.secrets` was not accessible as a property. +* Fixed a bug where `DockerClient.build` would sometimes return the wrong + image. +* Fixed a bug where values for `HostConfig.nano_cpus` exceeding 2^32 would + raise a type error. 
+* `Image.tag` now properly returns `True` when the operation is successful. +* `APIClient.logs` and `Container.logs` now raise an exception if the `since` + argument uses an unsupported type instead of ignoring the value. +* Fixed a bug where some methods would raise a `NullResource` exception when + the resource ID was provided using a keyword argument. + +### Miscellaneous + +* `APIClient` instances can now be pickled. + +2.3.0 +----- + +[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/31?closed=1) + +### Features + +* Added support for the following `HostConfig` parameters: `volume_driver`, + `cpu_count`, `cpu_percent`, `nano_cpus`, `cpuset_mems`. +* Added support for `verbose` parameter in `APIClient.inspect_network` and + `DockerClient.networks.get`. +* Added support for the `environment` parameter in `APIClient.exec_create` + and `Container.exec_run` +* Added `reload_config` method to `APIClient`, that lets the user reload + the `config.json` data from disk. +* Added `labels` property to the `Image` and `Container` classes. +* Added `image` property to the `Container` class. + +### Bugfixes + +* Fixed a bug where setting `replicas` to zero in `ServiceMode` would not + register as a valid entry. +* Fixed a bug where `DockerClient.images.build` would report a failure after + a successful build if a `tag` was set. +* Fixed an issue where `DockerClient.images.pull` would fail to return the + corresponding image object if a `tag` was set. +* Fixed a bug where a list of `mounts` provided to `APIClient.create_service` + would sometimes be parsed incorrectly. +* Fixed a bug where calling `Network.containers` would crash when no containers + were associated with the network. +* Fixed an issue where `Network.connect` and `Network.disconnect` would not + accept some of the documented parameters. +* Fixed a bug where the `cpuset_cpus` parameter would not be properly set in + `APIClient.create_host_config`. + +### Miscellaneous + +* The invalid `networks` argument in `DockerClient.containers.run` has been + replaced with a (working) singular `network` argument. + + +2.2.1 +----- + +[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/32?closed=1) + +### Bugfixes + +* Fixed a bug where the `status_code` attribute of `APIError` exceptions would + not reflect the expected value. +* Fixed an issue where the `events` method would time out unexpectedly if no + data was sent by the engine for a given amount of time. + + +2.2.0 +----- + +[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/30?closed=1) + +### Features + +* Default API version has been bumped to `1.26` (Engine 1.13.1+) +* Upgrade plugin: + * Added the `upgrade_plugin` method to the `APIClient` class + * Added the `upgrade` method to the `Plugin` class +* Service logs: + * Added the `service_logs` method to the `APIClient` class + * Added the `logs` method to the `Service` class +* Added the `df` method to `APIClient` and `DockerClient` +* Added support for `init` and `init_path` parameters in `HostConfig` + and `DockerClient.containers.run` +* Added support for `hostname` parameter in `ContainerSpec` and + `DockerClient.service.create` +* Added support for port range to single port in port mappings + (e.g. 
`8000-8010:80`)
+
+### Bugfixes
+
+* Fixed a bug where a missing container port in a port mapping would raise an unexpected `TypeError`
+* Fixed a bug where the `events` method in `APIClient` and `DockerClient` would not respect custom headers set in `config.json`
+
+
+2.1.0
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/27?closed=1)
+
+### Features
+
+* Added the following pruning methods:
+  * In `APIClient`: `prune_containers`, `prune_images`, `prune_networks`, `prune_volumes`
+  * In `DockerClient`: `containers.prune`, `images.prune`, `networks.prune`, `volumes.prune`
+* Added support for the plugins API:
+  * In `APIClient`: `configure_plugin`, `create_plugin`, `disable_plugin`, `enable_plugin`, `inspect_plugin`, `pull_plugin`, `plugins`, `plugin_privileges`, `push_plugin`, `remove_plugin`
+  * In `DockerClient`: `plugins.create`, `plugins.get`, `plugins.install`, `plugins.list`, and the `Plugin` model.
+* Added support for the secrets API:
+  * In `APIClient`: `create_secret`, `inspect_secret`, `remove_secret`, `secrets`
+  * In `DockerClient`: `secrets.create`, `secrets.get`, `secrets.list` and the `Secret` model.
+  * Added `secrets` parameter to `ContainerSpec`. Each item in the `secrets` list must be a `docker.types.SecretReference` instance.
+* Added support for `cache_from` in `APIClient.build` and `DockerClient.images.build`.
+* Added support for `auto_remove` and `storage_opt` in `APIClient.create_host_config` and `DockerClient.containers.run`
+* Added support for `stop_timeout` in `APIClient.create_container` and `DockerClient.containers.run`
+* Added support for the `force` parameter in `APIClient.remove_volume` and `Volume.remove`
+* Added support for `max_failure_ratio` and `monitor` in `UpdateConfig`
+* Added support for `force_update` in `TaskTemplate`
+* Made `name` parameter optional in `APIClient.create_volume` and `DockerClient.volumes.create`
+
+### Bugfixes
+
+* Fixed a bug where building from a directory containing socket-type files would raise an unexpected `AttributeError`.
+* Fixed an issue that prevented the `DockerClient.swarm.init` method from taking into account arguments passed to it.
+* `Image.tag` now correctly returns a boolean value upon completion.
+* Fixed several issues related to passing `volumes` in `DockerClient.containers.run`
+* Fixed an issue where `DockerClient.images.build` wouldn't return an `Image` object even when the build was successful
+
+
+2.0.2
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/29?closed=1)
+
+### Bugfixes
+
+* Installation of the package now fails if the `docker-py` package is installed in order to prevent obscure naming conflicts when both packages co-exist.
+* Added missing `filters` parameter to `APIClient.networks`.
+* Resource objects generated by the `DockerClient` are now hashable.
+* Fixed a bug where retrieving untagged images using `DockerClient` would raise a `TypeError` exception.
+* `mode` parameter in `create_service` is now properly converted to a valid data type for the Engine API. Use `ServiceMode` for advanced configurations, as in the sketch below.
+* Fixed a bug where the decoded `APIClient.events` stream would sometimes raise an exception when a container is stopped or restarted.
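+
+A minimal sketch of the `ServiceMode` usage mentioned above (requires a
+swarm-mode engine; image and replica count are illustrative):
+
+```python
+import docker
+from docker.types import ServiceMode
+
+client = docker.from_env()
+
+# ServiceMode produces the data type the Engine API expects for `mode`;
+# here, a replicated service running three tasks.
+service = client.services.create(
+    image='nginx:alpine',
+    mode=ServiceMode('replicated', replicas=3),
+)
+```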
+
+2.0.1
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/28?closed=1)
+
+### Bugfixes
+
+* Fixed a bug where forward slashes in some .dockerignore patterns weren't being parsed correctly on Windows
+* Fixed a bug where `Mount.parse_mount_string` would never set the read_only parameter on the resulting `Mount`.
+* Fixed a bug where `Mount.parse_mount_string` would incorrectly mark host binds as being of `volume` type.
+
+2.0.0
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/22?closed=1)
+
+### Breaking changes
+
+* Dropped support for Python 2.6
+* `docker.Client` has been renamed to `docker.APIClient`
+* `docker.from_env` now creates a `DockerClient` instance instead of an `APIClient` instance.
+* Removed HostConfig parameters from `APIClient.start`
+* The minimum supported API version is now 1.21 (Engine version 1.9.0+)
+* The name of the `pip` package is now `docker` (was: `docker-py`). New versions of this library will only be published as `docker` from now on.
+* `docker.ssladapter` is now `docker.transport.ssladapter`
+* The package structure has been flattened in certain cases, which may affect imports for `docker.auth` and `docker.utils.ports`
+* `docker.utils.types` has been moved to `docker.types`
+* `create_host_config`, `create_ipam_pool` and `create_ipam_config` have been removed from `docker.utils`. They have been replaced by the following classes in `docker.types`: `HostConfig`, `IPAMPool` and `IPAMConfig`.
+
+### Features
+
+* Added a high-level, user-focused API as `docker.DockerClient`. See the README and documentation for more information.
+* Implemented `update_node` method in `APIClient`.
+* Implemented `remove_node` method in `APIClient`.
+* Added support for `restart_policy` in `update_container`.
+* Added support for `labels` and `shmsize` in `build`.
+* Added support for `attachable` in `create_network`
+* Added support for `healthcheck` in `create_container`.
+* Added support for `isolation` in `HostConfig`.
+* Expanded support for `pid_mode` in `HostConfig` (now supports arbitrary values for API version >= 1.24).
+* Added support for `options` in `IPAMConfig`
+* Added a `HealthCheck` class to `docker.types` to be used in `create_container`.
+* Added an `EndpointSpec` class to `docker.types` to be used in `create_service` and `update_service`.
+
+
+### Bugfixes
+
+* Fixed a bug where auth information would not be properly passed to the engine during a `build` if the client used a credentials store.
+* Fixed an issue with some exclusion patterns in `build`.
+* Fixed an issue where context files were bundled with the wrong permissions when calling `build` on Windows.
+* Fixed an issue where auth info would not be retrieved from its default location on Windows.
+* Fixed an issue where lists of `networks` in `create_service` and `update_service` wouldn't be properly converted for the engine.
+* Fixed an issue where `endpoint_config` in `create_service` and `update_service` would be ignored.
+* `endpoint_config` in `create_service` and `update_service` has been deprecated in favor of `endpoint_spec`
+* Fixed a bug where `constraints` in a `TaskTemplate` object wouldn't be properly converted for the engine.
+* Fixed an issue where providing a dictionary for `env` in `ContainerSpec` would provoke an `APIError` when sent to the engine.
+* Fixed a bug where providing an `env_file` containing empty lines in `create_container` would raise an exception.
+* Fixed a bug where `detach` was being ignored by `exec_start`.
+
+### Documentation
+
+* Documentation for classes and methods is now included alongside the code as docstrings.
+
+1.10.6
+------
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/26?closed=1)
+
+### Bugfixes
+
+* Fixed an issue where setting a `NpipeSocket` instance to blocking mode would put it in non-blocking mode and vice-versa.
+
+
+1.10.5
+------
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/25?closed=1)
+
+### Bugfixes
+
+* Fixed an issue where concurrent attempts to access a named pipe by the client would sometimes cause recoverable exceptions to be raised.
+
+
+1.10.4
+------
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/24?closed=1)
+
+### Bugfixes
+
+* Fixed an issue where `RestartPolicy.condition_types.ON_FAILURE` would yield an invalid value.
+* Fixed an issue where the SSL connection adapter would receive an invalid argument.
+* Fixed an issue that caused the Client to fail to reach API endpoints when the provided `base_url` had a trailing slash.
+* Fixed a bug where some `environment` values in `create_container` containing unicode characters would raise an encoding error.
+* Fixed a number of issues tied to named pipe transport on Windows.
+* Fixed a bug where inclusion patterns in `.dockerignore` would cause some excluded files to appear in the build context on Windows.
+
+### Miscellaneous
+
+* Adjusted version requirements for the `requests` library.
+* It is now possible to run the docker-py test suite on Windows.
+
+
+1.10.3
+------
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.10.3+is%3Aclosed)
+
+### Bugfixes
+
+* Fixed an issue where identity tokens in configuration files weren't handled by the library.
+
+### Miscellaneous
+
+* Increased the default number of connection pools from 10 to 25. This number can now be configured using the `num_pools` parameter in the `Client` constructor.
+
+
+1.10.2
+------
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.10.0+is%3Aclosed)
+
+### Bugfixes
+
+* Updated the docker-pycreds dependency as it was causing issues for some users with dependency resolution in applications using docker-py.
+
+
+1.10.1
+------
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.10.0+is%3Aclosed)
+
+### Bugfixes
+
+* The docker.utils.types module was removed in favor of docker.types, but some applications imported it explicitly. It has been re-added with an import warning advising to use the new module path.
+
+1.10.0
+------
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.10.0+is%3Aclosed)
+
+### Features
+
+* Added swarm mode and service management methods. See the documentation for details.
+* Added support for IPv6 Docker host addresses in the `Client` constructor.
+* Added (read-only) support for the Docker credentials store.
+* Added support for custom `auth_config` in `Client.push`.
+* Added support for `labels` in `Client.create_volume`.
+* Added support for `labels` and `enable_ipv6` in `Client.create_network`. +* Added support for `force` param in + `Client.disconnect_container_from_network`. +* Added support for `pids_limit`, `sysctls`, `userns_mode`, `cpuset_cpus`, + `cpu_shares`, `mem_reservation` and `kernel_memory` parameters in + `Client.create_host_config`. +* Added support for `link_local_ips` in `create_endpoint_config`. +* Added support for a `changes` parameter in `Client.import_image`. +* Added support for a `version` parameter in `Client.from_env`. + +### Bugfixes + +* Fixed a bug where `Client.build` would crash if the `config.json` file + contained a `HttpHeaders` entry. +* Fixed a bug where passing `decode=True` in some streaming methods would + crash when the daemon's response had an unexpected format. +* Fixed a bug where `environment` values with unicode characters weren't + handled properly in `create_container`. +* Fixed a bug where using the `npipe` protocol would sometimes break with + `ValueError: buffer size must be strictly positive`. + +### Miscellaneous + +* Fixed an issue where URL-quoting in docker-py was inconsistent with the + quoting done by the Docker CLI client. +* The client now sends TCP upgrade headers to hint potential proxies about + connection hijacking. +* The client now defaults to using the `npipe` protocol on Windows. + + +1.9.0 +----- + +[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.9.0+is%3Aclosed) + +### Features + +* Added **experimental** support for Windows named pipes (`npipe://` protocol). +* Added support for Block IO constraints in `Client.create_host_config`. This + includes parameters `blkio_weight`, `blkio_weight_device`, `device_read_bps`, + `device_write_bps`, `device_read_iops` and `device_write_iops`. +* Added support for the `internal` param in `Client.create_network`. +* Added support for `ipv4_address` and `ipv6_address` in utils function + `create_endpoint_config`. +* Added support for custom user agent setting in the `Client` constructor. + By default, docker-py now also declares itself in the `User-Agent` header. + +### Bugfixes + +* Fixed an issue where the HTTP timeout on streaming responses would sometimes + be set incorrectly. +* Fixed an issue where explicit relative paths in `.dockerignore` files were + not being recognized. + +1.8.1 +----- + +[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.8.1+is%3Aclosed) + +### Bugfixes + +* Fixed a bug where calling `login()` against the default registry would fail + with the 1.10.x engine +* Fixed a bug where values in environment files would be parsed incorrectly if + they contained an equal sign. +* Switched to a better supported backport of the `match_hostname` function, + fixing dependency issues in some environments. 
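+
+To illustrate the Block IO constraints from the 1.9.0 entry above, a minimal
+sketch using the era's `Client` class (device path and rates are illustrative):
+
+```python
+from docker import Client
+
+cli = Client(base_url='unix://var/run/docker.sock')
+
+# A relative IO weight plus a hard read-rate cap on a single device.
+host_config = cli.create_host_config(
+    blkio_weight=300,
+    device_read_bps=[{'Path': '/dev/sda', 'Rate': 1024 * 1024}],
+)
+container = cli.create_container(
+    image='busybox:latest',
+    command='sleep 30',
+    host_config=host_config,
+)
+```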
+
+
+1.8.0
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.8.0+is%3Aclosed)
+
+### Features
+
+* Added `Client.update_container` method (update resource configs of a container)
+* Added support for gzipped context in `Client.build`
+* Added ability to specify IP address when connecting a container to a network
+* Added `tmpfs` support to `Client.create_host_config`
+* Added support for the `changes` param in `Client.commit`
+* Added support for the `follow` param in `Client.logs`
+* Added support for the `check_duplicate` param in `Client.create_network`
+* Added support for the `decode` param in `Client.push` and `Client.pull`
+* Added `docker.from_env` shortcut function. Instantiates a client with `kwargs_from_env`
+* `kwargs_from_env` now supports an optional `environment` parameter. If present, values will be fetched from this dictionary instead of `os.environ`
+
+
+### Bugfixes
+
+* Fixed a bug where TLS verification would fail when using IP addresses in the certificate's `subjectAltName` fields
+* Fixed an issue where the default TLS version in TLSConfig would break in some environments. `docker-py` now uses TLSv1 by default. This setting can be overridden using the `ssl_version` param in `kwargs_from_env` or the `TLSConfig` constructor
+* Fixed a bug where `tcp` hosts would fail to connect to TLS-enabled endpoints
+* Fixed a bug where loading a valid docker configuration file would fail
+* Fixed a bug where some environment variables specified through `create_container` would be improperly formatted
+* Fixed a bug where using the unix socket connection would raise an error in some edge-case situations
+
+### Miscellaneous
+
+* Default API version is now 1.22 (introduced in Docker 1.10.0)
+
+
+1.7.2
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.7.2+is%3Aclosed)
+
+### Bugfixes
+
+* Fixed a bug where TLS verification was improperly executed when providing a custom CA certificate.
+
+1.7.1
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.7.1+is%3Aclosed)
+
+### Features
+
+* Added support for `shm_size` in `Client.create_host_config`
+
+### Bugfixes
+
+* Fixed a bug where the Dockerfile would sometimes be excluded from the build context.
+* Fixed a bug where a docker config file containing unknown keys would raise an exception.
+* Fixed an issue with SSL connections behaving improperly when pyOpenSSL was installed in the same environment.
+* Several TLS configuration improvements
+
+
+1.7.0
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.7.0+is%3Aclosed)
+
+### Features
+
+* Added support for custom IPAM configuration in `Client.create_network`
+* Added input support to `Client.exec_create`
+* Added support for `stop_signal` in `Client.create_host_config`
+* Added support for custom HTTP headers in the Docker config file.
+* Added support for unspecified transfer protocol in `base_url` when TLS is enabled.
+
+
+### Bugfixes
+
+* Fixed a bug where the `filters` parameter in `Client.volumes` would not be applied properly.
+* Fixed a bug where memory limits would parse to incorrect values.
+* Fixed a bug where the `devices` parameter in `Client.create_host_config` would sometimes be misinterpreted.
+* Fixed a bug where instantiating a `Client` object would sometimes crash if
+  `base_url` was unspecified.
+* Fixed a bug where an error message related to TLS configuration would link
+  to a non-existent (outdated) docs page.
+
+
+### Miscellaneous
+
+* Processing of `.dockerignore` has been made significantly faster.
+* Dropped explicit support for Python 3.2
+
+1.6.0
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.6.0+is%3Aclosed)
+
+### Features
+
+* Added support for the `since` param in `Client.logs` (introduced in API
+  version 1.19)
+* Added support for the `DOCKER_CONFIG` environment variable when looking up
+  auth config
+* Added support for the `stream` param in `Client.stats` (when set to `False`,
+  it allows the user to retrieve a single snapshot instead of a constant data
+  stream)
+* Added support for the `mem_swappiness` and `oom_kill_disable` params
+  in `Client.create_host_config`
+* Added support for build arguments in `Client.build` through the `buildargs`
+  param.
+
+
+### Bugfixes
+
+* Fixed a bug where streaming data over HTTPS would sometimes behave
+  incorrectly with Python 3.x
+* Fixed a bug where commands containing unicode characters would be incorrectly
+  handled by `Client.create_container`.
+* Fixed a bug where auth config credentials containing unicode characters would
+  cause failures when pushing / pulling images.
+* Setting `tail=0` in `Client.logs` no longer shows past logs.
+* Fixed a bug where `Client.pull` and `Client.push` couldn't handle image names
+  containing a dot.
+
+
+### Miscellaneous
+
+* Default API version is now 1.21 (introduced in Docker 1.9.0)
+* Several test improvements and cleanups that should make the suite easier to
+  expand and maintain.
+
+
+1.5.0
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.5.0+is%3Aclosed)
+
+### Features
+
+* Added support for the networking API introduced in Docker 1.9.0
+  (`Client.networks`, `Client.create_network`, `Client.remove_network`,
+  `Client.inspect_network`, `Client.connect_container_to_network`,
+  `Client.disconnect_container_from_network`).
+* Added support for the volumes API introduced in Docker 1.9.0
+  (`Client.volumes`, `Client.create_volume`, `Client.inspect_volume`,
+  `Client.remove_volume`).
+* Added support for the `group_add` parameter in `create_host_config`.
+* Added support for the CPU CFS (`cpu_quota` and `cpu_period`) parameters
+  in `create_host_config`.
+* Added support for the archive API endpoint (`Client.get_archive`,
+  `Client.put_archive`).
+* Added support for the `ps_args` parameter in `Client.top`.
+
+
+### Bugfixes
+
+* Fixed a bug where specifying volume binds with unicode characters would
+  fail.
+* Fixed a bug where providing an explicit protocol in `Client.port` would fail
+  to yield the expected result.
+* Fixed a bug where the priority protocol returned by `Client.port` would be
+  UDP instead of the expected TCP.
+
+### Miscellaneous
+
+* Broke up Client code into several files to facilitate maintenance and
+  contribution.
+* Added contributing guidelines to the repository.
+
+1.4.0
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.4.0+is%3Aclosed)
+
+### Deprecation warning
+
+* `docker.utils.create_host_config` is deprecated in favor of
+  `Client.create_host_config`.
+
+### Features
+
+* Added `utils.parse_env_file` to support env-files.
+  See [docs](https://docker-py.readthedocs.io/en/latest/api/#create_container)
+  for usage.
+* Added support for arbitrary log drivers
+* Added support for URL paths in the docker host URL (`base_url`)
+* Drastically improved support for `.dockerignore` syntax
+
+### Bugfixes
+
+* Fixed a bug where `exec_inspect` would allow invocation when the API version
+  was too low.
+* Fixed a bug where `docker.utils.ports.split_port` would break if an open
+  range was provided.
+* Fixed a bug where invalid image IDs / container IDs could be provided to
+  bypass or reroute request URLs.
+* Default `base_url` now adapts depending on the OS (better Windows support)
+* Fixed a bug where using an integer as the user param in
+  `Client.create_container` would result in a failure.
+
+### Miscellaneous
+
+* Docs fixes
+* Integration tests are now run as part of our continuous integration.
+* Updated dependency on the `six` library
+
+1.3.1
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.3.1+is%3Aclosed)
+
+### Bugfixes
+
+* Fixed a bug where empty chunks in streams were misinterpreted as EOF.
+* `datetime` arguments passed to `Client.events` parameters `since` and
+  `until` are now always considered to be UTC.
+* Fixed a bug with Docker 1.7.x where the wrong auth headers were being passed
+  in `Client.build`, failing builds that depended on private images.
+* `Client.exec_create` can now retrieve the `Id` key from a dictionary for its
+  container param.
+
+### Miscellaneous
+
+* A 404 API status now raises `docker.errors.NotFound`. This exception inherits
+  from `APIError`, which was used previously.
+* Docs fixes
+* Test fixes
+
+1.3.0
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.3.0+is%3Aclosed)
+
+### Deprecation warning
+
+* As announced in the 1.2.0 release, `Client.execute` has been removed in favor
+  of `Client.exec_create` and `Client.exec_start`.
+
+### Features
+
+* The `extra_hosts` parameter in host config can now also be provided as a
+  list.
+* Added support for `memory_limit` and `memswap_limit` in host config to
+  comply with recent deprecations.
+* Added support for `volume_driver` in `Client.create_container`
+* Added support for advanced modes in volume binds (using the `mode` key;
+  a sketch follows the bugfix list below)
+* Added support for `decode` in `Client.build` (decodes the JSON stream on the
+  fly)
+* docker-py will now look for login configuration under the new config path,
+  and fall back to the old `~/.dockercfg` path if not present.
+
+### Bugfixes
+
+* Configuration file lookup now also works on platforms that don't define a
+  `$HOME` environment variable.
+* Fixed an issue where pinging a v2 private registry wasn't working properly,
+  preventing users from pushing and pulling.
+* The `pull` parameter in `Client.build` now defaults to `False`. Fixes a bug
+  where the default options would try to force a pull of non-remote images.
+* Fixed a bug where getting logs from tty-enabled containers wasn't working
+  properly with more recent versions of Docker.
+* `Client.push` and `Client.pull` will now raise exceptions if the HTTP
+  status indicates an error.
+* Fixed a bug with adapter lookup when using the Unix socket adapter
+  (this affected some weird edge cases, see issue #647 for details)
+* Fixed a bug where providing `timeout=None` to `Client.stop` would result
+  in an exception despite the use case being valid.
+* Added `git@` to the list of valid prefixes for remote build paths.
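+
+A minimal sketch of the advanced bind modes mentioned above (the paths are
+hypothetical, and `create_host_config` is shown in its later form as a
+`Client` method):
+
+```python
+import docker
+
+client = docker.Client(base_url='unix://var/run/docker.sock')
+
+# Mount the host's /var/data at /data inside the container, read-only.
+host_config = client.create_host_config(binds={
+    '/var/data': {'bind': '/data', 'mode': 'ro'},
+})
+container = client.create_container(
+    'busybox', 'true', volumes=['/data'], host_config=host_config,
+)
+```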
+
+### Dependencies
+
+* The websocket-client dependency has been updated to a more recent version.
+  This new version also supports Python 3.x, making `attach_socket` available
+  on those versions as well.
+
+### Documentation
+
+* Various fixes
+
+1.2.3
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.2.3+is%3Aclosed)
+
+### Deprecation warning
+
+* Passing host config in the `Client.start` method is now deprecated. Please
+  use the `host_config` parameter in `Client.create_container` instead.
+
+### Features
+
+* Added support for the `privileged` param in `Client.exec_create`
+  (only available in API >= 1.19)
+* Volume binds can now also be specified as a list of strings.
+
+### Bugfixes
+
+* Fixed a bug where the `read_only` param in host_config wasn't handled
+  properly.
+* Fixed a bug in `Client.execute` (this method is still deprecated).
+* The `cpuset` param in `Client.create_container` is also passed as
+  the `CpusetCpus` param (`Cpuset` is deprecated in recent versions of the API)
+* Fixed an issue with integration tests being run inside a container
+  (`make integration-test`)
+* Fixed a bug where an empty string would be considered a valid container ID
+  or image ID.
+* Fixed a bug in `Client.insert`
+
+
+### Documentation
+
+* Various fixes
+
+1.2.2
+-----
+
+### Bugfixes
+
+* Fixed a bug where parameters passed to `Client.exec_resize` would be ignored (#576)
+* Fixed a bug where auth config wouldn't be resolved properly in `Client.pull` (#577)
+
+1.2.1
+-----
+
+### Bugfixes
+
+* Fixed a bug where the `check_resource` decorator would break with some
+  argument-passing methods. (#573)
+
+1.2.0
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.2.0+is%3Aclosed)
+
+### Deprecation warning
+
+* `Client.execute` is being deprecated in favor of the more dev-friendly
+  `Client.exec_start` and `Client.exec_create`. **It will be removed in 1.3.0**
+
+### Features
+
+* Added `exec_create`, `exec_start`, `exec_inspect` and `exec_resize` to the
+  client, accurately mirroring the
+  [Exec API](https://docs.docker.com/reference/api/docker_remote_api_v1.18/#exec-create)
+* Added the `auth_config` param to `Client.pull` (allows one-off credentials
+  to be used for a single pull)
+* Added support for `ipc_mode` in host config.
+* Added support for the `log_config` param in host config.
+* Added support for the `ulimit` param in host config.
+* Added support for container resource limits in `Client.build`.
+* When a resource identifier (image or container ID) is passed to a Client
+  method, we now check for `None` values to avoid crashing
+  (now raises `docker.errors.NullResource`)
+* Added tools to parse port ranges inside the new `docker.utils.ports` package.
+* Added a `version_info` attribute to the `docker` package.
+
+### Bugfixes
+
+* Fixed a bug in `Client.port` where the absence of a certain key in the
+  container's JSON would raise an error (now just returns `None`)
+* Fixed a bug with the `trunc` parameter in `Client.containers` having no
+  effect (moved functionality to the client)
+* Several improvements have been made to the `Client.import_image` method.
+* Fixed pushing / pulling to
+  [v2 registries](https://github.com/docker/distribution)
+* Fixed a bug where passing a container dictionary to `Client.commit`
+  would fail
+
+### Miscellaneous
+
+* Default API version has been bumped to 1.18 (Docker Engine 1.6.0)
+* Several testing coverage improvements
+* Docs fixes and improvements
+
+1.1.0
+-----
+
+### Features
+
+* Added `dockerfile` param support to `Client.build` (mirrors
+  `docker build -f` behavior)
+* Added the ability to specify `'auto'` as `version` in `Client.__init__`,
+  allowing the constructor to autodetect the daemon's API version.
+
+### Bugfixes
+
+* Fixed a bug where decoding a result stream using the `decode` parameter
+  would break when using Python 3.x
+* Fixed a bug where some files in `.dockerignore` weren't being handled
+  properly
+* Fixed `resolve_authconfig` issues by bringing it closer to Docker Engine's
+  behavior. This should fix all issues encountered with private registry auth.
+* Fixed an issue where passwords containing a colon weren't being handled
+  properly.
+* Bumped the `requests` version requirement, which should fix most of the SSL
+  issues encountered recently.
+
+### Miscellaneous
+
+* Several integration test improvements.
+* Fixed some unclosed resources in unit tests.
+* Several docs improvements.
+
+1.0.0
+-----
+
+### Features
+
+* Added new `Client.rename` method (`docker rename`)
+* Added new `Client.stats` method (`docker stats`)
+* Added `read_only` param support to `utils.create_host_config` and
+  `Client.start` (`docker run --read-only`)
+* Added `pid_mode` param support to `utils.create_host_config` and
+  `Client.start` (`docker run --pid='host'`)
+* Added `since`, `until` and `filters` params to `Client.events`.
+* Added `decode` parameter to `Client.stats` and `Client.events` to decode
+  JSON objects on the fly (`False` by default).
+
+### Bugfixes
+
+* Fixed a bug that caused `Client.build` to crash when the provided source was
+  a remote source.
+
+### Miscellaneous
+
+* Default API version has been bumped to 1.17 (Docker Engine 1.5.0)
+* `Client.timeout` is now a public attribute, and users are encouraged to use
+  it when request timeouts need to be changed at runtime.
+* Added `Client.api_version` as a read-only property.
+* The `memswap_limit` argument in `Client.create_container` now accepts string
+  type values similar to `mem_limit` ('6g', '120000k', etc.)
+* Improved documentation
+
+0.7.2
+-----
+
+### Features
+
+* Added support for `mac_address` in `Client.create_container`
+
+### Bugfixes
+
+* Fixed a bug where streaming responses (`pull`, `push`, `logs`, etc.) were
+  unreliable (#300)
+* Fixed a bug where `resolve_authconfig` wouldn't properly resolve
+  configuration for private repositories (#468)
+* Fixed a bug where some errors wouldn't be properly constructed in
+  `client.py`, leading to unhelpful exceptions bubbling up (#466)
+* Fixed a bug where `Client.build` would try to close the context when it was
+  externally provided (`custom_context == True`) (#458)
+* Fixed an issue in `create_host_config` where empty sequences wouldn't be
+  interpreted properly (#462)
+
+### Miscellaneous
+
+* Added `resolve_authconfig` tests.
+
+0.7.1
+-----
+
+### Bugfixes
+
+* `setup.py` now indicates a maximum version of `requests` to work around the
+  boot2docker / `assert_hostname` bug.
+* Removed an invalid exception when using the Registry Hub's FQDN when pulling.
+* Fixed an issue where early HTTP errors weren't handled properly in streaming
+  responses.
+* Fixed a bug where sockets would close unexpectedly when using Python 3.x
+* Various fixes for integration tests.
+
+### Miscellaneous
+
+* Small doc fixes
+
+0.7.0
+-----
+
+### Breaking changes
+
+* Passing `dns` or `volumes_from` in `Client.start` with API version < 1.10
+  will now raise an exception (previously it only triggered a warning)
+
+### Features
+
+* Added support for `host_config` in `Client.create_container`
+* Added the utility method `docker.utils.create_host_config` to help build a
+  proper `HostConfig` dictionary.
+* Added support for the `pull` parameter in `Client.build`
+* Added support for the `forcerm` parameter in `Client.build`
+* Added support for `extra_hosts` in `Client.start`
+* Added support for a custom `timeout` in `Client.wait`
+* Added support for custom `.dockercfg` loading in `Client.login`
+  (`dockercfg_path` argument)
+
+### Bugfixes
+
+* Fixed a bug where some output wouldn't be streamed properly in streaming
+  chunked responses
+* Fixed a bug where the `devices` param didn't recognize the proper delimiter
+* `Client.login` now properly expands the `registry` URL if provided.
+* Fixed a bug where unicode characters passed in `environment` values for
+  `create_container` would break.
+
+### Miscellaneous
+
+* Several unit test and integration test improvements.
+* The `Client` constructor now enforces passing the `version` parameter as a
+  string.
+* Build context files are now ordered by filename when creating the archive
+  (for consistency with docker mainline behavior)
+
+0.6.0
+-----
+* **This version introduces breaking changes!**
+
+### Breaking changes
+
+* The default SSL protocol is now the highest TLS v1.x (was SSL v2.3 before)
+  (POODLE fix)
+* The `history` command now returns a dict instead of a raw JSON string.
+
+### Features
+
+* Added the `execute` command.
+* Added `pause` and `unpause` commands.
+* Added support for the `cpuset` param in `create_container`
+* Added support for host devices (`devices` param in `start`)
+* Added support for the `tail` param in `logs`.
+* Added support for the `filters` param in `images` and `containers`
+* The `kwargs_from_env` method is now available in the `docker.utils`
+  module. This should make it easier for boot2docker users to connect
+  to their daemon.
+
+### Bugfixes
+
+* Fixed a bug where empty directories weren't correctly included when
+  providing a context to `Client.build`.
+* Fixed a bug where UNIX socket connections weren't properly cleaned up,
+  causing `ResourceWarning`s to appear in some cases.
+* Fixed a bug where docker-py would crash if the docker daemon was stopped
+  while reading a streaming response.
+* Fixed a bug with streaming responses in Python 3
+* `remove_image` now supports a dict containing an `Id` key as its `id`
+  parameter (similar to other methods requiring a resource ID)
+
+### Documentation
+
+* Added new MkDocs documentation. Currently hosted on
+  [ReadTheDocs](https://docker-py.readthedocs.io/en/latest/)
+
+### Miscellaneous
+
+* Added tests to sdist
+* Added a Makefile for running tests in Docker
+* Updated Dockerfile
+
+0.5.3
+-----
+
+* Fixed attaching when connecting to the daemon over a UNIX socket.
+
+0.5.2
+-----
+
+* Fixed a bug where sockets were closed immediately when attaching over
+  TLS.
+
+0.5.1
+-----
+
+* Added an `assert_hostname` option to `TLSConfig` which can be used to
+  disable verification of hostnames.
+* Fixed SSL not working due to an incorrect version comparison +* Fixed streams not working on Windows + +0.5.0 +----- + +* **This version introduces breaking changes!** +* Added `insecure_registry` parameter in `Client.push` and `Client.pull`. + *It defaults to False and code pushing to non-HTTPS private registries + might break as a result.* +* Added support for adding and dropping capabilities +* Added support for restart policy +* Added support for string values in `Client.create_container`'s `mem_limit` +* Added support for `.dockerignore` file in `Client.build` + +### Bugfixes + +* Fixed timeout behavior in `Client.stop` + +### Miscellaneous + +* `Client.create_container` provides better validation of the `volumes` + parameter +* Improved integration tests + +0.4.0 +----- + +* **This version introduces breaking changes!** +* The `base_url` parameter in the `Client` constructor should now allow most + of the `DOCKER_HOST` environment values (except for the fd:// protocol) + * As a result, URLs that don't specify a port are now invalid (similar + to the official client's behavior) +* Added TLS support (see [documentation](https://github.com/dotcloud/docker-py#connection-to-daemon-using-https)) + +### Bugfixes + +* Fixed an issue with `Client.build` streamed logs in Python 3 + +### Miscellaneous + +* Added unit tests coverage +* Various integration tests fixes + +0.3.2 +----- + +* Default API version is now 1.12 (support for docker 1.0) +* Added new methods `Client.get_image` and `Client.load_image` + (`docker save` and `docker load`) +* Added new method `Client.ping` +* Added new method `Client.resize` +* `Client.build` can now be provided with a custom context using the + `custom_context` parameter. +* Added support for `memswap_limit` parameter in `create_container` +* Added support for `force` parameter in `remove_container` +* Added support for `force` and `noprune` parameters in `remove_image` +* Added support for `timestamps` parameter in `logs` +* Added support for `dns_search` parameter in `start` +* Added support for `network_mode` parameter in `start` +* Added support for `size` parameter in `containers` +* Added support for `volumes_from` and `dns` parameters in `start`. As of + API version >= 1.10, these parameters no longer belong to `create_container` +* `Client.logs` now uses the logs endpoint when API version is sufficient + +### Bugfixes + +* Fixed a bug in pull where the `repo:tag` notation wasn't interpreted + properly +* Fixed a bug in streaming methods with python 3 (unicode, bytes/str related) +* Fixed a bug in `Client.start` where legacy notation for volumes wasn't + supported anymore. + +### Miscellaneous + +* The client now raises `DockerException`s when appropriate. You can import + `DockerException` (and its subclasses) from the `docker.errors` module to + catch them if needed. +* `docker.APIError` has been moved to the new `docker.errors` module as well. +* `Client.insert` is deprecated in API version > 1.11 +* Improved integration tests should now run much faster. +* There is now a single source of truth for the docker-py version number. + +0.3.1 +----- + +* Default API version is now 1.9 +* Streaming responses no longer yield blank lines. +* `Client.create_container` now supports the `domainname` parameter. +* `volumes_from` parameter in `Client.create_container` now supports + iterables. 
+* Auth credentials are provided to the docker daemon when using `Client.build`
+  (new feature in API version 1.9)
+
+
+### Bugfixes
+
+* Various fixes for response streams (`logs`, `pull`, etc.).
+* Fixed a bug with `Client.push` when using API version < 1.5
+* Fixed a bug with API version checks.
+
+### Miscellaneous
+
+* `mock` has been removed from the runtime requirements.
+* Added installation instructions in the README.
+
+0.3.0
+-----
+
+* **This version introduces breaking changes!**
+* Support for API version 1.7 through 1.9 (Docker 0.8.0+)
+* Default API version is now 1.8
+* The client has been updated to support Requests 2.x. `requests==2.2.1`
+  is now the recommended version.
+* Links can now be specified as tuples in `Client.start` (see docs for
+  more information)
+* Added support for various options in `Client.create_container`
+  (`network_disabled`, `cpu_shares`, `working_dir` and `entrypoint`)
+* `Client.attach` has been reworked to work similarly to `Client.logs`,
+  minus the historical data.
+* Logs can now be streamed using the `stream` parameter.
+* Added support for `tcp://` URLs as the client `base_url`.
+* Various auth improvements.
+* Added support for a custom `Client.build` timeout.
+
+
+### Bugfixes
+
+* Fixed a bug where determining the protocol of a private registry
+  would sometimes yield the wrong result.
+* Fixed a bug where `Client.copy` wouldn't accept a dict as an argument.
+* Fixed several streaming bugs.
+* Removed an unused parameter in `Client.import_image`.
+* The client's `base_url` now tolerates trailing slashes.
+
+### Miscellaneous
+
+* Updated integration tests
+* Small doc fixes
+
+0.2.3
+-----
+
+* Support for API version 1.6
+* Added support for links
+* Added support for a global request timeout
+* Added the `signal` parameter in `Client.kill`
+* Added support for `publish_all_ports` in `Client.start`
+* `Client.pull`, `Client.push` and `Client.build` can now be streamed
+* Added support for websockets in `Client.attach`
+* Fixed ports for Docker 0.6.5+
+* Added the `Client.events` method (access to the `/events` endpoint)
+* Changed the way the ports and volumes are provided in `Client.start` and
+  `Client.create_container` to make them simpler and more intuitive.
+
+### Bugfixes
+
+* Fixed a bug where private registries on HTTPS weren't handled properly
+* Fixed a bug where auth would break with Python 3
+
+### Miscellaneous
+
+* Test improvements
+* Slight doc improvements
+
+
+0.2.2
+-----
+
+* Added support for the `rm` parameter in `Client.build`
+* Added support for tarball imports in `Client.import_image` through the
+  `data` parameter (see the sketch below).
+* The `command` parameter in `Client.create_container` is now optional (for
+  containers that include a default run command)
+
+### Bugfixes
+
+* Fixed Python 3 support
+* Fixed a bug where anonymous push/pull would break when no authconfig is
+  present
+* Fixed a bug where the `quiet` parameter wouldn't be taken into account in
+  `Client.containers`
+* Fixed a bug where `Client.push` would break when pushing to private
+  registries.
+* Removed the unused `registry` parameter in `Client.pull`.
+* Removed an obsolete custom error message in `Client.create_container`.
+
+### Miscellaneous
+
+* docker-py is now unit-tested, and Travis-CI has been enabled on the
+  source repository.
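+
+A minimal sketch of the tarball import added in 0.2.2 (the tarball path and
+repository name are hypothetical, and later releases reworked this method's
+arguments):
+
+```python
+import docker
+
+client = docker.Client(base_url='unix://var/run/docker.sock')
+
+# Import a root filesystem tarball as a new image via the `data` parameter.
+with open('rootfs.tar', 'rb') as tarball:
+    result = client.import_image(data=tarball, repository='myorg/myimage')
+print(result)
+```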
+
+0.2.1
+-----
+
+* Improvements to the `tox.ini` file
+
+### Bugfixes
+
+* Fixed a bug where the package would fail with an `ImportError` if requests
+  was installed using `apt-get`
+* Fixed a bug where `Client.build` would fail if given a `path` parameter.
+* Fixed several bugs in `Client.login`. It should now work with API versions
+  1.4 and 1.5.
+* Please note that `Client.login` currently doesn't write auth to the
+  `.dockercfg` file, thus **auth is not persistent when using this method.**
+
+0.2.0
+-----
+
+* **This version introduces breaking changes!**
+* `Client.kill`, `Client.remove_container`, `Client.remove_image`,
+`Client.restart`, `Client.start`, `Client.stop` and `Client.wait` don't support
+varargs anymore.
+* Added commands `Client.top` and `Client.copy`
+* Added the `lxc_conf` parameter to `Client.start`
+* Added support for authentication in `Client.pull` (API version >=1.5)
+* Added support for privileged containers.
+* Error management overhaul. The new version should be more consistent.
+* All methods that expected a container ID as an argument now also support a
+dict containing an `Id` key.
+* Added license headers to Python files.
+* Several `README.md` updates.
+
+### Bugfixes
+
+* Fixed several bugs with auth config parsing.
+* Fixed a bug in `Client.push` where it would raise an exception if
+the auth config wasn't loaded.
+* Fixed a bug in `Client.pull` where private registry images wouldn't be parsed
+properly if the registry address contained port information.
+
+
+0.1.5
+-----
+
+* `Client.build` now uses tempfiles to store the build context instead of
+storing it in memory
+* Added the `nocache` option to `Client.build`
+* `Client.remove_container` now raises an exception when trying to remove a
+running container
+* `Client.create_container` now accepts dicts for the `environment` parameter
+
+### Bugfixes
+
+* Fixed a bug in `Client.create_container` on Python 2.6 where unicode
+commands would fail to be parsed
+* Fixed a bug in `Client.build` where the `tag` parameter would not be taken
+into account
+
+0.1.4
+-----
+
+* Added support for API connection through a UNIX socket (default for Docker 0.5.2+)
+
+0.1.3
+-----
+
+* The client now tries to load the auth config from `~/.dockercfg`. This is necessary to use the push command if the API version is >1.0
+
+0.1.2
+-----
+
+* Added a `quiet` parameter to `Client.build` (mirrors the `q` parameter in the API)
+
+0.1.1
+-----
+
+* Fixed a bug where the build command would list tar contents before sending the request
+* Fixed a bug in `Client.port`
+
+
+0.1.0
+-----
+* **This version introduces breaking changes!**
+* Switched to a server-side build system
+* Removed the BuilderClient
+* Added support for contextual builds
+* Added support for remote URL builds
+* Added Python 3 support
+* Added bind mounts support
+* Added API version support
+* Fixed a bug where `Client.port` would fail if provided with a numeric port
+* Fixed a bug where `Client._post_json` wouldn't set the Content-Type header to `application/json`
+
+0.0.6
+-----
+* Added support for custom loggers in `Client.build`
+* Added the `Client.attach` command
+* Added support for the `ADD` command in the builder
+* Fixed a bug in `Client.logs`
+* Improved unit tests
+
+
+0.0.5
+-----
+* Added tag support for the builder
+* Use `shlex` to parse plain string commands when creating a container
+* Fixed several bugs in the builder
+* Fixed the `quiet` option in `Client.images`
+* Unit tests
+
+0.0.4
+-----
+* Improved error reporting
+
+0.0.3
+-----
+* Fixed a bug in `Client.tag`
+* Fixed a bug where generated images would be removed after a successful build
+
+0.0.2
+-----
+* Implemented the first version of the builder client
diff --git a/docs/change_log.md b/docs/change_log.md
deleted file mode 100644
index aac4acb1c5..0000000000
--- a/docs/change_log.md
+++ /dev/null
@@ -1,610 +0,0 @@
-Change Log
-==========
-
-1.2.3
------
-
-[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.2.3+is%3Aclosed)
-
-### Deprecation warning
-
-* Passing host config in the `Client.start` method is now deprecated. Please use the
-  `host_config` in `Client.create_container` instead.
-
-### Features
-
-* Added support for `privileged` param in `Client.exec_create`
-  (only available in API >= 1.19)
-* Volume binds can now also be specified as a list of strings.
-
-### Bugfixes
-
-* Fixed a bug where the `read_only` param in host_config wasn't handled
-  properly.
-* Fixed a bug in `Client.execute` (this method is still deprecated).
-* The `cpuset` param in `Client.create_container` is also passed as
-  the `CpusetCpus` param (`Cpuset` deprecated in recent versions of the API)
-* Fixed an issue with integration tests being run inside a container
-  (`make integration-test`)
-* Fixed a bug where an empty string would be considered a valid container ID
-  or image ID.
-* Fixed a bug in `Client.insert`
-
-
-### Documentation
-
-* Various fixes
-
-1.2.2
------
-
-### Bugfixes
-
-* Fixed a bug where parameters passed to `Client.exec_resize` would be ignored (#576)
-* Fixed a bug where auth config wouldn't be resolved properly in `Client.pull` (#577)
-
-1.2.1
------
-
-### Bugfixes
-
-* Fixed a bug where the check_resource decorator would break with some
-  argument-passing methods. (#573)
-
-1.2.0
------
-
-[List of PRs / issues for this release](https://github.com/docker/docker-py/issues?q=milestone%3A1.2.0+is%3Aclosed)
-
-### Deprecation warning
-
-* `Client.execute` is being deprecated in favor of the more dev-friendly
-  `Client.exec_start` and `Client.exec_create`. 
**It will be removed in 1.3.0** - -### Features - -* Added `exec_create`, `exec_start`, `exec_inspect` and `exec_resize` to - client, accurately mirroring the - [Exec API](https://docs.docker.com/reference/api/docker_remote_api_v1.18/#exec-create) -* Added `auth_config` param to `Client.pull` (allows to use one-off credentials - for this pull request) -* Added support for `ipc_mode` in host config. -* Added support for the `log_config` param in host config. -* Added support for the `ulimit` param in host config. -* Added support for container resource limits in `Client.build`. -* When a resource identifier (image or container ID) is passed to a Client - method, we now check for `None` values to avoid crashing - (now raises `docker.errors.NullResource`) -* Added tools to parse port ranges inside the new `docker.utils.ports` package. -* Added a `version_info` attribute to the `docker` package. - -### Bugfixes - -* Fixed a bug in `Client.port` where absence of a certain key in the - container's JSON would raise an error (now just returns `None`) -* Fixed a bug with the `trunc` parameter in `Client.containers` having no - effect (moved functionality to the client) -* Several improvements have been made to the `Client.import_image` method. -* Fixed pushing / pulling to - [v2 registries](https://github.com/docker/distribution) -* Fixed a bug where passing a container dictionary to `Client.commit` - would fail - -### Miscellaneous - -* Default API version has been bumped to 1.18 (Docker Engine 1.6.0) -* Several testing coverage improvements -* Docs fixes and improvements - -1.1.0 ------ - -### Features - -* Added `dockerfile` param support to `Client.build` (mirrors - `docker build -f` behavior) -* Added the ability to specify `'auto'` as `version` in `Client.__init__`, - allowing the constructor to autodetect the daemon's API version. - -### Bugfixes - -* Fixed a bug where decoding a result stream using the `decode` parameter - would break when using Python 3.x -* Fixed a bug where some files in `.dockerignore` weren't being handled - properly -* Fixed `resolve_authconfig` issues by bringing it closer to Docker Engine's - behavior. This should fix all issues encountered with private registry auth -* Fixed an issue where passwords containing a colon weren't being handled - properly. -* Bumped `requests` version requirement, which should fix most of the SSL - issues encountered recently. - -### Miscellaneous - -* Several integration test improvements. -* Fixed some unclosed resources in unit tests. -* Several docs improvements. - -1.0.0 ------ - -### Features - -* Added new `Client.rename` method (`docker rename`) -* Added now `Client.stats` method (`docker stats`) -* Added `read_only` param support to `utils.create_host_config` and - `Client.start` (`docker run --read-only`) -* Added `pid_mode` param support to `utils.create_host_config` and - `Client.start` (`docker run --pid='host'`) -* Added `since`, `until` and `filters` params to `Client.events`. -* Added `decode` parameter to `Client.stats` and `Client.events` to decode - JSON objects on the fly (False by default). - -### Bugfixes - -* Fixed a bug that caused `Client.build` to crash when the provided source was - a remote source. - -### Miscellaneous - -* Default API version has been bumped to 1.17 (Docker Engine 1.5.0) -* `Client.timeout` is now a public attribute, and users are encouraged to use it - when request timeouts need to be changed at runtime. -* Added `Client.api_version` as a read-only property. 
-* The `memswap_limit` argument in `Client.create_container` now accepts string - type values similar to `mem_limit` ('6g', '120000k', etc.) -* Improved documentation - -0.7.2 ------ - -### Features - -* Added support for `mac_address` in `Client.create_container` - -### Bugfixes - -* Fixed a bug where streaming responses (`pull`, `push`, `logs`, etc.) were - unreliable (#300) -* Fixed a bug where resolve_authconfig wouldn't properly resolve configuration - for private repositories (#468) -* Fixed a bug where some errors wouldn't be properly constructed in - `client.py`, leading to unhelpful exceptions bubbling up (#466) -* Fixed a bug where `Client.build` would try to close context when externally - provided (`custom_context == True`) (#458) -* Fixed an issue in `create_host_config` where empty sequences wouldn't be - interpreted properly (#462) - -### Miscellaneous - -* Added `resolve_authconfig` tests. - -0.7.1 ------ - -### Bugfixes - -* `setup.py` now indicates a maximum version of requests to work around the - boot2docker / `assert_hostname` bug. -* Removed invalid exception when using the Registry Hub's FQDN when pulling. -* Fixed an issue where early HTTP errors weren't handled properly in streaming - responses. -* Fixed a bug where sockets would close unexpectedly using Python 3.x -* Various fixes for integration tests. - -### Miscellaneous - -* Small doc fixes - -0.7.0 ------ - -### Breaking changes - -* Passing `dns` or `volumes_from` in `Client.start` with API version < 1.10 - will now raise an exception (previously only triggered a warning) - -### Features - -* Added support for `host_config` in `Client.create_container` -* Added utility method `docker.utils.create_host_config` to help build a - proper `HostConfig` dictionary. -* Added support for the `pull` parameter in `Client.build` -* Added support for the `forcerm` parameter in `Client.build` -* Added support for `extra_hosts` in `Client.start` -* Added support for a custom `timeout` in `Client.wait` -* Added support for custom `.dockercfg` loading in `Client.login` - (`dockercfg_path` argument) - -### Bugfixes - -* Fixed a bug where some output wouldn't be streamed properly in streaming - chunked responses -* Fixed a bug where the `devices` param didn't recognize the proper delimiter -* `Client.login` now properly expands the `registry` URL if provided. -* Fixed a bug where unicode characters in passed for `environment` in - `create_container` would break. - -### Miscellaneous - -* Several unit tests and integration tests improvements. -* `Client` constructor now enforces passing the `version` parameter as a - string. -* Build context files are now ordered by filename when creating the archive - (for consistency with docker mainline behavior) - -0.6.0 ------ -* **This version introduces breaking changes!** - -### Breaking changes - -* The default SSL protocol is now the highest TLS v1.x (was SSL v2.3 before) - (Poodle fix) -* The `history` command now returns a dict instead of a raw JSON string. - -### Features - -* Added the `execute` command. -* Added `pause` and `unpause` commands. -* Added support fo the `cpuset` param in `create_container` -* Added support for host devices (`devices` param in `start`) -* Added support for the `tail` param in `logs`. -* Added support for the `filters` param in `images` and `containers` -* The `kwargs_from_env` method is now available in the `docker.utils` - module. This should make it easier for boot2docker user to connect - to their daemon. 
- -### Bugfixes - -* Fixed a bug where empty directories weren't correctly included when - providing a context to `Client.build`. -* Fixed a bug where UNIX socket connections weren't properly cleaned up, - causing `ResourceWarning`s to appear in some cases. -* Fixed a bug where docker-py would crash if the docker daemon was stopped - while reading a streaming response -* Fixed a bug with streaming responses in Python 3 -* `remove_image` now supports a dict containing an `Id` key as its `id` - parameter (similar to other methods requiring a resource ID) - -### Documentation - -* Added new MkDocs documentation. Currently hosted on - [ReadTheDocs](http://docker-py.readthedocs.org/en/latest/) - -### Miscellaneous - -* Added tests to sdist -* Added a Makefile for running tests in Docker -* Updated Dockerfile - -0.5.3 ------ - -* Fixed attaching when connecting to the daemon over a UNIX socket. - -0.5.2 ------ - -* Fixed a bug where sockets were closed immediately when attaching over - TLS. - -0.5.1 ------ - -* Added a `assert_hostname` option to `TLSConfig` which can be used to - disable verification of hostnames. -* Fixed SSL not working due to an incorrect version comparison -* Fixed streams not working on Windows - -0.5.0 ------ - -* **This version introduces breaking changes!** -* Added `insecure_registry` parameter in `Client.push` and `Client.pull`. - *It defaults to False and code pushing to non-HTTPS private registries - might break as a result.* -* Added support for adding and dropping capabilities -* Added support for restart policy -* Added support for string values in `Client.create_container`'s `mem_limit` -* Added support for `.dockerignore` file in `Client.build` - -### Bugfixes - -* Fixed timeout behavior in `Client.stop` - -### Miscellaneous - -* `Client.create_container` provides better validation of the `volumes` - parameter -* Improved integration tests - -0.4.0 ------ - -* **This version introduces breaking changes!** -* The `base_url` parameter in the `Client` constructor should now allow most - of the `DOCKER_HOST` environment values (except for the fd:// protocol) - * As a result, URLs that don't specify a port are now invalid (similar - to the official client's behavior) -* Added TLS support (see [documentation](https://github.com/dotcloud/docker-py#connection-to-daemon-using-https)) - -### Bugfixes - -* Fixed an issue with `Client.build` streamed logs in Python 3 - -### Miscellaneous - -* Added unit tests coverage -* Various integration tests fixes - -0.3.2 ------ - -* Default API version is now 1.12 (support for docker 1.0) -* Added new methods `Client.get_image` and `Client.load_image` - (`docker save` and `docker load`) -* Added new method `Client.ping` -* Added new method `Client.resize` -* `Client.build` can now be provided with a custom context using the - `custom_context` parameter. -* Added support for `memswap_limit` parameter in `create_container` -* Added support for `force` parameter in `remove_container` -* Added support for `force` and `noprune` parameters in `remove_image` -* Added support for `timestamps` parameter in `logs` -* Added support for `dns_search` parameter in `start` -* Added support for `network_mode` parameter in `start` -* Added support for `size` parameter in `containers` -* Added support for `volumes_from` and `dns` parameters in `start`. 
As of - API version >= 1.10, these parameters no longer belong to `create_container` -* `Client.logs` now uses the logs endpoint when API version is sufficient - -### Bugfixes - -* Fixed a bug in pull where the `repo:tag` notation wasn't interpreted - properly -* Fixed a bug in streaming methods with python 3 (unicode, bytes/str related) -* Fixed a bug in `Client.start` where legacy notation for volumes wasn't - supported anymore. - -### Miscellaneous - -* The client now raises `DockerException`s when appropriate. You can import - `DockerException` (and its subclasses) from the `docker.errors` module to - catch them if needed. -* `docker.APIError` has been moved to the new `docker.errors` module as well. -* `Client.insert` is deprecated in API version > 1.11 -* Improved integration tests should now run much faster. -* There is now a single source of truth for the docker-py version number. - -0.3.1 ------ - -* Default API version is now 1.9 -* Streaming responses no longer yield blank lines. -* `Client.create_container` now supports the `domainname` parameter. -* `volumes_from` parameter in `Client.create_container` now supports - iterables. -* Auth credentials are provided to the docker daemon when using `Client.build` - (new feature in API version 1.9) - - -### Bugfixes - -* Various fixes for response streams (`logs`, `pull`, etc.). -* Fixed a bug with `Client.push` when using API version < 1.5 -* Fixed a bug with API version checks. - -### Miscellaneous - -* `mock` has been removed from the runtime requirements. -* Added installation instructions in the README. - -0.3.0 ------ - -* **This version introduces breaking changes!** -* Support for API version 1.7 through 1.9 (Docker 0.8.0+) -* Default API version is now 1.8 -* The client has been updated to support Requests 2.x. `requests==2.2.1` - is now the recommended version. -* Links can now be specified as tuples in `Client.start` (see docs for - more information) -* Added support for various options in `Client.create_container` - (`network_disabled`, `cpu_shares`, `working_dir` and `entrypoint`) -* `Client.attach` has been reworked to work similarly to `Client.logs` - minus the historical data. -* Logs can now be streamed using the `stream` parameter. -* Added support for `tcp://` URLs as client `base_url`. -* Various auth improvements. -* Added support for custom `Client.build` timeout. - - -### Bugfixes - -* Fixed a bug where determining the protocol of a private registry - would sometimes yield the wrong result. -* Fixed a bug where `Client.copy` wouldn't accept a dict as argument. -* Fixed several streaming bugs. -* Removed unused parameter in `Client.import_image`. -* The client's `base_url` now tolerates trailing slashes. - -#### Miscellaneous - -* Updated integration tests -* Small doc fixes - -0.2.3 ------ - -* Support for API version 1.6 -* Added support for links -* Added support for global request timeout -* Added `signal` parameter in `Client.kill` -* Added support for `publish_all_ports` in `Client.start` -* `Client.pull`, `Client.push` and `Client.build` can be streamed now -* Added support for websockets in `Client.attach` -* Fixed ports for Docker 0.6.5+ -* Added `Client.events` method (access to the `/events` endpoint) -* Changed the way the ports and volumes are provided in `Client.start` and - `Client.create_container̀` to make them simpler and more intuitive. 
- -### Bugfixes - -* Fixed a bug where private registries on HTTPS weren't handled properly -* Fixed a bug where auth would break with Python 3 - -### Miscellaneous - -* Test improvements -* Slight doc improvements - - -0.2.2 ------ - -* Added support for the `rm` parameter in `Client.build` -* Added support for tarball imports in `Client.import_image` through `data` - parameter. -* The `command` parameter in `Client.create_container` is now optional (for - containers that include a default run command) - -### Bugfixes - -* Fixed Python 3 support -* Fixed a bug where anonymous push/pull would break when no authconfig is - present -* Fixed a bug where the `quiet` parameter wouldn't be taken into account in - `Client.containers` -* Fixed a bug where `Client.push` would break when pushing to private - registries. -* Removed unused `registry` parameter in `Client.pull`. -* Removed obsolete custom error message in `Client.create_container`. - -### Miscellaneous - -* docker-py is now unit-tested, and Travis-CI has been enabled on the - source repository. - -0.2.1 ------ - -* Improvements to the `tox.ini` file - -### Bugfixes - -* Fixed a bug where the package would fail with an `ImportError` if requests - was installed using `apt-get` -* Fixed a bug where `Client.build` would fail if given a `path` parameter. -* Fixed several bugs in `Client.login`. It should now work with API versions - 1.4, 1.5. -* Please note that `Client.login` currently doesn't write auth to the - `.dockercfg` file, thus **auth is not persistent when using this method.** - -0.2.0 ------ - -* **This version introduces breaking changes!** -* `Client.kill`, `Client.remove_container`, `Client.remove_image`, -`Client.restart`, `Client.start`, `Client.stop` and `Client.wait` don't support -varargs anymore. -* Added commands `Client.top` and `Client.copy` -* Added `lxc_conf` parameter to `Client.start` -* Added support for authentication in `Client.pull` (API version >=1.5) -* Added support for privileged containers. -* Error management overhaul. The new version should be more consistent and -* All methods that expected a container ID as argument now also support a dict -containing an `Id` key. -* Added license header to python files. -* Several `README.md` updates. - -### Bugfixes - -* Fixed several bugs with auth config parsing. -* Fixed a bug in `Client.push` where it would raise an exception if -the auth config wasn't loaded. -* Fixed a bug in `Client.pull` where private registry images wouldn't be parsed -properly if it contained port information. - - -0.1.5 ------ - -* `Client.build` now uses tempfiles to store build context instead of storing -it in memory -* Added `nocache` option to `Client.build` -* `Client.remove_container` now raises an exception when trying to remove a -running container -* `Client.create_container` now accepts dicts for the `environment` parameter - -### Bugfixes - -* Fixed a bug in `Client.create_container` on Python 2.6 where unicode -commands would fail to be parsed -* Fixed a bug in `Client.build` where the `tag` parameter would not be taken -into account - -0.1.4 ------ - -* Added support for API connection through UNIX socket (default for docker 0.5.2+) - -0.1.3 ------ - -* The client now tries to load the auth config from `~/.dockercfg`. 
This is necessary to use the push command if API version is >1.0 - -0.1.2 ------ - -* Added a `quiet parameter` to `Client.build` (mirrors the `q` parameter in the API) - -0.1.1 ------ - -* Fixed a bug where the build command would list tar contents before sending the request -* Fixed a bug in `Client.port` - - -0.1.0 ------ -* **This version introduces breaking changes!** -* Switched to server side build system -* Removed the BuilderClient -* Added support for contextual builds -* Added support for remote URL builds -* Added python 3 support -* Added bind mounts support -* Added API version support -* Fixed a bug where `Client.port` would fail if provided with a port of type number -* Fixed a bug where `Client._post_json` wouldn't set the Content-Type header to `application/json` - -0.0.6 ------ -* Added support for custom loggers in `Client.build` -* Added `Client.attach` command -* Added support for `ADD` command in builder -* Fixed a bug in `Client.logs` -* Improved unit tests - - -0.0.5 ------ -* Added tag support for the builder -* Use `shlex` to parse plain string commands when creating a container -* Fixed several bugs in the builder -* Fixed the `quiet` option in `Client.images` -* Unit tests - -0.0.4 ------ -* Improved error reporting - -0.0.3 ------ -* Fixed a bug in `Client.tag` -* Fixed a bug where generated images would be removed after a successful build - -0.0.2 ------ -* Implemented first version of the builder client diff --git a/docs/client.rst b/docs/client.rst new file mode 100644 index 0000000000..85a1396f63 --- /dev/null +++ b/docs/client.rst @@ -0,0 +1,35 @@ +Client +====== +.. py:module:: docker.client + + +Creating a client +----------------- + +To communicate with the Docker daemon, you first need to instantiate a client. The easiest way to do that is by calling the function :py:func:`~docker.client.from_env`. It can also be configured manually by instantiating a :py:class:`~docker.client.DockerClient` class. + +.. autofunction:: from_env() + +Client reference +---------------- + +.. autoclass:: DockerClient() + + .. autoattribute:: configs + .. autoattribute:: containers + .. autoattribute:: images + .. autoattribute:: networks + .. autoattribute:: nodes + .. autoattribute:: plugins + .. autoattribute:: secrets + .. autoattribute:: services + .. autoattribute:: swarm + .. autoattribute:: volumes + + .. automethod:: close() + .. automethod:: df() + .. automethod:: events() + .. automethod:: info() + .. automethod:: login() + .. automethod:: ping() + .. automethod:: version() diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 0000000000..02694d3cdf --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,361 @@ +# +# docker-sdk-python documentation build configuration file, created by +# sphinx-quickstart on Wed Sep 14 15:48:58 2016. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. 
+# +import datetime +import os +import sys +from importlib.metadata import version + +sys.path.insert(0, os.path.abspath('..')) + + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.napoleon', + 'myst_parser' +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + + +source_suffix = { + '.rst': 'restructuredtext', + '.txt': 'markdown', + '.md': 'markdown', +} + +# The encoding of source files. +# +# source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = 'Docker SDK for Python' +year = datetime.datetime.now().year +copyright = f'{year} Docker Inc' +author = 'Docker Inc' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# see https://github.com/pypa/setuptools_scm#usage-from-sphinx +release = version('docker') +# for example take major/minor +version = '.'.join(release.split('.')[:2]) + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = 'en' + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# +# today = '' +# +# Else, today_fmt is used as the format for a strftime call. +# +# today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This patterns also effect to html_static_path and html_extra_path +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +# +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# +add_module_names = False + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +# modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +# keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = False + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'alabaster' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. 
+# +html_theme_options = { + 'description': 'A Python library for the Docker Engine API', + 'fixed_sidebar': True, +} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. +# " v documentation" by default. +# +# html_title = u'docker-sdk-python v2.0' + +# A shorter title for the navigation bar. Default is the same as html_title. +# +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# +# html_logo = None + +# The name of an image file (relative to this directory) to use as a favicon of +# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +# +# html_extra_path = [] + +# If not None, a 'Last updated on:' timestamp is inserted at every page +# bottom, using the given strftime format. +# The empty string is equivalent to '%b %d, %Y'. +# +# html_last_updated_fmt = None + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# +html_sidebars = { + '**': [ + 'about.html', + 'navigation.html', + 'searchbox.html', + ] +} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# +# html_additional_pages = {} + +# If false, no module index is generated. +# +# html_domain_indices = True + +# If false, no index is generated. +# +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# +# html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# +# html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +# +# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. +# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh' +# +# html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. +# 'ja' uses this config value. +# 'zh' user can custom change `jieba` dictionary path. +# +# html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. +# +# html_search_scorer = 'scorer.js' + +# Output file base name for HTML help builder. 
+htmlhelp_basename = 'docker-sdk-pythondoc' + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'docker-sdk-python.tex', 'docker-sdk-python Documentation', + 'Docker Inc.', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# +# latex_use_parts = False + +# If true, show page references after internal links. +# +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# +# latex_appendices = [] + +# It false, will not define \strong, \code, itleref, \crossref ... but only +# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added +# packages. +# +# latex_keep_old_macro_names = True + +# If false, no module index is generated. +# +# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + (master_doc, 'docker-sdk-python', 'docker-sdk-python Documentation', + [author], 1) +] + +# If true, show URL addresses after external links. +# +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'docker-sdk-python', 'docker-sdk-python Documentation', + author, 'docker-sdk-python', 'One line description of project.', + 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. +# +# texinfo_appendices = [] + +# If false, no module index is generated. +# +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# +# texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +# +# texinfo_no_detailmenu = False + + +# Napoleon settings +napoleon_google_docstring = True +napoleon_numpy_docstring = False diff --git a/docs/configs.rst b/docs/configs.rst new file mode 100644 index 0000000000..d907ad4216 --- /dev/null +++ b/docs/configs.rst @@ -0,0 +1,30 @@ +Configs +======= + +.. py:module:: docker.models.configs + +Manage configs on the server. + +Methods available on ``client.configs``: + +.. rst-class:: hide-signature +.. py:class:: ConfigCollection + + .. automethod:: create + .. automethod:: get + .. automethod:: list + + +Config objects +-------------- + +.. autoclass:: Config() + + .. autoattribute:: id + .. autoattribute:: name + .. py:attribute:: attrs + + The raw representation of this object from the server. + + .. automethod:: reload + .. 
automethod:: remove
diff --git a/docs/containers.rst b/docs/containers.rst
new file mode 100644
index 0000000000..7c41bfdd92
--- /dev/null
+++ b/docs/containers.rst
@@ -0,0 +1,57 @@
+Containers
+==========
+
+.. py:module:: docker.models.containers
+
+Run and manage containers on the server.
+
+Methods available on ``client.containers``:
+
+.. rst-class:: hide-signature
+.. autoclass:: ContainerCollection
+
+   .. automethod:: run(image, command=None, **kwargs)
+   .. automethod:: create(image, command=None, **kwargs)
+   .. automethod:: get(id_or_name)
+   .. automethod:: list(**kwargs)
+   .. automethod:: prune
+
+Container objects
+-----------------
+
+.. autoclass:: Container()
+
+   .. py:attribute:: attrs
+
+      The raw representation of this object from the server.
+
+   .. autoattribute:: id
+   .. autoattribute:: image
+   .. autoattribute:: labels
+   .. autoattribute:: name
+   .. autoattribute:: short_id
+   .. autoattribute:: status
+
+   .. automethod:: attach
+   .. automethod:: attach_socket
+   .. automethod:: commit
+   .. automethod:: diff
+   .. automethod:: exec_run
+   .. automethod:: export
+   .. automethod:: get_archive
+   .. automethod:: kill
+   .. automethod:: logs
+   .. automethod:: pause
+   .. automethod:: put_archive
+   .. automethod:: reload
+   .. automethod:: remove
+   .. automethod:: rename
+   .. automethod:: resize
+   .. automethod:: restart
+   .. automethod:: start
+   .. automethod:: stats
+   .. automethod:: stop
+   .. automethod:: top
+   .. automethod:: unpause
+   .. automethod:: update
+   .. automethod:: wait
diff --git a/docs/contributing.md b/docs/contributing.md
deleted file mode 100644
index e776458338..0000000000
--- a/docs/contributing.md
+++ /dev/null
@@ -1,36 +0,0 @@
-# Contributing
-See the [Docker contributing guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md).
-The following is specific to docker-py.
-
-## Running the tests & Code Quality
-
-
-To get the source source code and run the unit tests, run:
-```
-$ git clone git://github.com/docker/docker-py.git
-$ cd docker-py
-$ pip install tox
-$ tox
-```
-
-## Building the docs
-Docs are built with [MkDocs](http://www.mkdocs.org/). For development, you can
-run the following in the project directory:
-```
-$ pip install -r docs-requirements.txt
-$ mkdocs serve
-```
-
-## Release Checklist
-
-Before a new release, please go through the following checklist:
-
-* Bump version in docker/version.py
-* Add a release note in docs/change_log.md
-* Git tag the version
-* Upload to pypi
-
-## Vulnerability Reporting
-For any security issues, please do NOT file an issue or pull request on github!
-Please contact [security@docker.com](mailto:security@docker.com) or read [the
-Docker security page](https://www.docker.com/resources/security/).
diff --git a/docs/host-devices.md b/docs/host-devices.md
deleted file mode 100644
index f1ee3e1b63..0000000000
--- a/docs/host-devices.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# Access to devices on the host
-
-If you need to directly expose some host devices to a container, you can use
-the devices parameter in the `host_config` param in `Client.create_container`
-as shown below:
-
-```python
-c.create_container(
-    'busybox', 'true', host_config=docker.utils.create_host_config(devices=[
-        '/dev/sda:/dev/xvda:rwm'
-    ])
-)
-```
-
-Each string is a single mapping using the colon (':') as the separator. So the
-above example essentially allow the container to have read write permissions to
-access the host's /dev/sda via a node named /dev/xvda in the container.
The -devices parameter is a list to allow multiple devices to be mapped. diff --git a/docs/hostconfig.md b/docs/hostconfig.md deleted file mode 100644 index 001be172cc..0000000000 --- a/docs/hostconfig.md +++ /dev/null @@ -1,101 +0,0 @@ -# HostConfig object - -The Docker Remote API introduced [support for HostConfig in version 1.15](http://docs.docker.com/reference/api/docker_remote_api_v1.15/#create-a-container). This object contains all the parameters you could previously pass to `Client.start`. -*It is highly recommended that users pass the HostConfig in the `host_config`* -*param of `Client.create_container` instead of `Client.start`* - -## HostConfig helper - -### docker.utils.create_host_config - -Creates a HostConfig dictionary to be used with `Client.create_container`. - -`binds` allows to bind a directory in the host to the container. See [Using -volumes](volumes.md) for more information. - -`port_bindings` exposes container ports to the host. -See [Port bindings](port-bindings.md) for more information. - -`lxc_conf` allows to pass LXC configuration options using a dictionary. - -`privileged` starts the container in privileged mode. - -[Links](http://docs.docker.io/en/latest/use/working_with_links_names/) can be -specified with the `links` argument. They can either be specified as a -dictionary mapping name to alias or as a list of `(name, alias)` tuples. - -`dns` and `volumes_from` are only available if they are used with version v1.10 -of docker remote API. Otherwise they are ignored. - -`network_mode` is available since v1.11 and sets the Network mode for the -container ('bridge': creates a new network stack for the container on the -Docker bridge, 'none': no networking for this container, 'container:[name|id]': -reuses another container network stack), 'host': use the host network stack -inside the container. - -`restart_policy` is available since v1.2.0 and sets the RestartPolicy for how a -container should or should not be restarted on exit. By default the policy is -set to no meaning do not restart the container when it exits. The user may -specify the restart policy as a dictionary for example: -```python -{ - "MaximumRetryCount": 0, - "Name": "always" -} -``` - -For always restarting the container on exit or can specify to restart the -container to restart on failure and can limit number of restarts. For example: -```python -{ - "MaximumRetryCount": 5, - "Name": "on-failure" -} -``` - -`cap_add` and `cap_drop` are available since v1.2.0 and can be used to add or -drop certain capabilities. The user may specify the capabilities as an array -for example: -```python -[ - "SYS_ADMIN", - "MKNOD" -] -``` - - -**Params** - -* binds: Volumes to bind. See [Using volumes](volumes.md) for more information. -* port_bindings (dict): Port bindings. See [Port bindings](port-bindings.md) - for more information. -* lxc_conf (dict): LXC config -* publish_all_ports (bool): Whether to publish all ports to the host -* links (dict or list of tuples): either as a dictionary mapping name to alias or - as a list of `(name, alias)` tuples -* privileged (bool): Give extended privileges to this container -* dns (list): Set custom DNS servers -* dns_search (list): DNS search domains -* volumes_from (str or list): List of container names or Ids to get volumes - from. 
Optionally a single string joining container id's with commas -* network_mode (str): One of `['bridge', None, 'container:', 'host']` -* restart_policy (dict): "Name" param must be one of `['on-failure', 'always']` -* cap_add (list of str): Add kernel capabilities -* cap_drop (list of str): Drop kernel capabilities -* extra_hosts (dict): custom host-to-IP mappings (host:ip) -* read_only (bool): mount the container's root filesystem as read only -* pid_mode (str): if set to "host", use the host PID namespace inside the - container -* security_opt (list): A list of string values to customize labels for MLS - systems, such as SELinux. -* ulimits (list): A list of dicts or `docker.utils.Ulimit` objects. A list - of ulimits to be set in the container. -* log_config (`docker.utils.LogConfig` or dict): Logging configuration to container - -**Returns** (dict) HostConfig dictionary - -```python ->>> from docker.utils import create_host_config ->>> create_host_config(privileged=True, cap_drop=['MKNOD'], volumes_from=['nostalgic_newton']) -{'CapDrop': ['MKNOD'], 'LxcConf': None, 'Privileged': True, 'VolumesFrom': ['nostalgic_newton'], 'PublishAllPorts': False} -``` diff --git a/docs/images.rst b/docs/images.rst new file mode 100644 index 0000000000..4d425e95a1 --- /dev/null +++ b/docs/images.rst @@ -0,0 +1,62 @@ +Images +====== + +.. py:module:: docker.models.images + +Manage images on the server. + +Methods available on ``client.images``: + +.. rst-class:: hide-signature +.. py:class:: ImageCollection + + .. automethod:: build + .. automethod:: get + .. automethod:: get_registry_data + .. automethod:: list(**kwargs) + .. automethod:: load + .. automethod:: prune + .. automethod:: pull + .. automethod:: push + .. automethod:: remove + .. automethod:: search + + +Image objects +------------- + +.. autoclass:: Image() + + .. py:attribute:: attrs + + The raw representation of this object from the server. + + .. autoattribute:: id + .. autoattribute:: labels + .. autoattribute:: short_id + .. autoattribute:: tags + + + + .. automethod:: history + .. automethod:: reload + .. automethod:: save + .. automethod:: tag + +RegistryData objects +-------------------- + +.. autoclass:: RegistryData() + + .. py:attribute:: attrs + + The raw representation of this object from the server. + + .. autoattribute:: id + .. autoattribute:: short_id + + + + .. automethod:: has_platform + .. automethod:: pull + .. automethod:: reload diff --git a/docs/index.md b/docs/index.md deleted file mode 100644 index 5b851f0a4b..0000000000 --- a/docs/index.md +++ /dev/null @@ -1,15 +0,0 @@ -# docker-py documentation - -An API client for docker written in Python - -## Installation - -Our latest stable is always available on PyPi. - - pip install docker-py - -## Documentation -Full documentation is available in the `/docs/` directory. - -## License -Docker is licensed under the Apache License, Version 2.0. See LICENSE for full license text diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 0000000000..93b30d4a07 --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,96 @@ +Docker SDK for Python +===================== + +A Python library for the Docker Engine API. It lets you do anything the ``docker`` command does, but from within Python apps – run containers, manage containers, manage Swarms, etc. + +For more information about the Engine API, `see its documentation `_. + +Installation +------------ + +The latest stable version `is available on PyPI `_. 
Either add ``docker`` to your ``requirements.txt`` file or install with pip:: + + pip install docker + +Getting started +--------------- + +To talk to a Docker daemon, you first need to instantiate a client. You can use :py:func:`~docker.client.from_env` to connect using the default socket or the configuration in your environment: + +.. code-block:: python + + import docker + client = docker.from_env() + +You can now run containers: + +.. code-block:: python + + >>> client.containers.run("ubuntu", "echo hello world") + 'hello world\n' + +You can run containers in the background: + +.. code-block:: python + + >>> client.containers.run("bfirsh/reticulate-splines", detach=True) + + +You can manage containers: + +.. code-block:: python + + >>> client.containers.list() + [, , ...] + + >>> container = client.containers.get('45e6d2de7c54') + + >>> container.attrs['Config']['Image'] + "bfirsh/reticulate-splines" + + >>> container.logs() + "Reticulating spline 1...\n" + + >>> container.stop() + +You can stream logs: + +.. code-block:: python + + >>> for line in container.logs(stream=True): + ... print(line.strip()) + Reticulating spline 2... + Reticulating spline 3... + ... + +You can manage images: + +.. code-block:: python + + >>> client.images.pull('nginx') + + + >>> client.images.list() + [, , ...] + +That's just a taste of what you can do with the Docker SDK for Python. For more, :doc:`take a look at the reference `. + +.. toctree:: + :hidden: + :maxdepth: 2 + + client + configs + containers + images + networks + nodes + plugins + secrets + services + swarm + volumes + api + tls + user_guides/index + change-log diff --git a/docs/networks.rst b/docs/networks.rst new file mode 100644 index 0000000000..b585f0bdaa --- /dev/null +++ b/docs/networks.rst @@ -0,0 +1,34 @@ +Networks +======== + +.. py:module:: docker.models.networks + +Create and manage networks on the server. For more information about networks, `see the Engine documentation `_. + +Methods available on ``client.networks``: + +.. rst-class:: hide-signature +.. py:class:: NetworkCollection + + .. automethod:: create + .. automethod:: get + .. automethod:: list + .. automethod:: prune + +Network objects +----------------- + +.. autoclass:: Network() + + .. autoattribute:: id + .. autoattribute:: short_id + .. autoattribute:: name + .. autoattribute:: containers + .. py:attribute:: attrs + + The raw representation of this object from the server. + + .. automethod:: connect + .. automethod:: disconnect + .. automethod:: reload + .. automethod:: remove diff --git a/docs/nodes.rst b/docs/nodes.rst new file mode 100644 index 0000000000..8ef1e20b29 --- /dev/null +++ b/docs/nodes.rst @@ -0,0 +1,30 @@ +Nodes +===== + +.. py:module:: docker.models.nodes + +Get and list nodes in a swarm. Before you can use these methods, you first need to :doc:`join or initialize a swarm `. + +Methods available on ``client.nodes``: + +.. rst-class:: hide-signature +.. py:class:: NodeCollection + + .. automethod:: get(id_or_name) + .. automethod:: list(**kwargs) + +Node objects +------------ + +.. autoclass:: Node() + + .. autoattribute:: id + .. autoattribute:: short_id + .. py:attribute:: attrs + + The raw representation of this object from the server. + + .. autoattribute:: version + + .. automethod:: reload + .. automethod:: update diff --git a/docs/plugins.rst b/docs/plugins.rst new file mode 100644 index 0000000000..560bc38262 --- /dev/null +++ b/docs/plugins.rst @@ -0,0 +1,38 @@ +Plugins +======= + +.. 
py:module:: docker.models.plugins + +Manage plugins on the server. + +Methods available on ``client.plugins``: + +.. rst-class:: hide-signature +.. py:class:: PluginCollection + + .. automethod:: get + .. automethod:: install + .. automethod:: list + + +Plugin objects +-------------- + +.. autoclass:: Plugin() + + .. autoattribute:: id + .. autoattribute:: short_id + .. autoattribute:: name + .. autoattribute:: enabled + .. autoattribute:: settings + .. py:attribute:: attrs + + The raw representation of this object from the server. + + .. automethod:: configure + .. automethod:: disable + .. automethod:: enable + .. automethod:: reload + .. automethod:: push + .. automethod:: remove + .. automethod:: upgrade diff --git a/docs/port-bindings.md b/docs/port-bindings.md deleted file mode 100644 index 7456b86f08..0000000000 --- a/docs/port-bindings.md +++ /dev/null @@ -1,39 +0,0 @@ -# Port bindings -Port bindings is done in two parts. Firstly, by providing a list of ports to -open inside the container in the `Client().create_container()` method. -Bindings are declared in the `host_config` parameter. - -```python -container_id = c.create_container( - 'busybox', 'ls', ports=[1111, 2222], - host_config=docker.utils.create_host_config(port_bindings={ - 1111: 4567, - 2222: None - }) -) -``` - - -You can limit the host address on which the port will be exposed like such: - -```python -docker.utils.create_host_config(port_bindings={1111: ('127.0.0.1', 4567)}) -``` - -Or without host port assignment: - -```python -docker.utils.create_host_config(port_bindings={1111: ('127.0.0.1',)}) -``` - -If you wish to use UDP instead of TCP (default), you need to declare ports -as such in both the config and host config: - -```python -container_id = c.create_container( - 'busybox', 'ls', ports=[(1111, 'udp'), 2222], - host_config=docker.utils.create_host_config(port_bindings={ - '1111/udp': 4567, 2222: None - }) -) -``` diff --git a/docs/secrets.rst b/docs/secrets.rst new file mode 100644 index 0000000000..d1c39f1a16 --- /dev/null +++ b/docs/secrets.rst @@ -0,0 +1,30 @@ +Secrets +======= + +.. py:module:: docker.models.secrets + +Manage secrets on the server. + +Methods available on ``client.secrets``: + +.. rst-class:: hide-signature +.. py:class:: SecretCollection + + .. automethod:: create + .. automethod:: get + .. automethod:: list + + +Secret objects +-------------- + +.. autoclass:: Secret() + + .. autoattribute:: id + .. autoattribute:: name + .. py:attribute:: attrs + + The raw representation of this object from the server. + + .. automethod:: reload + .. automethod:: remove diff --git a/docs/services.rst b/docs/services.rst new file mode 100644 index 0000000000..8f44428872 --- /dev/null +++ b/docs/services.rst @@ -0,0 +1,39 @@ +Services +======== + +.. py:module:: docker.models.services + +Manage services on a swarm. For more information about services, `see the Engine documentation `_. + +Before you can use any of these methods, you first need to :doc:`join or initialize a swarm `. + +Methods available on ``client.services``: + +.. rst-class:: hide-signature +.. py:class:: ServiceCollection + + .. automethod:: create + .. automethod:: get + .. automethod:: list + +Service objects +--------------- + +.. autoclass:: Service() + + .. autoattribute:: id + .. autoattribute:: short_id + .. autoattribute:: name + .. autoattribute:: version + .. py:attribute:: attrs + + The raw representation of this object from the server. + + + .. automethod:: force_update + .. automethod:: logs + .. automethod:: reload + .. 
automethod:: remove + .. automethod:: scale + .. automethod:: tasks + .. automethod:: update diff --git a/docs/swarm.rst b/docs/swarm.rst new file mode 100644 index 0000000000..cab9def70a --- /dev/null +++ b/docs/swarm.rst @@ -0,0 +1,26 @@ +Swarm +===== + +.. py:module:: docker.models.swarm + +Manage `Docker Engine's swarm mode `_. + +To use any swarm methods, you first need to make the Engine part of a swarm. This can be done by either initializing a new swarm with :py:meth:`~Swarm.init`, or joining an existing swarm with :py:meth:`~Swarm.join`. + +These methods are available on ``client.swarm``: + +.. rst-class:: hide-signature +.. py:class:: Swarm + + .. automethod:: get_unlock_key() + .. automethod:: init() + .. automethod:: join() + .. automethod:: leave() + .. automethod:: unlock() + .. automethod:: update() + .. automethod:: reload() + + .. autoattribute:: version + .. py:attribute:: attrs + + The raw representation of this object from the server. diff --git a/docs/tls.md b/docs/tls.md deleted file mode 100644 index 85a22ee357..0000000000 --- a/docs/tls.md +++ /dev/null @@ -1,86 +0,0 @@ -## Connection to daemon using HTTPS - -**Note:** *These instructions are docker-py specific. Please refer to -[http://docs.docker.com/articles/https/](http://docs.docker.com/articles/https/) -first.* - -## TLSConfig - -**Params**: - -* client_cert (tuple of str): Path to client cert, path to client key -* ca_cert (str): Path to CA cert file -* verify (bool or str): This can be `False` or a path to a CA Cert file -* ssl_version (int): A valid [SSL version]( -https://docs.python.org/3.4/library/ssl.html#ssl.PROTOCOL_TLSv1) -* assert_hostname (bool): Verify hostname of docker daemon - -### configure_client - -**Params**: - -* client: ([Client](api.md#client-api)): A client to apply this config to - - -## Authenticate server based on public/default CA pool - -```python -client = docker.Client(base_url='', tls=True) -``` - -Equivalent CLI options: -```bash -docker --tls ... -``` - -If you want to use TLS but don't want to verify the server certificate -(for example when testing with a self-signed certificate): - -```python -tls_config = docker.tls.TLSConfig(verify=False) -client = docker.Client(base_url='', tls=tls_config) -``` - -## Authenticate server based on given CA - -```python -tls_config = docker.tls.TLSConfig(ca_cert='/path/to/ca.pem') -client = docker.Client(base_url='', tls=tls_config) -``` - -Equivalent CLI options: -```bash -docker --tlsverify --tlscacert /path/to/ca.pem ... -``` - -## Authenticate with client certificate, do not authenticate server based on given CA - -```python -tls_config = docker.tls.TLSConfig( - client_cert=('/path/to/client-cert.pem', '/path/to/client-key.pem') -) -client = docker.Client(base_url='', tls=tls_config) -``` - -Equivalent CLI options: -```bash -docker --tls --tlscert /path/to/client-cert.pem --tlskey /path/to/client-key.pem ... -``` - -## Authenticate with client certificate, authenticate server based on given CA - -```python -tls_config = docker.tls.TLSConfig( - client_cert=('/path/to/client-cert.pem', '/path/to/client-key.pem'), - verify='/path/to/ca.pem' -) -client = docker.Client(base_url='', tls=tls_config) -``` - -Equivalent CLI options: -```bash -docker --tlsverify \ - --tlscert /path/to/client-cert.pem \ - --tlskey /path/to/client-key.pem \ - --tlscacert /path/to/ca.pem ... -``` diff --git a/docs/tls.rst b/docs/tls.rst new file mode 100644 index 0000000000..b95b468c5b --- /dev/null +++ b/docs/tls.rst @@ -0,0 +1,37 @@ +Using TLS +========= + +.. 
py:module:: docker.tls
+
+Both the main :py:class:`~docker.client.DockerClient` and low-level
+:py:class:`~docker.api.client.APIClient` can connect to the Docker daemon with TLS.
+
+This is all configured automatically if you're using :py:func:`~docker.client.from_env`, but if you need extra control you can configure it manually with a :py:class:`TLSConfig` object.
+
+Examples
+--------
+
+For example, to check the server against a specific CA certificate:
+
+.. code-block:: python
+
+   tls_config = docker.tls.TLSConfig(ca_cert='/path/to/ca.pem', verify=True)
+   client = docker.DockerClient(base_url='', tls=tls_config)
+
+This is the equivalent of ``docker --tlsverify --tlscacert /path/to/ca.pem ...``.
+
+To authenticate with client certs:
+
+.. code-block:: python
+
+   tls_config = docker.tls.TLSConfig(
+       client_cert=('/path/to/client-cert.pem', '/path/to/client-key.pem')
+   )
+   client = docker.DockerClient(base_url='', tls=tls_config)
+
+This is the equivalent of ``docker --tls --tlscert /path/to/client-cert.pem --tlskey /path/to/client-key.pem ...``.
+
+Reference
+---------
+
+.. autoclass:: TLSConfig()
diff --git a/docs/user_guides/index.rst b/docs/user_guides/index.rst
new file mode 100644
index 0000000000..79b3a909e3
--- /dev/null
+++ b/docs/user_guides/index.rst
@@ -0,0 +1,8 @@
+User guides and tutorials
+=========================
+
+.. toctree::
+   :maxdepth: 2
+
+   multiplex
+   swarm_services
\ No newline at end of file
diff --git a/docs/user_guides/multiplex.rst b/docs/user_guides/multiplex.rst
new file mode 100644
index 0000000000..7add69b121
--- /dev/null
+++ b/docs/user_guides/multiplex.rst
@@ -0,0 +1,71 @@
+Handling multiplexed streams
+============================
+
+.. note::
+   The following instructions assume you're interested in getting output from
+   an ``exec`` command. These instructions are similarly applicable to the
+   output of ``attach``.
+
+First create a container that runs in the background:
+
+>>> client = docker.from_env()
+>>> container = client.containers.run(
+...     'bfirsh/reticulate-splines', detach=True)
+
+Prepare the command we are going to use. It prints "hello stdout"
+to `stdout`, followed by "hello stderr" on `stderr`:
+
+>>> cmd = '/bin/sh -c "echo hello stdout ; echo hello stderr >&2"'
+
+We'll run this command with all four combinations of ``stream``
+and ``demux``.
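+
+Each ``exec_run`` call below returns an ``ExecResult`` named tuple. The
+examples read ``res.output``, but the tuple also carries ``res.exit_code``,
+which holds the command's exit status when ``stream=False`` and is ``None``
+when ``stream=True``:
+
+>>> res = container.exec_run(cmd)
+>>> res.exit_code
+0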
+
+With ``stream=False`` and ``demux=False``, the output is a string
+that contains both the `stdout` and the `stderr` output:
+
+>>> res = container.exec_run(cmd, stream=False, demux=False)
+>>> res.output
+b'hello stderr\nhello stdout\n'
+
+With ``stream=True`` and ``demux=False``, the output is a
+generator that yields strings containing the output of both
+`stdout` and `stderr`:
+
+>>> res = container.exec_run(cmd, stream=True, demux=False)
+>>> next(res.output)
+b'hello stdout\n'
+>>> next(res.output)
+b'hello stderr\n'
+>>> next(res.output)
+Traceback (most recent call last):
+  File "<stdin>", line 1, in <module>
+StopIteration
+
+With ``stream=True`` and ``demux=True``, the generator now
+separates the streams and yields tuples
+``(stdout, stderr)``:
+
+>>> res = container.exec_run(cmd, stream=True, demux=True)
+>>> next(res.output)
+(b'hello stdout\n', None)
+>>> next(res.output)
+(None, b'hello stderr\n')
+>>> next(res.output)
+Traceback (most recent call last):
+  File "<stdin>", line 1, in <module>
+StopIteration
+
+Finally, with ``stream=False`` and ``demux=True``, the output is a tuple ``(stdout, stderr)``:
+
+>>> res = container.exec_run(cmd, stream=False, demux=True)
+>>> res.output
+(b'hello stdout\n', b'hello stderr\n')
\ No newline at end of file
diff --git a/docs/user_guides/swarm_services.md b/docs/user_guides/swarm_services.md
new file mode 100644
index 0000000000..5c3a80d2d4
--- /dev/null
+++ b/docs/user_guides/swarm_services.md
@@ -0,0 +1,69 @@
+# Swarm services
+
+> Warning:
+> This is a stale document and may contain outdated information.
+> Refer to the API docs for updated classes and method signatures.
+
+Starting with Engine version 1.12 (API 1.24), it is possible to manage services
+using the Docker Engine API. Note that the engine needs to be part of a
+[Swarm cluster](../swarm.html) before you can use the service-related methods.
+
+## Creating a service
+
+The `APIClient.create_service` method lets you create a new service inside the
+cluster. The method takes several arguments, `task_template` being mandatory.
+This dictionary of values is most easily produced by instantiating a
+`TaskTemplate` object.
+
+```python
+container_spec = docker.types.ContainerSpec(
+    image='busybox', command=['echo', 'hello']
+)
+task_tmpl = docker.types.TaskTemplate(container_spec)
+service_id = client.create_service(task_tmpl, name='my_service_name')
+```
+
+## Listing services
+
+List all existing services using the `APIClient.services` method.
+
+```python
+client.services(filters={'name': 'mysql'})
+```
+
+## Retrieving service configuration
+
+To retrieve detailed information and configuration for a specific service, you
+may use the `APIClient.inspect_service` method with the service's ID or name.
+
+```python
+client.inspect_service(service='my_service_name')
+```
+
+## Updating service configuration
+
+The `APIClient.update_service` method lets you update a service's configuration.
+The mandatory `version` argument (used to prevent concurrent writes) can be
+retrieved using `APIClient.inspect_service`.
+
+```python
+container_spec = docker.types.ContainerSpec(
+    image='busybox', command=['echo', 'hello world']
+)
+task_tmpl = docker.types.TaskTemplate(container_spec)
+
+svc_version = client.inspect_service(svc_id)['Version']['Index']
+
+client.update_service(
+    svc_id, svc_version, name='new_name', task_template=task_tmpl
+)
+```
+
+## Removing a service
+
+A service can be removed simply by using the `APIClient.remove_service` method.
+Either the service name or service ID can be used as an argument.
+ +```python +client.remove_service('my_service_name') +``` diff --git a/docs/volumes.md b/docs/volumes.md deleted file mode 100644 index 16c3228e52..0000000000 --- a/docs/volumes.md +++ /dev/null @@ -1,34 +0,0 @@ -# Using volumes - -Volume declaration is done in two parts. Provide a list of mountpoints to -the `Client().create_container()` method, and declare mappings in the -`host_config` section. - -```python -container_id = c.create_container( - 'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'], - host_config=docker.utils.create_host_config(binds={ - '/home/user1/': { - 'bind': '/mnt/vol2', - 'mode': 'rw', - }, - '/var/www': { - 'bind': '/mnt/vol1', - 'mode': 'ro', - } - }) -) -``` - -You can alternatively specify binds as a list. This code is equivalent to the -example above: - -```python -container_id = c.create_container( - 'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'], - host_config=docker.utils.create_host_config(binds=[ - '/home/user1/:/mnt/vol2', - '/var/www:/mnt/vol1:ro', - ]) -) -``` diff --git a/docs/volumes.rst b/docs/volumes.rst new file mode 100644 index 0000000000..fcd022a574 --- /dev/null +++ b/docs/volumes.rst @@ -0,0 +1,32 @@ +Volumes +======= + +.. py:module:: docker.models.volumes + +Manage volumes on the server. + +Methods available on ``client.volumes``: + +.. rst-class:: hide-signature +.. py:class:: VolumeCollection + + .. automethod:: create + .. automethod:: get + .. automethod:: list + .. automethod:: prune + +Volume objects +-------------- + +.. autoclass:: Volume() + + .. autoattribute:: id + .. autoattribute:: short_id + .. autoattribute:: name + .. py:attribute:: attrs + + The raw representation of this object from the server. + + + .. automethod:: reload + .. automethod:: remove diff --git a/mkdocs.yml b/mkdocs.yml deleted file mode 100644 index 8293cbc4b5..0000000000 --- a/mkdocs.yml +++ /dev/null @@ -1,17 +0,0 @@ -site_name: docker-py Documentation -site_description: An API client for Docker written in Python -site_favicon: favicon_whale.png -site_url: http://docker-py.readthedocs.org -repo_url: https://github.com/docker/docker-py/ -theme: readthedocs -pages: -- Home: index.md -- Client API: api.md -- Port Bindings: port-bindings.md -- Using Volumes: volumes.md -- Using TLS: tls.md -- Host devices: host-devices.md -- Host configuration: hostconfig.md -- Using with boot2docker: boot2docker.md -- Change Log: change_log.md -- Contributing: contributing.md diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000..525a9b81a8 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,102 @@ +[build-system] +requires = ["hatchling", "hatch-vcs"] +build-backend = "hatchling.build" + +[project] +name = "docker" +dynamic = ["version"] +description = "A Python library for the Docker Engine API." 
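+# "version" is declared dynamic above and is filled in from git tags at build
+# time by hatch-vcs; see [tool.hatch.version] and [tool.hatch.build.hooks.vcs]
+# further down.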
+readme = "README.md" +license = "Apache-2.0" +requires-python = ">=3.8" +maintainers = [ + { name = "Docker Inc.", email = "no-reply@docker.com" }, +] +classifiers = [ + "Development Status :: 5 - Production/Stable", + "Environment :: Other Environment", + "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "Operating System :: OS Independent", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Topic :: Software Development", + "Topic :: Utilities", +] + +dependencies = [ + "requests >= 2.26.0", + "urllib3 >= 1.26.0", + "pywin32>=304; sys_platform == \"win32\"", +] + +[project.optional-dependencies] +# ssh feature allows DOCKER_HOST=ssh://... style connections +ssh = [ + "paramiko>=2.4.3", +] +# tls is always supported, the feature is a no-op for backwards compatibility +tls = [] +# websockets can be used as an alternate container attach mechanism but +# by default docker-py hijacks the TCP connection and does not use Websockets +# unless attach_socket(container, ws=True) is called +websockets = [ + "websocket-client >= 1.3.0", +] +# docs are dependencies required to build the ReadTheDocs site +# this is only needed for CI / working on the docs! +docs = [ + "myst-parser==0.18.0", + "Sphinx==5.1.1", + +] +# dev are dependencies required to test & lint this project +# this is only needed if you are making code changes to docker-py! +dev = [ + "coverage==7.2.7", + "pytest==7.4.2", + "pytest-cov==4.1.0", + "pytest-timeout==2.1.0", + "ruff==0.1.8", +] + +[project.urls] +Changelog = "https://docker-py.readthedocs.io/en/stable/change-log.html" +Documentation = "https://docker-py.readthedocs.io" +Homepage = "https://github.com/docker/docker-py" +Source = "https://github.com/docker/docker-py" +Tracker = "https://github.com/docker/docker-py/issues" + +[tool.hatch.version] +source = "vcs" + +[tool.hatch.build.hooks.vcs] +version-file = "docker/_version.py" + +[tool.hatch.build.targets.sdist] +include = [ + "/docker", +] + +[tool.ruff] +target-version = "py38" +extend-select = [ + "B", + "C", + "F", + "I", + "UP", + "W", +] +ignore = [ + "UP012", # unnecessary `UTF-8` argument (we want to be explicit) + "C901", # too complex (there's a whole bunch of these) +] + +[tool.ruff.per-file-ignores] +"**/__init__.py" = ["F401"] diff --git a/pytest.ini b/pytest.ini new file mode 100644 index 0000000000..d4f718e782 --- /dev/null +++ b/pytest.ini @@ -0,0 +1,5 @@ +[pytest] +addopts = --tb=short -rxs + +junit_suite_name = docker-py +junit_family = xunit2 diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index b23ea488ec..0000000000 --- a/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -requests==2.5.3 -six>=1.3.0 -websocket-client==0.32.0 diff --git a/scripts/release.sh b/scripts/release.sh new file mode 100755 index 0000000000..d9e7a055a1 --- /dev/null +++ b/scripts/release.sh @@ -0,0 +1,41 @@ +#!/bin/bash +# +# Create the official release +# + +VERSION=$1 +REPO=docker/docker-py +GITHUB_REPO=git@github.com:$REPO + +if [ -z $VERSION ]; then + echo "Usage: $0 VERSION [upload]" + exit 1 +fi + +echo "##> Removing stale build files and other untracked files" +git clean -x -d -i +test -z "$(git clean -x -d -n)" || exit 1 + +echo "##> Tagging the release as 
$VERSION" +git tag $VERSION +if [[ $? != 0 ]]; then + head_commit=$(git show --pretty=format:%H HEAD) + tag_commit=$(git show --pretty=format:%H $VERSION) + if [[ $head_commit != $tag_commit ]]; then + echo "ERROR: tag already exists, but isn't the current HEAD" + exit 1 + fi +fi +if [[ $2 == 'upload' ]]; then + echo "##> Pushing tag to github" + git push $GITHUB_REPO $VERSION || exit 1 +fi + + +echo "##> sdist & wheel" +python setup.py sdist bdist_wheel + +if [[ $2 == 'upload' ]]; then + echo '##> Uploading sdist to pypi' + twine upload dist/docker-$VERSION* +fi diff --git a/scripts/versions.py b/scripts/versions.py new file mode 100755 index 0000000000..75e5355ebf --- /dev/null +++ b/scripts/versions.py @@ -0,0 +1,76 @@ +import operator +import re +from collections import namedtuple + +import requests + +base_url = 'https://download.docker.com/linux/static/{0}/x86_64/' +categories = [ + 'edge', + 'stable', + 'test' +] + +STAGES = ['tp', 'beta', 'rc'] + + +class Version(namedtuple('_Version', 'major minor patch stage edition')): + + @classmethod + def parse(cls, version): + edition = None + version = version.lstrip('v') + version, _, stage = version.partition('-') + if stage: + if not any(marker in stage for marker in STAGES): + edition = stage + stage = None + elif '-' in stage: + edition, stage = stage.split('-', 1) + major, minor, patch = version.split('.', 2) + return cls(major, minor, patch, stage, edition) + + @property + def major_minor(self): + return self.major, self.minor + + @property + def order(self): + """Return a representation that allows this object to be sorted + correctly with the default comparator. + """ + # non-GA releases should appear before GA releases + # Order: tp -> beta -> rc -> GA + if self.stage: + for st in STAGES: + if st in self.stage: + stage = (STAGES.index(st), self.stage) + break + else: + stage = (len(STAGES),) + + return (int(self.major), int(self.minor), int(self.patch)) + stage + + def __str__(self): + stage = f'-{self.stage}' if self.stage else '' + edition = f'-{self.edition}' if self.edition else '' + return '.'.join(map(str, self[:3])) + edition + stage + + +def main(): + results = set() + for url in [base_url.format(cat) for cat in categories]: + res = requests.get(url) + content = res.text + versions = [Version.parse(v) for v in re.findall( + r'"docker-([0-9]+\.[0-9]+\.[0-9]+-?.*)\.tgz"', content + )] + sorted_versions = sorted( + versions, reverse=True, key=operator.attrgetter('order') + ) + latest = sorted_versions[0] + results.add(str(latest)) + print(' '.join(results)) + +if __name__ == '__main__': + main() diff --git a/setup.py b/setup.py deleted file mode 100644 index 485d33c77a..0000000000 --- a/setup.py +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env python -import os -import sys -from setuptools import setup - -ROOT_DIR = os.path.dirname(__file__) -SOURCE_DIR = os.path.join(ROOT_DIR) - -requirements = [ - 'requests >= 2.5.2', - 'six >= 1.3.0', -] - -if sys.version_info[0] < 3: - requirements.append('websocket-client >= 0.32.0') - -exec(open('docker/version.py').read()) - -with open('./test-requirements.txt') as test_reqs_txt: - test_requirements = [line for line in test_reqs_txt] - - -setup( - name="docker-py", - version=version, - description="Python client for Docker.", - url='https://github.com/docker/docker-py/', - packages=[ - 'docker', 'docker.auth', 'docker.unixconn', 'docker.utils', - 'docker.utils.ports', 'docker.ssladapter' - ], - install_requires=requirements, - tests_require=test_requirements, - 
zip_safe=False, - test_suite='tests', - classifiers=[ - 'Development Status :: 4 - Beta', - 'Environment :: Other Environment', - 'Intended Audience :: Developers', - 'Operating System :: OS Independent', - 'Programming Language :: Python', - 'Programming Language :: Python :: 2.6', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3.2', - 'Programming Language :: Python :: 3.3', - 'Programming Language :: Python :: 3.4', - 'Topic :: Utilities', - 'License :: OSI Approved :: Apache Software License', - ], -) diff --git a/test-requirements.txt b/test-requirements.txt deleted file mode 100644 index 969f7a2342..0000000000 --- a/test-requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -mock==1.0.1 -coverage==3.7.1 diff --git a/tests/Dockerfile b/tests/Dockerfile new file mode 100644 index 0000000000..1d967e563b --- /dev/null +++ b/tests/Dockerfile @@ -0,0 +1,35 @@ +# syntax=docker/dockerfile:1 + +ARG PYTHON_VERSION=3.12 +FROM python:${PYTHON_VERSION} + +RUN apt-get update && apt-get -y install --no-install-recommends \ + gnupg2 \ + pass + +# Add SSH keys and set permissions +COPY tests/ssh/config/client /root/.ssh +COPY tests/ssh/config/server/known_ed25519.pub /root/.ssh/known_hosts +RUN sed -i '1s;^;dpy-dind-ssh ;' /root/.ssh/known_hosts +RUN chmod -R 600 /root/.ssh + +COPY ./tests/gpg-keys /gpg-keys +RUN gpg2 --import gpg-keys/secret +RUN gpg2 --import-ownertrust gpg-keys/ownertrust +RUN yes | pass init $(gpg2 --no-auto-check-trustdb --list-secret-key | awk '/^sec/{getline; $1=$1; print}') +RUN gpg2 --check-trustdb +ARG CREDSTORE_VERSION=v0.6.3 +RUN curl -sSL -o /opt/docker-credential-pass.tar.gz \ + https://github.com/docker/docker-credential-helpers/releases/download/$CREDSTORE_VERSION/docker-credential-pass-$CREDSTORE_VERSION-amd64.tar.gz && \ + tar -xf /opt/docker-credential-pass.tar.gz -O > /usr/local/bin/docker-credential-pass && \ + rm -rf /opt/docker-credential-pass.tar.gz && \ + chmod +x /usr/local/bin/docker-credential-pass + +WORKDIR /src +COPY . . 
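+
+# Install the SDK itself (with the dev, ssh and websockets extras) through a
+# pip cache mount; SETUPTOOLS_SCM_PRETEND_VERSION pins the package version,
+# since the build context may not include the git metadata that
+# setuptools-scm/hatch-vcs would otherwise read.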
+ +ARG VERSION=0.0.0.dev0 +RUN --mount=type=cache,target=/cache/pip \ + PIP_CACHE_DIR=/cache/pip \ + SETUPTOOLS_SCM_PRETEND_VERSION=${VERSION} \ + pip install .[dev,ssh,websockets] diff --git a/tests/Dockerfile-dind-certs b/tests/Dockerfile-dind-certs new file mode 100644 index 0000000000..9d5c58fb2d --- /dev/null +++ b/tests/Dockerfile-dind-certs @@ -0,0 +1,53 @@ +# syntax=docker/dockerfile:1 + +ARG PYTHON_VERSION=3.12 + +FROM python:${PYTHON_VERSION} +RUN mkdir /tmp/certs +VOLUME /certs + +WORKDIR /tmp/certs + +# ---- CA (with proper v3_ca) ---- +RUN openssl genrsa -aes256 -passout pass:foobar -out ca-key.pem 4096 +COPY <<'EOF' /tmp/ca.cnf +[req] +prompt = no +distinguished_name = req_distinguished_name +x509_extensions = v3_ca + +[req_distinguished_name] +countryName = AU + +[v3_ca] +basicConstraints = critical, CA:TRUE +keyUsage = critical, keyCertSign, cRLSign +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always,issuer +EOF +RUN openssl req -new -x509 -passin pass:foobar -config /tmp/ca.cnf -days 365 -key ca-key.pem -sha256 -out ca.pem + +# ---- Server cert (SAN + KU/EKU) ---- +RUN openssl genrsa -out server-key.pem 4096 +RUN openssl req -subj "/CN=docker" -sha256 -new -key server-key.pem -out server.csr +COPY <<'EOF' /tmp/server-ext.cnf +basicConstraints = CA:FALSE +keyUsage = critical, digitalSignature, keyEncipherment +extendedKeyUsage = serverAuth +subjectAltName = DNS:docker, DNS:localhost +EOF +RUN openssl x509 -req -days 365 -passin pass:foobar -sha256 -in server.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out server-cert.pem -extfile /tmp/server-ext.cnf + +# ---- Client cert (KU/EKU) ---- +RUN openssl genrsa -out key.pem 4096 +RUN openssl req -passin pass:foobar -subj '/CN=client' -new -key key.pem -out client.csr +COPY <<'EOF' /tmp/client-ext.cnf +basicConstraints = CA:FALSE +keyUsage = critical, digitalSignature +extendedKeyUsage = clientAuth +EOF +RUN openssl x509 -req -passin pass:foobar -days 365 -sha256 -in client.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out cert.pem -extfile /tmp/client-ext.cnf +RUN chmod -v 0400 ca-key.pem key.pem server-key.pem +RUN chmod -v 0444 ca.pem server-cert.pem cert.pem + +CMD cp -R /tmp/certs/* /certs && while true; do sleep 1; done diff --git a/tests/Dockerfile-ssh-dind b/tests/Dockerfile-ssh-dind new file mode 100644 index 0000000000..49529f84b7 --- /dev/null +++ b/tests/Dockerfile-ssh-dind @@ -0,0 +1,20 @@ +# syntax=docker/dockerfile:1 + +ARG API_VERSION=1.45 +ARG ENGINE_VERSION=26.1 + +FROM docker:${ENGINE_VERSION}-dind + +RUN apk add --no-cache --upgrade \ + openssh + +COPY tests/ssh/config/server /etc/ssh/ + +# set authorized keys for client paswordless connection +COPY tests/ssh/config/client/id_rsa.pub /root/.ssh/authorized_keys + +# RUN echo "root:root" | chpasswd +RUN chmod -R 600 /etc/ssh \ + && chmod -R 600 /root/.ssh \ + && ln -s /usr/local/bin/docker /usr/bin/docker +EXPOSE 22 diff --git a/tests/base.py b/tests/base.py deleted file mode 100644 index 1d5a370d5e..0000000000 --- a/tests/base.py +++ /dev/null @@ -1,11 +0,0 @@ -import sys -import unittest - -import six - - -class BaseTestCase(unittest.TestCase): - def assertIn(self, object, collection): - if six.PY2 and sys.version_info[1] <= 6: - return self.assertTrue(object in collection) - return super(BaseTestCase, self).assertIn(object, collection) diff --git a/tests/fake_api.py b/tests/fake_api.py deleted file mode 100644 index d201838e71..0000000000 --- a/tests/fake_api.py +++ /dev/null @@ -1,467 +0,0 @@ -# Copyright 2013 dotCloud inc. 
- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import fake_stat - -CURRENT_VERSION = 'v1.19' - -FAKE_CONTAINER_ID = '3cc2351ab11b' -FAKE_IMAGE_ID = 'e9aa60c60128' -FAKE_EXEC_ID = 'd5d177f121dc' -FAKE_IMAGE_NAME = 'test_image' -FAKE_TARBALL_PATH = '/path/to/tarball' -FAKE_REPO_NAME = 'repo' -FAKE_TAG_NAME = 'tag' -FAKE_FILE_NAME = 'file' -FAKE_URL = 'myurl' -FAKE_PATH = '/path' - -# Each method is prefixed with HTTP method (get, post...) -# for clarity and readability - - -def get_fake_raw_version(): - status_code = 200 - response = { - "ApiVersion": "1.18", - "GitCommit": "fake-commit", - "GoVersion": "go1.3.3", - "Version": "1.5.0" - } - return status_code, response - - -def get_fake_version(): - status_code = 200 - response = {'GoVersion': '1', 'Version': '1.1.1', - 'GitCommit': 'deadbeef+CHANGES'} - return status_code, response - - -def get_fake_info(): - status_code = 200 - response = {'Containers': 1, 'Images': 1, 'Debug': False, - 'MemoryLimit': False, 'SwapLimit': False, - 'IPv4Forwarding': True} - return status_code, response - - -def get_fake_search(): - status_code = 200 - response = [{'Name': 'busybox', 'Description': 'Fake Description'}] - return status_code, response - - -def get_fake_images(): - status_code = 200 - response = [{ - 'Id': FAKE_IMAGE_ID, - 'Created': '2 days ago', - 'Repository': 'busybox', - 'RepoTags': ['busybox:latest', 'busybox:1.0'], - }] - return status_code, response - - -def get_fake_image_history(): - status_code = 200 - response = [ - { - "Id": "b750fe79269d", - "Created": 1364102658, - "CreatedBy": "/bin/bash" - }, - { - "Id": "27cf78414709", - "Created": 1364068391, - "CreatedBy": "" - } - ] - - return status_code, response - - -def post_fake_import_image(): - status_code = 200 - response = 'Import messages...' 
- - return status_code, response - - -def get_fake_containers(): - status_code = 200 - response = [{ - 'Id': FAKE_CONTAINER_ID, - 'Image': 'busybox:latest', - 'Created': '2 days ago', - 'Command': 'true', - 'Status': 'fake status' - }] - return status_code, response - - -def post_fake_start_container(): - status_code = 200 - response = {'Id': FAKE_CONTAINER_ID} - return status_code, response - - -def post_fake_resize_container(): - status_code = 200 - response = {'Id': FAKE_CONTAINER_ID} - return status_code, response - - -def post_fake_create_container(): - status_code = 200 - response = {'Id': FAKE_CONTAINER_ID} - return status_code, response - - -def get_fake_inspect_container(): - status_code = 200 - response = { - 'Id': FAKE_CONTAINER_ID, - 'Config': {'Privileged': True}, - 'ID': FAKE_CONTAINER_ID, - 'Image': 'busybox:latest', - "State": { - "Running": True, - "Pid": 0, - "ExitCode": 0, - "StartedAt": "2013-09-25T14:01:18.869545111+02:00", - "Ghost": False - }, - "MacAddress": "02:42:ac:11:00:0a" - } - return status_code, response - - -def get_fake_inspect_image(): - status_code = 200 - response = { - 'id': FAKE_IMAGE_ID, - 'parent': "27cf784147099545", - 'created': "2013-03-23T22:24:18.818426-07:00", - 'container': FAKE_CONTAINER_ID, - 'container_config': - { - "Hostname": "", - "User": "", - "Memory": 0, - "MemorySwap": 0, - "AttachStdin": False, - "AttachStdout": False, - "AttachStderr": False, - "PortSpecs": "", - "Tty": True, - "OpenStdin": True, - "StdinOnce": False, - "Env": "", - "Cmd": ["/bin/bash"], - "Dns": "", - "Image": "base", - "Volumes": "", - "VolumesFrom": "", - "WorkingDir": "" - }, - 'Size': 6823592 - } - return status_code, response - - -def get_fake_port(): - status_code = 200 - response = { - 'HostConfig': { - 'Binds': None, - 'ContainerIDFile': '', - 'Links': None, - 'LxcConf': None, - 'PortBindings': { - '1111': None, - '1111/tcp': [{'HostIp': '127.0.0.1', 'HostPort': '4567'}], - '2222': None - }, - 'Privileged': False, - 'PublishAllPorts': False - }, - 'NetworkSettings': { - 'Bridge': 'docker0', - 'PortMapping': None, - 'Ports': { - '1111': None, - '1111/tcp': [{'HostIp': '127.0.0.1', 'HostPort': '4567'}], - '2222': None}, - 'MacAddress': '02:42:ac:11:00:0a' - } - } - return status_code, response - - -def get_fake_insert_image(): - status_code = 200 - response = {'StatusCode': 0} - return status_code, response - - -def get_fake_wait(): - status_code = 200 - response = {'StatusCode': 0} - return status_code, response - - -def get_fake_logs(): - status_code = 200 - response = (b'\x01\x00\x00\x00\x00\x00\x00\x11Flowering Nights\n' - b'\x01\x00\x00\x00\x00\x00\x00\x10(Sakuya Iyazoi)\n') - return status_code, response - - -def get_fake_diff(): - status_code = 200 - response = [{'Path': '/test', 'Kind': 1}] - return status_code, response - - -def get_fake_events(): - status_code = 200 - response = [{'status': 'stop', 'id': FAKE_CONTAINER_ID, - 'from': FAKE_IMAGE_ID, 'time': 1423247867}] - return status_code, response - - -def get_fake_export(): - status_code = 200 - response = 'Byte Stream....' 
- return status_code, response - - -def post_fake_exec_create(): - status_code = 200 - response = {'Id': FAKE_EXEC_ID} - return status_code, response - - -def post_fake_exec_start(): - status_code = 200 - response = (b'\x01\x00\x00\x00\x00\x00\x00\x11bin\nboot\ndev\netc\n' - b'\x01\x00\x00\x00\x00\x00\x00\x12lib\nmnt\nproc\nroot\n' - b'\x01\x00\x00\x00\x00\x00\x00\x0csbin\nusr\nvar\n') - return status_code, response - - -def post_fake_exec_resize(): - status_code = 201 - return status_code, '' - - -def get_fake_exec_inspect(): - return 200, { - 'OpenStderr': True, - 'OpenStdout': True, - 'Container': get_fake_inspect_container()[1], - 'Running': False, - 'ProcessConfig': { - 'arguments': ['hello world'], - 'tty': False, - 'entrypoint': 'echo', - 'privileged': False, - 'user': '' - }, - 'ExitCode': 0, - 'ID': FAKE_EXEC_ID, - 'OpenStdin': False - } - - -def post_fake_stop_container(): - status_code = 200 - response = {'Id': FAKE_CONTAINER_ID} - return status_code, response - - -def post_fake_kill_container(): - status_code = 200 - response = {'Id': FAKE_CONTAINER_ID} - return status_code, response - - -def post_fake_pause_container(): - status_code = 200 - response = {'Id': FAKE_CONTAINER_ID} - return status_code, response - - -def post_fake_unpause_container(): - status_code = 200 - response = {'Id': FAKE_CONTAINER_ID} - return status_code, response - - -def post_fake_restart_container(): - status_code = 200 - response = {'Id': FAKE_CONTAINER_ID} - return status_code, response - - -def post_fake_rename_container(): - status_code = 204 - return status_code, None - - -def delete_fake_remove_container(): - status_code = 200 - response = {'Id': FAKE_CONTAINER_ID} - return status_code, response - - -def post_fake_image_create(): - status_code = 200 - response = {'Id': FAKE_IMAGE_ID} - return status_code, response - - -def delete_fake_remove_image(): - status_code = 200 - response = {'Id': FAKE_IMAGE_ID} - return status_code, response - - -def get_fake_get_image(): - status_code = 200 - response = 'Byte Stream....' 
- return status_code, response - - -def post_fake_load_image(): - status_code = 200 - response = {'Id': FAKE_IMAGE_ID} - return status_code, response - - -def post_fake_commit(): - status_code = 200 - response = {'Id': FAKE_CONTAINER_ID} - return status_code, response - - -def post_fake_push(): - status_code = 200 - response = {'Id': FAKE_IMAGE_ID} - return status_code, response - - -def post_fake_build_container(): - status_code = 200 - response = {'Id': FAKE_CONTAINER_ID} - return status_code, response - - -def post_fake_tag_image(): - status_code = 200 - response = {'Id': FAKE_IMAGE_ID} - return status_code, response - - -def get_fake_stats(): - status_code = 200 - response = fake_stat.OBJ - return status_code, response - -# Maps real api url to fake response callback -prefix = 'http+docker://localunixsocket' -fake_responses = { - '{0}/version'.format(prefix): - get_fake_raw_version, - '{1}/{0}/version'.format(CURRENT_VERSION, prefix): - get_fake_version, - '{1}/{0}/info'.format(CURRENT_VERSION, prefix): - get_fake_info, - '{1}/{0}/images/search'.format(CURRENT_VERSION, prefix): - get_fake_search, - '{1}/{0}/images/json'.format(CURRENT_VERSION, prefix): - get_fake_images, - '{1}/{0}/images/test_image/history'.format(CURRENT_VERSION, prefix): - get_fake_image_history, - '{1}/{0}/images/create'.format(CURRENT_VERSION, prefix): - post_fake_import_image, - '{1}/{0}/containers/json'.format(CURRENT_VERSION, prefix): - get_fake_containers, - '{1}/{0}/containers/3cc2351ab11b/start'.format(CURRENT_VERSION, prefix): - post_fake_start_container, - '{1}/{0}/containers/3cc2351ab11b/resize'.format(CURRENT_VERSION, prefix): - post_fake_resize_container, - '{1}/{0}/containers/3cc2351ab11b/json'.format(CURRENT_VERSION, prefix): - get_fake_inspect_container, - '{1}/{0}/containers/3cc2351ab11b/rename'.format(CURRENT_VERSION, prefix): - post_fake_rename_container, - '{1}/{0}/images/e9aa60c60128/tag'.format(CURRENT_VERSION, prefix): - post_fake_tag_image, - '{1}/{0}/containers/3cc2351ab11b/wait'.format(CURRENT_VERSION, prefix): - get_fake_wait, - '{1}/{0}/containers/3cc2351ab11b/logs'.format(CURRENT_VERSION, prefix): - get_fake_logs, - '{1}/{0}/containers/3cc2351ab11b/changes'.format(CURRENT_VERSION, prefix): - get_fake_diff, - '{1}/{0}/containers/3cc2351ab11b/export'.format(CURRENT_VERSION, prefix): - get_fake_export, - '{1}/{0}/containers/3cc2351ab11b/exec'.format(CURRENT_VERSION, prefix): - post_fake_exec_create, - '{1}/{0}/exec/d5d177f121dc/start'.format(CURRENT_VERSION, prefix): - post_fake_exec_start, - '{1}/{0}/exec/d5d177f121dc/json'.format(CURRENT_VERSION, prefix): - get_fake_exec_inspect, - '{1}/{0}/exec/d5d177f121dc/resize'.format(CURRENT_VERSION, prefix): - post_fake_exec_resize, - - '{1}/{0}/containers/3cc2351ab11b/stats'.format(CURRENT_VERSION, prefix): - get_fake_stats, - '{1}/{0}/containers/3cc2351ab11b/stop'.format(CURRENT_VERSION, prefix): - post_fake_stop_container, - '{1}/{0}/containers/3cc2351ab11b/kill'.format(CURRENT_VERSION, prefix): - post_fake_kill_container, - '{1}/{0}/containers/3cc2351ab11b/pause'.format(CURRENT_VERSION, prefix): - post_fake_pause_container, - '{1}/{0}/containers/3cc2351ab11b/unpause'.format(CURRENT_VERSION, prefix): - post_fake_unpause_container, - '{1}/{0}/containers/3cc2351ab11b/json'.format(CURRENT_VERSION, prefix): - get_fake_port, - '{1}/{0}/containers/3cc2351ab11b/restart'.format(CURRENT_VERSION, prefix): - post_fake_restart_container, - '{1}/{0}/containers/3cc2351ab11b'.format(CURRENT_VERSION, prefix): - delete_fake_remove_container, - 
'{1}/{0}/images/create'.format(CURRENT_VERSION, prefix): - post_fake_image_create, - '{1}/{0}/images/e9aa60c60128'.format(CURRENT_VERSION, prefix): - delete_fake_remove_image, - '{1}/{0}/images/e9aa60c60128/get'.format(CURRENT_VERSION, prefix): - get_fake_get_image, - '{1}/{0}/images/load'.format(CURRENT_VERSION, prefix): - post_fake_load_image, - '{1}/{0}/images/test_image/json'.format(CURRENT_VERSION, prefix): - get_fake_inspect_image, - '{1}/{0}/images/test_image/insert'.format(CURRENT_VERSION, prefix): - get_fake_insert_image, - '{1}/{0}/images/test_image/push'.format(CURRENT_VERSION, prefix): - post_fake_push, - '{1}/{0}/commit'.format(CURRENT_VERSION, prefix): - post_fake_commit, - '{1}/{0}/containers/create'.format(CURRENT_VERSION, prefix): - post_fake_create_container, - '{1}/{0}/build'.format(CURRENT_VERSION, prefix): - post_fake_build_container, - '{1}/{0}/events'.format(CURRENT_VERSION, prefix): - get_fake_events -} diff --git a/tests/gpg-keys/ownertrust b/tests/gpg-keys/ownertrust new file mode 100644 index 0000000000..141ea57e8d --- /dev/null +++ b/tests/gpg-keys/ownertrust @@ -0,0 +1,3 @@ +# List of assigned trustvalues, created Wed 25 Apr 2018 01:28:17 PM PDT +# (Use "gpg --import-ownertrust" to restore them) +9781B87DAB042E6FD51388A5464ED987A7B21401:6: diff --git a/tests/gpg-keys/secret b/tests/gpg-keys/secret new file mode 100644 index 0000000000..412294db84 Binary files /dev/null and b/tests/gpg-keys/secret differ diff --git a/tests/helpers.py b/tests/helpers.py new file mode 100644 index 0000000000..3d60a3faf9 --- /dev/null +++ b/tests/helpers.py @@ -0,0 +1,160 @@ +import functools +import os +import os.path +import random +import re +import socket +import tarfile +import tempfile +import time + +import paramiko +import pytest + +import docker + + +def make_tree(dirs, files): + base = tempfile.mkdtemp() + + for path in dirs: + os.makedirs(os.path.join(base, path)) + + for path in files: + with open(os.path.join(base, path), 'w') as f: + f.write("content") + + return base + + +def simple_tar(path): + f = tempfile.NamedTemporaryFile() + t = tarfile.open(mode='w', fileobj=f) + + abs_path = os.path.abspath(path) + t.add(abs_path, arcname=os.path.basename(path), recursive=False) + + t.close() + f.seek(0) + return f + + +def untar_file(tardata, filename): + with tarfile.open(mode='r', fileobj=tardata) as t: + f = t.extractfile(filename) + result = f.read() + f.close() + return result + + +def skip_if_desktop(): + def fn(f): + @functools.wraps(f) + def wrapped(self, *args, **kwargs): + info = self.client.info() + if info['Name'] == 'docker-desktop': + pytest.skip('Test does not support Docker Desktop') + return f(self, *args, **kwargs) + + return wrapped + + return fn + +def requires_api_version(version): + test_version = os.environ.get( + 'DOCKER_TEST_API_VERSION', docker.constants.DEFAULT_DOCKER_API_VERSION + ) + + return pytest.mark.skipif( + docker.utils.version_lt(test_version, version), + reason=f"API version is too low (< {version})" + ) + + +def requires_experimental(until=None): + test_version = os.environ.get( + 'DOCKER_TEST_API_VERSION', docker.constants.DEFAULT_DOCKER_API_VERSION + ) + + def req_exp(f): + @functools.wraps(f) + def wrapped(self, *args, **kwargs): + if not self.client.info()['ExperimentalBuild']: + pytest.skip('Feature requires Docker Engine experimental mode') + return f(self, *args, **kwargs) + + if until and docker.utils.version_gte(test_version, until): + return f + return wrapped + + return req_exp + + +def wait_on_condition(condition, delay=0.1, 
timeout=40): + start_time = time.time() + while not condition(): + if time.time() - start_time > timeout: + raise AssertionError(f"Timeout: {condition}") + time.sleep(delay) + + +def random_name(): + return f'dockerpytest_{random.getrandbits(64):x}' + + +def force_leave_swarm(client): + """Actually force leave a Swarm. There seems to be a bug in Swarm that + occasionally throws "context deadline exceeded" errors when leaving.""" + while True: + try: + if isinstance(client, docker.DockerClient): + return client.swarm.leave(force=True) + return client.leave_swarm(force=True) # elif APIClient + except docker.errors.APIError as e: + if e.explanation == "context deadline exceeded": + continue + else: + return + + +def swarm_listen_addr(): + return f'0.0.0.0:{random.randrange(10000, 25000)}' + + +def assert_cat_socket_detached_with_keys(sock, inputs): + if hasattr(sock, '_sock'): + sock = sock._sock + + for i in inputs: + sock.sendall(i) + time.sleep(0.5) + + # If we're using a Unix socket, the sock.send call will fail with a + # BrokenPipeError ; INET sockets will just stop receiving / sending data + # but will not raise an error + if isinstance(sock, paramiko.Channel): + with pytest.raises(OSError): + sock.sendall(b'make sure the socket is closed\n') + else: + if getattr(sock, 'family', -9) == getattr(socket, 'AF_UNIX', -1): + # We do not want to use pytest.raises here because future versions + # of the daemon no longer cause this to raise an error. + try: + sock.sendall(b'make sure the socket is closed\n') + except OSError: + return + + sock.sendall(b"make sure the socket is closed\n") + data = sock.recv(128) + # New in 18.06: error message is broadcast over the socket when reading + # after detach + assert data == b'' or data.startswith( + b'exec attach failed: error on attach stdin: read escape sequence' + ) + + +def ctrl_with(char): + if re.match('[a-z]', char): + return chr(ord(char) - ord('a') + 1).encode('ascii') + else: + raise Exception('char must be [a-z]') diff --git a/tests/testdata/certs/cert.pem b/tests/integration/__init__.py similarity index 100% rename from tests/testdata/certs/cert.pem rename to tests/integration/__init__.py diff --git a/tests/integration/api_build_test.py b/tests/integration/api_build_test.py new file mode 100644 index 0000000000..0f560159b3 --- /dev/null +++ b/tests/integration/api_build_test.py @@ -0,0 +1,597 @@ +import io +import os +import shutil +import tempfile + +import pytest + +from docker import errors +from docker.utils.proxy import ProxyConfig + +from ..helpers import random_name, requires_api_version, requires_experimental +from .base import TEST_IMG, BaseAPIIntegrationTest + + +class BuildTest(BaseAPIIntegrationTest): + def test_build_with_proxy(self): + self.client._proxy_configs = ProxyConfig( + ftp='a', http='b', https='c', no_proxy='d' + ) + + script = io.BytesIO('\n'.join([ + 'FROM busybox', + 'RUN env | grep "FTP_PROXY=a"', + 'RUN env | grep "ftp_proxy=a"', + 'RUN env | grep "HTTP_PROXY=b"', + 'RUN env | grep "http_proxy=b"', + 'RUN env | grep "HTTPS_PROXY=c"', + 'RUN env | grep "https_proxy=c"', + 'RUN env | grep "NO_PROXY=d"', + 'RUN env | grep "no_proxy=d"', + ]).encode('ascii')) + + self.client.build(fileobj=script, decode=True) + + def test_build_with_proxy_and_buildargs(self): + self.client._proxy_configs = ProxyConfig( + ftp='a', http='b', https='c', no_proxy='d' + ) + + script = io.BytesIO('\n'.join([ + 'FROM busybox', + 'RUN env | grep "FTP_PROXY=XXX"', + 'RUN env | grep "ftp_proxy=xxx"', + 'RUN env | grep "HTTP_PROXY=b"', + 'RUN 
env | grep "http_proxy=b"', + 'RUN env | grep "HTTPS_PROXY=c"', + 'RUN env | grep "https_proxy=c"', + 'RUN env | grep "NO_PROXY=d"', + 'RUN env | grep "no_proxy=d"', + ]).encode('ascii')) + + self.client.build( + fileobj=script, + decode=True, + buildargs={'FTP_PROXY': 'XXX', 'ftp_proxy': 'xxx'} + ) + + def test_build_streaming(self): + script = io.BytesIO('\n'.join([ + 'FROM busybox', + 'RUN mkdir -p /tmp/test', + 'EXPOSE 8080', + 'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz' + ' /tmp/silence.tar.gz' + ]).encode('ascii')) + stream = self.client.build(fileobj=script, decode=True) + logs = [] + for chunk in stream: + logs.append(chunk) + assert len(logs) > 0 + + def test_build_from_stringio(self): + return + script = io.StringIO('\n'.join([ + 'FROM busybox', + 'RUN mkdir -p /tmp/test', + 'EXPOSE 8080', + 'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz' + ' /tmp/silence.tar.gz' + ])) + stream = self.client.build(fileobj=script) + logs = '' + for chunk in stream: + chunk = chunk.decode('utf-8') + logs += chunk + assert logs != '' + + def test_build_with_dockerignore(self): + base_dir = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, base_dir) + + with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f: + f.write("\n".join([ + 'FROM busybox', + 'ADD . /test', + ])) + + with open(os.path.join(base_dir, '.dockerignore'), 'w') as f: + f.write("\n".join([ + 'ignored', + 'Dockerfile', + '.dockerignore', + ' ignored-with-spaces ', # check that spaces are trimmed + '!ignored/subdir/excepted-file', + '! ignored/subdir/excepted-with-spaces ' + '', # empty line, + '#*', # comment line + ])) + + with open(os.path.join(base_dir, 'not-ignored'), 'w') as f: + f.write("this file should not be ignored") + + with open(os.path.join(base_dir, '#file.txt'), 'w') as f: + f.write('this file should not be ignored') + + with open(os.path.join(base_dir, 'ignored-with-spaces'), 'w') as f: + f.write("this file should be ignored") + + subdir = os.path.join(base_dir, 'ignored', 'subdir') + os.makedirs(subdir) + with open(os.path.join(subdir, 'file'), 'w') as f: + f.write("this file should be ignored") + + with open(os.path.join(subdir, 'excepted-file'), 'w') as f: + f.write("this file should not be ignored") + + with open(os.path.join(subdir, 'excepted-with-spaces'), 'w') as f: + f.write("this file should not be ignored") + + tag = 'docker-py-test-build-with-dockerignore' + stream = self.client.build( + path=base_dir, + tag=tag, + ) + for _chunk in stream: + pass + + c = self.client.create_container(tag, ['find', '/test', '-type', 'f']) + self.client.start(c) + self.client.wait(c) + logs = self.client.logs(c) + + logs = logs.decode('utf-8') + + assert sorted(filter(None, logs.split('\n'))) == sorted([ + '/test/#file.txt', + '/test/ignored/subdir/excepted-with-spaces', + '/test/ignored/subdir/excepted-file', + '/test/not-ignored' + ]) + + def test_build_with_buildargs(self): + script = io.BytesIO('\n'.join([ + 'FROM scratch', + 'ARG test', + 'USER $test' + ]).encode('ascii')) + + stream = self.client.build( + fileobj=script, tag='buildargs', buildargs={'test': 'OK'} + ) + self.tmp_imgs.append('buildargs') + for _chunk in stream: + pass + + info = self.client.inspect_image('buildargs') + assert info['Config']['User'] == 'OK' + + @requires_api_version('1.22') + def test_build_shmsize(self): + script = io.BytesIO('\n'.join([ + 'FROM scratch', + 'CMD sh -c "echo \'Hello, World!\'"', + ]).encode('ascii')) + + tag = 'shmsize' + shmsize = 134217728 + + stream = self.client.build( + 
+            fileobj=script, tag=tag, shmsize=shmsize
+        )
+        self.tmp_imgs.append(tag)
+        for _chunk in stream:
+            pass
+
+        # There is currently no way to get the shmsize
+        # that was used to build the image
+
+    @requires_api_version('1.24')
+    def test_build_isolation(self):
+        script = io.BytesIO('\n'.join([
+            'FROM scratch',
+            'CMD sh -c "echo \'Deaf To All But The Song\'"'
+        ]).encode('ascii'))
+
+        stream = self.client.build(
+            fileobj=script, tag='isolation',
+            isolation='default'
+        )
+
+        for _chunk in stream:
+            pass
+
+    @requires_api_version('1.23')
+    def test_build_labels(self):
+        script = io.BytesIO('\n'.join([
+            'FROM scratch',
+        ]).encode('ascii'))
+
+        labels = {'test': 'OK'}
+
+        stream = self.client.build(
+            fileobj=script, tag='labels', labels=labels
+        )
+        self.tmp_imgs.append('labels')
+        for _chunk in stream:
+            pass
+
+        info = self.client.inspect_image('labels')
+        assert info['Config']['Labels'] == labels
+
+    @requires_api_version('1.25')
+    def test_build_with_cache_from(self):
+        script = io.BytesIO('\n'.join([
+            'FROM busybox',
+            'ENV FOO=bar',
+            'RUN touch baz',
+            'RUN touch bax',
+        ]).encode('ascii'))
+
+        stream = self.client.build(fileobj=script, tag='build1')
+        self.tmp_imgs.append('build1')
+        for _chunk in stream:
+            pass
+
+        stream = self.client.build(
+            fileobj=script, tag='build2', cache_from=['build1'],
+            decode=True
+        )
+        self.tmp_imgs.append('build2')
+        counter = 0
+        for chunk in stream:
+            if 'Using cache' in chunk.get('stream', ''):
+                counter += 1
+        assert counter == 3
+        self.client.remove_image('build2')
+
+        counter = 0
+        stream = self.client.build(
+            fileobj=script, tag='build2', cache_from=['nosuchtag'],
+            decode=True
+        )
+        for chunk in stream:
+            if 'Using cache' in chunk.get('stream', ''):
+                counter += 1
+        assert counter == 0
+
+    @requires_api_version('1.29')
+    def test_build_container_with_target(self):
+        script = io.BytesIO('\n'.join([
+            'FROM busybox as first',
+            'RUN mkdir -p /tmp/test',
+            'RUN touch /tmp/silence.tar.gz',
+            'FROM alpine:latest',
+            'WORKDIR /root/',
+            'COPY --from=first /tmp/silence.tar.gz .',
+            'ONBUILD RUN echo "This should not be in the final image"'
+        ]).encode('ascii'))
+
+        stream = self.client.build(
+            fileobj=script, target='first', tag='build1'
+        )
+        self.tmp_imgs.append('build1')
+        for _chunk in stream:
+            pass
+
+        info = self.client.inspect_image('build1')
+        assert 'OnBuild' not in info['Config'] or not info['Config']['OnBuild']
+
+    @requires_api_version('1.25')
+    def test_build_with_network_mode(self):
+        # Set up pingable endpoint on custom network
+        network = self.client.create_network(random_name())['Id']
+        self.tmp_networks.append(network)
+        container = self.client.create_container(TEST_IMG, 'top')
+        self.tmp_containers.append(container)
+        self.client.start(container)
+        self.client.connect_container_to_network(
+            container, network, aliases=['pingtarget.docker']
+        )
+
+        script = io.BytesIO('\n'.join([
+            'FROM busybox',
+            'RUN ping -c1 pingtarget.docker'
+        ]).encode('ascii'))
+
+        stream = self.client.build(
+            fileobj=script, network_mode=network,
+            tag='dockerpytest_customnetbuild'
+        )
+
+        self.tmp_imgs.append('dockerpytest_customnetbuild')
+        for _chunk in stream:
+            pass
+
+        assert self.client.inspect_image('dockerpytest_customnetbuild')
+
+        script.seek(0)
+        stream = self.client.build(
+            fileobj=script, network_mode='none',
+            tag='dockerpytest_nonebuild', nocache=True, decode=True
+        )
+
+        self.tmp_imgs.append('dockerpytest_nonebuild')
+        logs = list(stream)
+        assert 'errorDetail' in logs[-1]
+        assert logs[-1]['errorDetail']['code'] == 1
+
+        with 
pytest.raises(errors.NotFound): + self.client.inspect_image('dockerpytest_nonebuild') + + @requires_api_version('1.27') + def test_build_with_extra_hosts(self): + img_name = 'dockerpytest_extrahost_build' + self.tmp_imgs.append(img_name) + + script = io.BytesIO('\n'.join([ + 'FROM busybox', + 'RUN ping -c1 hello.world.test', + 'RUN ping -c1 extrahost.local.test', + 'RUN cp /etc/hosts /hosts-file' + ]).encode('ascii')) + + stream = self.client.build( + fileobj=script, tag=img_name, + extra_hosts={ + 'extrahost.local.test': '127.0.0.1', + 'hello.world.test': '127.0.0.1', + }, decode=True + ) + for chunk in stream: + if 'errorDetail' in chunk: + pytest.fail(chunk) + + assert self.client.inspect_image(img_name) + ctnr = self.run_container(img_name, 'cat /hosts-file') + logs = self.client.logs(ctnr) + logs = logs.decode('utf-8') + assert '127.0.0.1\textrahost.local.test' in logs + assert '127.0.0.1\thello.world.test' in logs + + @requires_experimental(until=None) + @requires_api_version('1.25') + def test_build_squash(self): + script = io.BytesIO('\n'.join([ + 'FROM busybox', + 'RUN echo blah > /file_1', + 'RUN echo blahblah > /file_2', + 'RUN echo blahblahblah > /file_3' + ]).encode('ascii')) + + def build_squashed(squash): + tag = 'squash' if squash else 'nosquash' + stream = self.client.build( + fileobj=script, tag=tag, squash=squash + ) + self.tmp_imgs.append(tag) + for _chunk in stream: + pass + + return self.client.inspect_image(tag) + + non_squashed = build_squashed(False) + squashed = build_squashed(True) + assert len(non_squashed['RootFS']['Layers']) == 4 + assert len(squashed['RootFS']['Layers']) == 2 + + def test_build_stderr_data(self): + control_chars = ['\x1b[91m', '\x1b[0m'] + snippet = 'Ancient Temple (Mystic Oriental Dream ~ Ancient Temple)' + script = io.BytesIO(b'\n'.join([ + b'FROM busybox', + f'RUN sh -c ">&2 echo \'{snippet}\'"'.encode('utf-8') + ])) + + stream = self.client.build( + fileobj=script, decode=True, nocache=True + ) + lines = [] + for chunk in stream: + lines.append(chunk.get('stream')) + expected = f'{control_chars[0]}{snippet}\n{control_chars[1]}' + assert any(line == expected for line in lines) + + def test_build_gzip_encoding(self): + base_dir = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, base_dir) + + with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f: + f.write("\n".join([ + 'FROM busybox', + 'ADD . 
/test', + ])) + + stream = self.client.build( + path=base_dir, decode=True, nocache=True, + gzip=True + ) + + lines = [] + for chunk in stream: + lines.append(chunk) + + assert 'Successfully built' in lines[-1]['stream'] + + def test_build_with_dockerfile_empty_lines(self): + base_dir = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, base_dir) + with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f: + f.write('FROM busybox\n') + with open(os.path.join(base_dir, '.dockerignore'), 'w') as f: + f.write('\n'.join([ + ' ', + '', + '\t\t', + '\t ', + ])) + + stream = self.client.build( + path=base_dir, decode=True, nocache=True + ) + + lines = [] + for chunk in stream: + lines.append(chunk) + assert 'Successfully built' in lines[-1]['stream'] + + def test_build_gzip_custom_encoding(self): + with pytest.raises(errors.DockerException): + self.client.build(path='.', gzip=True, encoding='text/html') + + @requires_api_version('1.32') + @requires_experimental(until=None) + def test_build_invalid_platform(self): + script = io.BytesIO(b'FROM busybox\n') + + with pytest.raises(errors.APIError) as excinfo: + stream = self.client.build(fileobj=script, platform='foobar') + for _ in stream: + pass + + # Some API versions incorrectly returns 500 status; assert 4xx or 5xx + assert excinfo.value.is_error() + assert 'unknown operating system' in excinfo.exconly() \ + or 'invalid platform' in excinfo.exconly() + + def test_build_out_of_context_dockerfile(self): + base_dir = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, base_dir) + with open(os.path.join(base_dir, 'file.txt'), 'w') as f: + f.write('hello world') + with open(os.path.join(base_dir, '.dockerignore'), 'w') as f: + f.write('.dockerignore\n') + df_dir = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, df_dir) + df_name = os.path.join(df_dir, 'Dockerfile') + with open(df_name, 'wb') as df: + df.write(('\n'.join([ + 'FROM busybox', + 'COPY . /src', + 'WORKDIR /src', + ])).encode('utf-8')) + df.flush() + img_name = random_name() + self.tmp_imgs.append(img_name) + stream = self.client.build( + path=base_dir, dockerfile=df_name, tag=img_name, + decode=True + ) + lines = [] + for chunk in stream: + lines.append(chunk) + assert 'Successfully tagged' in lines[-1]['stream'] + + ctnr = self.client.create_container(img_name, 'ls -a') + self.tmp_containers.append(ctnr) + self.client.start(ctnr) + lsdata = self.client.logs(ctnr).strip().split(b'\n') + assert len(lsdata) == 3 + assert sorted([b'.', b'..', b'file.txt']) == sorted(lsdata) + + def test_build_in_context_dockerfile(self): + base_dir = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, base_dir) + with open(os.path.join(base_dir, 'file.txt'), 'w') as f: + f.write('hello world') + with open(os.path.join(base_dir, 'custom.dockerfile'), 'w') as df: + df.write('\n'.join([ + 'FROM busybox', + 'COPY . 
/src', + 'WORKDIR /src', + ])) + img_name = random_name() + self.tmp_imgs.append(img_name) + stream = self.client.build( + path=base_dir, dockerfile='custom.dockerfile', tag=img_name, + decode=True + ) + lines = [] + for chunk in stream: + lines.append(chunk) + assert 'Successfully tagged' in lines[-1]['stream'] + + ctnr = self.client.create_container(img_name, 'ls -a') + self.tmp_containers.append(ctnr) + self.client.start(ctnr) + lsdata = self.client.logs(ctnr).strip().split(b'\n') + assert len(lsdata) == 4 + assert sorted( + [b'.', b'..', b'file.txt', b'custom.dockerfile'] + ) == sorted(lsdata) + + def test_build_in_context_nested_dockerfile(self): + base_dir = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, base_dir) + with open(os.path.join(base_dir, 'file.txt'), 'w') as f: + f.write('hello world') + subdir = os.path.join(base_dir, 'hello', 'world') + os.makedirs(subdir) + with open(os.path.join(subdir, 'custom.dockerfile'), 'w') as df: + df.write('\n'.join([ + 'FROM busybox', + 'COPY . /src', + 'WORKDIR /src', + ])) + img_name = random_name() + self.tmp_imgs.append(img_name) + stream = self.client.build( + path=base_dir, dockerfile='hello/world/custom.dockerfile', + tag=img_name, decode=True + ) + lines = [] + for chunk in stream: + lines.append(chunk) + assert 'Successfully tagged' in lines[-1]['stream'] + + ctnr = self.client.create_container(img_name, 'ls -a') + self.tmp_containers.append(ctnr) + self.client.start(ctnr) + lsdata = self.client.logs(ctnr).strip().split(b'\n') + assert len(lsdata) == 4 + assert sorted( + [b'.', b'..', b'file.txt', b'hello'] + ) == sorted(lsdata) + + def test_build_in_context_abs_dockerfile(self): + base_dir = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, base_dir) + abs_dockerfile_path = os.path.join(base_dir, 'custom.dockerfile') + with open(os.path.join(base_dir, 'file.txt'), 'w') as f: + f.write('hello world') + with open(abs_dockerfile_path, 'w') as df: + df.write('\n'.join([ + 'FROM busybox', + 'COPY . 
/src', + 'WORKDIR /src', + ])) + img_name = random_name() + self.tmp_imgs.append(img_name) + stream = self.client.build( + path=base_dir, dockerfile=abs_dockerfile_path, tag=img_name, + decode=True + ) + lines = [] + for chunk in stream: + lines.append(chunk) + assert 'Successfully tagged' in lines[-1]['stream'] + + ctnr = self.client.create_container(img_name, 'ls -a') + self.tmp_containers.append(ctnr) + self.client.start(ctnr) + lsdata = self.client.logs(ctnr).strip().split(b'\n') + assert len(lsdata) == 4 + assert sorted( + [b'.', b'..', b'file.txt', b'custom.dockerfile'] + ) == sorted(lsdata) + + @requires_api_version('1.31') + @pytest.mark.xfail( + True, + reason='Currently fails on 18.09: ' + 'https://github.com/moby/moby/issues/37920' + ) + def test_prune_builds(self): + prune_result = self.client.prune_builds() + assert 'SpaceReclaimed' in prune_result + assert isinstance(prune_result['SpaceReclaimed'], int) diff --git a/tests/integration/api_client_test.py b/tests/integration/api_client_test.py new file mode 100644 index 0000000000..ae71a57bf9 --- /dev/null +++ b/tests/integration/api_client_test.py @@ -0,0 +1,75 @@ +import time +import unittest +import warnings + +import docker +from docker.utils import kwargs_from_env + +from .base import BaseAPIIntegrationTest + + +class InformationTest(BaseAPIIntegrationTest): + def test_version(self): + res = self.client.version() + assert 'GoVersion' in res + assert 'Version' in res + + def test_info(self): + res = self.client.info() + assert 'Containers' in res + assert 'Images' in res + assert 'Debug' in res + + +class AutoDetectVersionTest(unittest.TestCase): + def test_client_init(self): + client = docker.APIClient(version='auto', **kwargs_from_env()) + client_version = client._version + api_version = client.version(api_version=False)['ApiVersion'] + assert client_version == api_version + api_version_2 = client.version()['ApiVersion'] + assert client_version == api_version_2 + client.close() + + +class ConnectionTimeoutTest(unittest.TestCase): + def setUp(self): + self.timeout = 0.5 + self.client = docker.api.APIClient( + version=docker.constants.MINIMUM_DOCKER_API_VERSION, + base_url='http://192.168.10.2:4243', + timeout=self.timeout + ) + + def test_timeout(self): + start = time.time() + res = None + # This call isn't supposed to complete, and it should fail fast. + try: + res = self.client.inspect_container('id') + except Exception: + pass + end = time.time() + assert res is None + assert end - start < 2 * self.timeout + + +class UnixconnTest(unittest.TestCase): + """ + Test UNIX socket connection adapter. + """ + + def test_resource_warnings(self): + """ + Test no warnings are produced when using the client. 
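+        A ResourceWarning here would point to a socket or HTTP response left open.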
+ """ + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + + client = docker.APIClient(version='auto', **kwargs_from_env()) + client.images() + client.close() + del client + + assert len(w) == 0, f"No warnings produced: {w[0].message}" diff --git a/tests/integration/api_config_test.py b/tests/integration/api_config_test.py new file mode 100644 index 0000000000..4261599d84 --- /dev/null +++ b/tests/integration/api_config_test.py @@ -0,0 +1,84 @@ +import pytest + +import docker + +from ..helpers import force_leave_swarm, requires_api_version +from .base import BaseAPIIntegrationTest + + +@requires_api_version('1.30') +class ConfigAPITest(BaseAPIIntegrationTest): + @classmethod + def setup_class(cls): + client = cls.get_client_instance() + force_leave_swarm(client) + cls._init_swarm(client) + + @classmethod + def teardown_class(cls): + client = cls.get_client_instance() + force_leave_swarm(client) + + def test_create_config(self): + config_id = self.client.create_config( + 'favorite_character', 'sakuya izayoi' + ) + self.tmp_configs.append(config_id) + assert 'ID' in config_id + data = self.client.inspect_config(config_id) + assert data['Spec']['Name'] == 'favorite_character' + + def test_create_config_unicode_data(self): + config_id = self.client.create_config( + 'favorite_character', 'いざよいさくや' + ) + self.tmp_configs.append(config_id) + assert 'ID' in config_id + data = self.client.inspect_config(config_id) + assert data['Spec']['Name'] == 'favorite_character' + + def test_inspect_config(self): + config_name = 'favorite_character' + config_id = self.client.create_config( + config_name, 'sakuya izayoi' + ) + self.tmp_configs.append(config_id) + data = self.client.inspect_config(config_id) + assert data['Spec']['Name'] == config_name + assert 'ID' in data + assert 'Version' in data + + def test_remove_config(self): + config_name = 'favorite_character' + config_id = self.client.create_config( + config_name, 'sakuya izayoi' + ) + self.tmp_configs.append(config_id) + + assert self.client.remove_config(config_id) + with pytest.raises(docker.errors.NotFound): + self.client.inspect_config(config_id) + + def test_list_configs(self): + config_name = 'favorite_character' + config_id = self.client.create_config( + config_name, 'sakuya izayoi' + ) + self.tmp_configs.append(config_id) + + data = self.client.configs(filters={'name': ['favorite_character']}) + assert len(data) == 1 + assert data[0]['ID'] == config_id['ID'] + + @requires_api_version('1.37') + def test_create_config_with_templating(self): + config_id = self.client.create_config( + 'favorite_character', 'sakuya izayoi', + templating={'name': 'golang'} + ) + self.tmp_configs.append(config_id) + assert 'ID' in config_id + data = self.client.inspect_config(config_id) + assert data['Spec']['Name'] == 'favorite_character' + assert 'Templating' in data['Spec'] + assert data['Spec']['Templating']['Name'] == 'golang' diff --git a/tests/integration/api_container_test.py b/tests/integration/api_container_test.py new file mode 100644 index 0000000000..21c2f35797 --- /dev/null +++ b/tests/integration/api_container_test.py @@ -0,0 +1,1624 @@ +import os +import re +import signal +import tempfile +import threading +from datetime import datetime + +import pytest +import requests + +import docker +from docker.constants import IS_WINDOWS_PLATFORM +from docker.utils.socket import next_frame_header, read_exactly + +from .. 
import helpers +from ..helpers import ( + assert_cat_socket_detached_with_keys, + ctrl_with, + requires_api_version, + skip_if_desktop, +) +from .base import TEST_IMG, BaseAPIIntegrationTest + + +class ListContainersTest(BaseAPIIntegrationTest): + def test_list_containers(self): + res0 = self.client.containers(all=True) + size = len(res0) + res1 = self.client.create_container(TEST_IMG, 'true') + assert 'Id' in res1 + self.client.start(res1['Id']) + self.tmp_containers.append(res1['Id']) + res2 = self.client.containers(all=True) + assert size + 1 == len(res2) + retrieved = [x for x in res2 if x['Id'].startswith(res1['Id'])] + assert len(retrieved) == 1 + retrieved = retrieved[0] + assert 'Command' in retrieved + assert retrieved['Command'] == 'true' + assert 'Image' in retrieved + assert re.search(r'alpine:.*', retrieved['Image']) + assert 'Status' in retrieved + + +class CreateContainerTest(BaseAPIIntegrationTest): + + def test_create(self): + res = self.client.create_container(TEST_IMG, 'true') + assert 'Id' in res + self.tmp_containers.append(res['Id']) + + def test_create_with_host_pid_mode(self): + ctnr = self.client.create_container( + TEST_IMG, 'true', host_config=self.client.create_host_config( + pid_mode='host', network_mode='none' + ) + ) + assert 'Id' in ctnr + self.tmp_containers.append(ctnr['Id']) + self.client.start(ctnr) + inspect = self.client.inspect_container(ctnr) + assert 'HostConfig' in inspect + host_config = inspect['HostConfig'] + assert 'PidMode' in host_config + assert host_config['PidMode'] == 'host' + + def test_create_with_links(self): + res0 = self.client.create_container( + TEST_IMG, 'cat', + detach=True, stdin_open=True, + environment={'FOO': '1'}) + + container1_id = res0['Id'] + self.tmp_containers.append(container1_id) + + self.client.start(container1_id) + + res1 = self.client.create_container( + TEST_IMG, 'cat', + detach=True, stdin_open=True, + environment={'FOO': '1'}) + + container2_id = res1['Id'] + self.tmp_containers.append(container2_id) + + self.client.start(container2_id) + + # we don't want the first / + link_path1 = self.client.inspect_container(container1_id)['Name'][1:] + link_alias1 = 'mylink1' + link_env_prefix1 = link_alias1.upper() + + link_path2 = self.client.inspect_container(container2_id)['Name'][1:] + link_alias2 = 'mylink2' + link_env_prefix2 = link_alias2.upper() + + res2 = self.client.create_container( + TEST_IMG, 'env', host_config=self.client.create_host_config( + links={link_path1: link_alias1, link_path2: link_alias2}, + network_mode='bridge' + ) + ) + container3_id = res2['Id'] + self.tmp_containers.append(container3_id) + self.client.start(container3_id) + assert self.client.wait(container3_id)['StatusCode'] == 0 + + logs = self.client.logs(container3_id).decode('utf-8') + assert f'{link_env_prefix1}_NAME=' in logs + assert f'{link_env_prefix1}_ENV_FOO=1' in logs + assert f'{link_env_prefix2}_NAME=' in logs + assert f'{link_env_prefix2}_ENV_FOO=1' in logs + + def test_create_with_restart_policy(self): + container = self.client.create_container( + TEST_IMG, ['sleep', '2'], + host_config=self.client.create_host_config( + restart_policy={"Name": "always", "MaximumRetryCount": 0}, + network_mode='none' + ) + ) + id = container['Id'] + self.client.start(id) + self.client.wait(id) + with pytest.raises(docker.errors.APIError) as exc: + self.client.remove_container(id) + err = exc.value.explanation.lower() + assert 'stop the container before' in err + self.client.remove_container(id, force=True) + + def 
test_create_container_with_volumes_from(self):
+        vol_names = ['foobar_vol0', 'foobar_vol1']
+
+        res0 = self.client.create_container(
+            TEST_IMG, 'true', name=vol_names[0]
+        )
+        container1_id = res0['Id']
+        self.tmp_containers.append(container1_id)
+        self.client.start(container1_id)
+
+        res1 = self.client.create_container(
+            TEST_IMG, 'true', name=vol_names[1]
+        )
+        container2_id = res1['Id']
+        self.tmp_containers.append(container2_id)
+        self.client.start(container2_id)
+
+        res = self.client.create_container(
+            TEST_IMG, 'cat', detach=True, stdin_open=True,
+            host_config=self.client.create_host_config(
+                volumes_from=vol_names, network_mode='none'
+            )
+        )
+        container3_id = res['Id']
+        self.tmp_containers.append(container3_id)
+        self.client.start(container3_id)
+
+        info = self.client.inspect_container(res['Id'])
+        assert len(info['HostConfig']['VolumesFrom']) == len(vol_names)
+
+    def test_create_container_readonly_fs(self):
+        ctnr = self.client.create_container(
+            TEST_IMG, ['mkdir', '/shrine'],
+            host_config=self.client.create_host_config(
+                read_only=True, network_mode='none'
+            )
+        )
+        assert 'Id' in ctnr
+        self.tmp_containers.append(ctnr['Id'])
+        self.client.start(ctnr)
+        res = self.client.wait(ctnr)['StatusCode']
+        assert res != 0
+
+    def test_create_container_with_name(self):
+        res = self.client.create_container(TEST_IMG, 'true', name='foobar')
+        assert 'Id' in res
+        self.tmp_containers.append(res['Id'])
+        inspect = self.client.inspect_container(res['Id'])
+        assert 'Name' in inspect
+        assert '/foobar' == inspect['Name']
+
+    def test_create_container_privileged(self):
+        res = self.client.create_container(
+            TEST_IMG, 'true', host_config=self.client.create_host_config(
+                privileged=True, network_mode='none'
+            )
+        )
+        assert 'Id' in res
+        self.tmp_containers.append(res['Id'])
+        self.client.start(res['Id'])
+        inspect = self.client.inspect_container(res['Id'])
+        assert 'Config' in inspect
+        assert 'Id' in inspect
+        assert inspect['Id'].startswith(res['Id'])
+        assert 'Image' in inspect
+        assert 'State' in inspect
+        assert 'Running' in inspect['State']
+        if not inspect['State']['Running']:
+            assert 'ExitCode' in inspect['State']
+            assert inspect['State']['ExitCode'] == 0
+        # Since Nov 2013, the Privileged flag is no longer part of the
+        # container's config exposed via the API (safety concerns?).
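+        # Only assert on the flag when the daemon still reports it: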
+ # + if 'Privileged' in inspect['Config']: + assert inspect['Config']['Privileged'] is True + + def test_create_with_mac_address(self): + mac_address_expected = "02:42:ac:11:00:0a" + container = self.client.create_container( + TEST_IMG, ['sleep', '60'], mac_address=mac_address_expected) + + id = container['Id'] + + self.client.start(container) + res = self.client.inspect_container(container['Id']) + assert mac_address_expected == res['NetworkSettings']['MacAddress'] + + self.client.kill(id) + + @requires_api_version('1.41') + def test_create_with_cgroupns(self): + host_config = self.client.create_host_config(cgroupns='private') + + container = self.client.create_container( + image=TEST_IMG, + command=['sleep', '60'], + host_config=host_config, + ) + self.tmp_containers.append(container) + + res = self.client.inspect_container(container) + assert 'private' == res['HostConfig']['CgroupnsMode'] + + def test_group_id_ints(self): + container = self.client.create_container( + TEST_IMG, 'id -G', + host_config=self.client.create_host_config(group_add=[1000, 1001]) + ) + self.tmp_containers.append(container) + self.client.start(container) + self.client.wait(container) + + logs = self.client.logs(container).decode('utf-8') + groups = logs.strip().split(' ') + assert '1000' in groups + assert '1001' in groups + + def test_group_id_strings(self): + container = self.client.create_container( + TEST_IMG, 'id -G', host_config=self.client.create_host_config( + group_add=['1000', '1001'] + ) + ) + self.tmp_containers.append(container) + self.client.start(container) + self.client.wait(container) + + logs = self.client.logs(container).decode('utf-8') + + groups = logs.strip().split(' ') + assert '1000' in groups + assert '1001' in groups + + def test_valid_log_driver_and_log_opt(self): + log_config = docker.types.LogConfig( + type='json-file', + config={'max-file': '100'} + ) + + container = self.client.create_container( + TEST_IMG, ['true'], + host_config=self.client.create_host_config(log_config=log_config) + ) + self.tmp_containers.append(container['Id']) + self.client.start(container) + + info = self.client.inspect_container(container) + container_log_config = info['HostConfig']['LogConfig'] + + assert container_log_config['Type'] == log_config.type + assert container_log_config['Config'] == log_config.config + + def test_invalid_log_driver_raises_exception(self): + log_config = docker.types.LogConfig( + type='asdf', + config={} + ) + + expected_msgs = [ + "logger: no log driver named 'asdf' is registered", + "error looking up logging plugin asdf: plugin \"asdf\" not found", + ] + with pytest.raises(docker.errors.APIError) as excinfo: + # raises an internal server error 500 + container = self.client.create_container( + TEST_IMG, ['true'], host_config=self.client.create_host_config( + log_config=log_config + ) + ) + self.client.start(container) + + assert excinfo.value.explanation in expected_msgs + + def test_valid_no_log_driver_specified(self): + log_config = docker.types.LogConfig( + type="", + config={'max-file': '100'} + ) + + container = self.client.create_container( + TEST_IMG, ['true'], + host_config=self.client.create_host_config(log_config=log_config) + ) + self.tmp_containers.append(container['Id']) + self.client.start(container) + + info = self.client.inspect_container(container) + container_log_config = info['HostConfig']['LogConfig'] + + assert container_log_config['Type'] == "json-file" + assert container_log_config['Config'] == log_config.config + + def 
test_valid_no_config_specified(self): + log_config = docker.types.LogConfig( + type="json-file", + config=None + ) + + container = self.client.create_container( + TEST_IMG, ['true'], + host_config=self.client.create_host_config(log_config=log_config) + ) + self.tmp_containers.append(container['Id']) + self.client.start(container) + + info = self.client.inspect_container(container) + container_log_config = info['HostConfig']['LogConfig'] + + assert container_log_config['Type'] == "json-file" + assert container_log_config['Config'] == {} + + def test_create_with_memory_constraints_with_str(self): + ctnr = self.client.create_container( + TEST_IMG, 'true', + host_config=self.client.create_host_config( + memswap_limit='1G', + mem_limit='700M' + ) + ) + assert 'Id' in ctnr + self.tmp_containers.append(ctnr['Id']) + self.client.start(ctnr) + inspect = self.client.inspect_container(ctnr) + + assert 'HostConfig' in inspect + host_config = inspect['HostConfig'] + for limit in ['Memory', 'MemorySwap']: + assert limit in host_config + + def test_create_with_memory_constraints_with_int(self): + ctnr = self.client.create_container( + TEST_IMG, 'true', + host_config=self.client.create_host_config(mem_swappiness=40) + ) + assert 'Id' in ctnr + self.tmp_containers.append(ctnr['Id']) + self.client.start(ctnr) + inspect = self.client.inspect_container(ctnr) + + assert 'HostConfig' in inspect + host_config = inspect['HostConfig'] + assert 'MemorySwappiness' in host_config + + def test_create_with_environment_variable_no_value(self): + container = self.client.create_container( + TEST_IMG, + ['echo'], + environment={'Foo': None, 'Other': 'one', 'Blank': ''}, + ) + self.tmp_containers.append(container['Id']) + config = self.client.inspect_container(container['Id']) + assert 'Foo' in config['Config']['Env'] + assert 'Other=one' in config['Config']['Env'] + assert 'Blank=' in config['Config']['Env'] + + @requires_api_version('1.22') + def test_create_with_tmpfs(self): + tmpfs = { + '/tmp1': 'size=3M' + } + + container = self.client.create_container( + TEST_IMG, + ['echo'], + host_config=self.client.create_host_config( + tmpfs=tmpfs)) + + self.tmp_containers.append(container['Id']) + config = self.client.inspect_container(container) + assert config['HostConfig']['Tmpfs'] == tmpfs + + @requires_api_version('1.24') + def test_create_with_isolation(self): + container = self.client.create_container( + TEST_IMG, ['echo'], host_config=self.client.create_host_config( + isolation='default' + ) + ) + self.tmp_containers.append(container['Id']) + config = self.client.inspect_container(container) + assert config['HostConfig']['Isolation'] == 'default' + + @requires_api_version('1.25') + def test_create_with_auto_remove(self): + host_config = self.client.create_host_config( + auto_remove=True + ) + container = self.client.create_container( + TEST_IMG, ['echo', 'test'], host_config=host_config + ) + self.tmp_containers.append(container['Id']) + config = self.client.inspect_container(container) + assert config['HostConfig']['AutoRemove'] is True + + @requires_api_version('1.25') + def test_create_with_stop_timeout(self): + container = self.client.create_container( + TEST_IMG, ['echo', 'test'], stop_timeout=25 + ) + self.tmp_containers.append(container['Id']) + config = self.client.inspect_container(container) + assert config['Config']['StopTimeout'] == 25 + + @requires_api_version('1.24') + @pytest.mark.xfail(True, reason='Not supported on most drivers') + def test_create_with_storage_opt(self): + host_config = 
self.client.create_host_config( + storage_opt={'size': '120G'} + ) + container = self.client.create_container( + TEST_IMG, ['echo', 'test'], host_config=host_config + ) + self.tmp_containers.append(container) + config = self.client.inspect_container(container) + assert config['HostConfig']['StorageOpt'] == { + 'size': '120G' + } + + @requires_api_version('1.25') + def test_create_with_init(self): + ctnr = self.client.create_container( + TEST_IMG, 'true', + host_config=self.client.create_host_config( + init=True + ) + ) + self.tmp_containers.append(ctnr['Id']) + config = self.client.inspect_container(ctnr) + assert config['HostConfig']['Init'] is True + + @requires_api_version('1.24') + @pytest.mark.xfail(not os.path.exists('/sys/fs/cgroup/cpu.rt_runtime_us'), + reason='CONFIG_RT_GROUP_SCHED isn\'t enabled') + def test_create_with_cpu_rt_options(self): + ctnr = self.client.create_container( + TEST_IMG, 'true', host_config=self.client.create_host_config( + cpu_rt_period=1000, cpu_rt_runtime=500 + ) + ) + self.tmp_containers.append(ctnr) + config = self.client.inspect_container(ctnr) + assert config['HostConfig']['CpuRealtimeRuntime'] == 500 + assert config['HostConfig']['CpuRealtimePeriod'] == 1000 + + @requires_api_version('1.28') + def test_create_with_device_cgroup_rules(self): + rule = 'c 7:128 rwm' + ctnr = self.client.create_container( + TEST_IMG, 'true', host_config=self.client.create_host_config( + device_cgroup_rules=[rule] + ) + ) + self.tmp_containers.append(ctnr) + config = self.client.inspect_container(ctnr) + assert config['HostConfig']['DeviceCgroupRules'] == [rule] + + def test_create_with_uts_mode(self): + container = self.client.create_container( + TEST_IMG, ['echo'], host_config=self.client.create_host_config( + uts_mode='host' + ) + ) + self.tmp_containers.append(container) + config = self.client.inspect_container(container) + assert config['HostConfig']['UTSMode'] == 'host' + + +@pytest.mark.xfail( + IS_WINDOWS_PLATFORM, reason='Test not designed for Windows platform' +) +class VolumeBindTest(BaseAPIIntegrationTest): + def setUp(self): + super().setUp() + + self.mount_dest = '/mnt' + + # Get a random pathname - we don't need it to exist locally + self.mount_origin = tempfile.mkdtemp() + self.filename = 'shared.txt' + + self.run_with_volume( + False, + TEST_IMG, + ['touch', os.path.join(self.mount_dest, self.filename)], + ) + + def test_create_with_binds_rw(self): + + container = self.run_with_volume( + False, + TEST_IMG, + ['ls', self.mount_dest], + ) + logs = self.client.logs(container).decode('utf-8') + assert self.filename in logs + inspect_data = self.client.inspect_container(container) + self.check_container_data(inspect_data, True) + + def test_create_with_binds_ro(self): + self.run_with_volume( + False, + TEST_IMG, + ['touch', os.path.join(self.mount_dest, self.filename)], + ) + container = self.run_with_volume( + True, + TEST_IMG, + ['ls', self.mount_dest], + ) + logs = self.client.logs(container).decode('utf-8') + + assert self.filename in logs + + inspect_data = self.client.inspect_container(container) + self.check_container_data(inspect_data, False) + + @skip_if_desktop() + def test_create_with_binds_rw_rshared(self): + container = self.run_with_volume_propagation( + False, + 'rshared', + TEST_IMG, + ['touch', os.path.join(self.mount_dest, self.filename)], + ) + inspect_data = self.client.inspect_container(container) + self.check_container_data(inspect_data, True, 'rshared') + container = self.run_with_volume_propagation( + True, + 'rshared', + TEST_IMG, + 
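+            # 'rshared' propagation relays mount events between host and container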
['ls', self.mount_dest], + ) + logs = self.client.logs(container).decode('utf-8') + assert self.filename in logs + inspect_data = self.client.inspect_container(container) + self.check_container_data(inspect_data, False, 'rshared') + + @requires_api_version('1.30') + def test_create_with_mounts(self): + mount = docker.types.Mount( + type="bind", source=self.mount_origin, target=self.mount_dest + ) + host_config = self.client.create_host_config(mounts=[mount]) + container = self.run_container( + TEST_IMG, ['ls', self.mount_dest], + host_config=host_config + ) + assert container + logs = self.client.logs(container).decode('utf-8') + assert self.filename in logs + inspect_data = self.client.inspect_container(container) + self.check_container_data(inspect_data, True) + + @requires_api_version('1.30') + def test_create_with_mounts_ro(self): + mount = docker.types.Mount( + type="bind", source=self.mount_origin, target=self.mount_dest, + read_only=True + ) + host_config = self.client.create_host_config(mounts=[mount]) + container = self.run_container( + TEST_IMG, ['ls', self.mount_dest], + host_config=host_config + ) + assert container + logs = self.client.logs(container).decode('utf-8') + assert self.filename in logs + inspect_data = self.client.inspect_container(container) + self.check_container_data(inspect_data, False) + + @requires_api_version('1.30') + def test_create_with_volume_mount(self): + mount = docker.types.Mount( + type="volume", source=helpers.random_name(), + target=self.mount_dest, labels={'com.dockerpy.test': 'true'} + ) + host_config = self.client.create_host_config(mounts=[mount]) + container = self.client.create_container( + TEST_IMG, ['true'], host_config=host_config, + ) + assert container + inspect_data = self.client.inspect_container(container) + assert 'Mounts' in inspect_data + filtered = list(filter( + lambda x: x['Destination'] == self.mount_dest, + inspect_data['Mounts'] + )) + assert len(filtered) == 1 + mount_data = filtered[0] + assert mount['Source'] == mount_data['Name'] + assert mount_data['RW'] is True + + @requires_api_version('1.45') + def test_create_with_subpath_volume_mount(self): + source_volume = helpers.random_name() + self.client.create_volume(name=source_volume) + + setup_container = None + test_container = None + + + # Create a file structure in the volume to test with + setup_container = self.client.create_container( + TEST_IMG, + [ + "sh", + "-c", + 'mkdir -p /vol/subdir && echo "test content" > /vol/subdir/testfile.txt', + ], + host_config=self.client.create_host_config( + binds=[f"{source_volume}:/vol"] + ), + ) + self.client.start(setup_container) + self.client.wait(setup_container) + + # Now test with subpath + mount = docker.types.Mount( + type="volume", + source=source_volume, + target=self.mount_dest, + read_only=True, + subpath="subdir", + ) + + + host_config = self.client.create_host_config(mounts=[mount]) + test_container = self.client.create_container( + TEST_IMG, + ["cat", os.path.join(self.mount_dest, "testfile.txt")], + host_config=host_config, + ) + + self.client.start(test_container) + self.client.wait(test_container) # Wait for container to finish + output = self.client.logs(test_container).decode("utf-8").strip() + + # If the subpath feature is working, we should be able to see the content + # of the file in the subdir + assert output == "test content" + + + def check_container_data(self, inspect_data, rw, propagation='rprivate'): + assert 'Mounts' in inspect_data + filtered = list(filter( + lambda x: x['Destination'] == 
self.mount_dest, + inspect_data['Mounts'] + )) + assert len(filtered) == 1 + mount_data = filtered[0] + assert mount_data['Source'] == self.mount_origin + assert mount_data['RW'] == rw + assert mount_data['Propagation'] == propagation + + def run_with_volume(self, ro, *args, **kwargs): + return self.run_container( + *args, + volumes={self.mount_dest: {}}, + host_config=self.client.create_host_config( + binds={ + self.mount_origin: { + 'bind': self.mount_dest, + 'ro': ro, + }, + }, + network_mode='none' + ), + **kwargs + ) + + def run_with_volume_propagation(self, ro, propagation, *args, **kwargs): + return self.run_container( + *args, + volumes={self.mount_dest: {}}, + host_config=self.client.create_host_config( + binds={ + self.mount_origin: { + 'bind': self.mount_dest, + 'ro': ro, + 'propagation': propagation + }, + }, + network_mode='none' + ), + **kwargs + ) + + +class ArchiveTest(BaseAPIIntegrationTest): + def test_get_file_archive_from_container(self): + data = 'The Maid and the Pocket Watch of Blood' + ctnr = self.client.create_container( + TEST_IMG, f'sh -c "echo {data} > /vol1/data.txt"', + volumes=['/vol1'] + ) + self.tmp_containers.append(ctnr) + self.client.start(ctnr) + self.client.wait(ctnr) + with tempfile.NamedTemporaryFile() as destination: + strm, stat = self.client.get_archive(ctnr, '/vol1/data.txt') + for d in strm: + destination.write(d) + destination.seek(0) + retrieved_data = helpers.untar_file(destination, 'data.txt')\ + .decode('utf-8') + assert data == retrieved_data.strip() + + def test_get_file_stat_from_container(self): + data = 'The Maid and the Pocket Watch of Blood' + ctnr = self.client.create_container( + TEST_IMG, f'sh -c "echo -n {data} > /vol1/data.txt"', + volumes=['/vol1'] + ) + self.tmp_containers.append(ctnr) + self.client.start(ctnr) + self.client.wait(ctnr) + strm, stat = self.client.get_archive(ctnr, '/vol1/data.txt') + assert 'name' in stat + assert stat['name'] == 'data.txt' + assert 'size' in stat + assert stat['size'] == len(data) + + def test_copy_file_to_container(self): + data = b'Deaf To All But The Song' + with tempfile.NamedTemporaryFile(delete=False) as test_file: + test_file.write(data) + test_file.seek(0) + ctnr = self.client.create_container( + TEST_IMG, + f"cat {os.path.join('/vol1/', os.path.basename(test_file.name))}", + volumes=['/vol1'] + ) + self.tmp_containers.append(ctnr) + with helpers.simple_tar(test_file.name) as test_tar: + self.client.put_archive(ctnr, '/vol1', test_tar) + self.client.start(ctnr) + self.client.wait(ctnr) + logs = self.client.logs(ctnr) + assert logs.strip() == data + + def test_copy_directory_to_container(self): + files = ['a.py', 'b.py', 'foo/b.py'] + dirs = ['foo', 'bar'] + base = helpers.make_tree(dirs, files) + ctnr = self.client.create_container( + TEST_IMG, 'ls -p /vol1', volumes=['/vol1'] + ) + self.tmp_containers.append(ctnr) + with docker.utils.tar(base) as test_tar: + self.client.put_archive(ctnr, '/vol1', test_tar) + self.client.start(ctnr) + self.client.wait(ctnr) + logs = self.client.logs(ctnr).decode('utf-8') + results = logs.strip().split() + assert 'a.py' in results + assert 'b.py' in results + assert 'foo/' in results + assert 'bar/' in results + + +class RenameContainerTest(BaseAPIIntegrationTest): + def test_rename_container(self): + version = self.client.version()['Version'] + name = 'hong_meiling' + res = self.client.create_container(TEST_IMG, 'true') + assert 'Id' in res + self.tmp_containers.append(res['Id']) + self.client.rename(res, name) + inspect = 
self.client.inspect_container(res['Id']) + assert 'Name' in inspect + if version == '1.5.0': + assert name == inspect['Name'] + else: + assert f'/{name}' == inspect['Name'] + + +class StartContainerTest(BaseAPIIntegrationTest): + def test_start_container(self): + res = self.client.create_container(TEST_IMG, 'true') + assert 'Id' in res + self.tmp_containers.append(res['Id']) + self.client.start(res['Id']) + inspect = self.client.inspect_container(res['Id']) + assert 'Config' in inspect + assert 'Id' in inspect + assert inspect['Id'].startswith(res['Id']) + assert 'Image' in inspect + assert 'State' in inspect + assert 'Running' in inspect['State'] + if not inspect['State']['Running']: + assert 'ExitCode' in inspect['State'] + assert inspect['State']['ExitCode'] == 0 + + def test_start_container_with_dict_instead_of_id(self): + res = self.client.create_container(TEST_IMG, 'true') + assert 'Id' in res + self.tmp_containers.append(res['Id']) + self.client.start(res) + inspect = self.client.inspect_container(res['Id']) + assert 'Config' in inspect + assert 'Id' in inspect + assert inspect['Id'].startswith(res['Id']) + assert 'Image' in inspect + assert 'State' in inspect + assert 'Running' in inspect['State'] + if not inspect['State']['Running']: + assert 'ExitCode' in inspect['State'] + assert inspect['State']['ExitCode'] == 0 + + def test_run_shlex_commands(self): + commands = [ + 'true', + 'echo "The Young Descendant of Tepes & Septette for the ' + 'Dead Princess"', + 'echo -n "The Young Descendant of Tepes & Septette for the ' + 'Dead Princess"', + '/bin/sh -c "echo Hello World"', + '/bin/sh -c \'echo "Hello World"\'', + 'echo "\"Night of Nights\""', + 'true && echo "Night of Nights"' + ] + for cmd in commands: + container = self.client.create_container(TEST_IMG, cmd) + id = container['Id'] + self.client.start(id) + self.tmp_containers.append(id) + exitcode = self.client.wait(id)['StatusCode'] + assert exitcode == 0, cmd + + +class WaitTest(BaseAPIIntegrationTest): + def test_wait(self): + res = self.client.create_container(TEST_IMG, ['sleep', '3']) + id = res['Id'] + self.tmp_containers.append(id) + self.client.start(id) + exitcode = self.client.wait(id)['StatusCode'] + assert exitcode == 0 + inspect = self.client.inspect_container(id) + assert 'Running' in inspect['State'] + assert inspect['State']['Running'] is False + assert 'ExitCode' in inspect['State'] + assert inspect['State']['ExitCode'] == exitcode + + def test_wait_with_dict_instead_of_id(self): + res = self.client.create_container(TEST_IMG, ['sleep', '3']) + id = res['Id'] + self.tmp_containers.append(id) + self.client.start(res) + exitcode = self.client.wait(res)['StatusCode'] + assert exitcode == 0 + inspect = self.client.inspect_container(res) + assert 'Running' in inspect['State'] + assert inspect['State']['Running'] is False + assert 'ExitCode' in inspect['State'] + assert inspect['State']['ExitCode'] == exitcode + + @requires_api_version('1.30') + def test_wait_with_condition(self): + ctnr = self.client.create_container(TEST_IMG, 'true') + self.tmp_containers.append(ctnr) + with pytest.raises(requests.exceptions.ConnectionError): + self.client.wait(ctnr, condition='removed', timeout=1) + + ctnr = self.client.create_container( + TEST_IMG, ['sleep', '3'], + host_config=self.client.create_host_config(auto_remove=True) + ) + self.tmp_containers.append(ctnr) + self.client.start(ctnr) + assert self.client.wait( + ctnr, condition='removed', timeout=5 + )['StatusCode'] == 0 + + +class LogsTest(BaseAPIIntegrationTest): + def 
test_logs(self):
+        snippet = 'Flowering Nights (Sakuya Iyazoi)'
+        container = self.client.create_container(
+            TEST_IMG, f'echo {snippet}'
+        )
+        id = container['Id']
+        self.tmp_containers.append(id)
+        self.client.start(id)
+        exitcode = self.client.wait(id)['StatusCode']
+        assert exitcode == 0
+        logs = self.client.logs(id)
+        assert logs == f"{snippet}\n".encode(encoding='ascii')
+
+    def test_logs_tail_option(self):
+        snippet = '''Line1
+Line2'''
+        container = self.client.create_container(
+            TEST_IMG, f'echo "{snippet}"'
+        )
+        id = container['Id']
+        self.tmp_containers.append(id)
+        self.client.start(id)
+        exitcode = self.client.wait(id)['StatusCode']
+        assert exitcode == 0
+        logs = self.client.logs(id, tail=1)
+        assert logs == 'Line2\n'.encode(encoding='ascii')
+
+    def test_logs_streaming_and_follow(self):
+        snippet = 'Flowering Nights (Sakuya Iyazoi)'
+        container = self.client.create_container(
+            TEST_IMG, f'echo {snippet}'
+        )
+        id = container['Id']
+        self.tmp_containers.append(id)
+        self.client.start(id)
+        logs = b''
+        for chunk in self.client.logs(id, stream=True, follow=True):
+            logs += chunk
+
+        exitcode = self.client.wait(id)['StatusCode']
+        assert exitcode == 0
+
+        assert logs == f"{snippet}\n".encode(encoding='ascii')
+
+    @pytest.mark.timeout(5)
+    @pytest.mark.skipif(os.environ.get('DOCKER_HOST', '').startswith('ssh://'),
+                        reason='No cancellable streams over SSH')
+    def test_logs_streaming_and_follow_and_cancel(self):
+        snippet = 'Flowering Nights (Sakuya Iyazoi)'
+        container = self.client.create_container(
+            TEST_IMG, f'sh -c "echo \\"{snippet}\\" && sleep 3"'
+        )
+        id = container['Id']
+        self.tmp_containers.append(id)
+        self.client.start(id)
+        logs = b''
+
+        generator = self.client.logs(id, stream=True, follow=True)
+        threading.Timer(1, generator.close).start()
+
+        for chunk in generator:
+            logs += chunk
+
+        assert logs == f"{snippet}\n".encode(encoding='ascii')
+
+    def test_logs_with_dict_instead_of_id(self):
+        snippet = 'Flowering Nights (Sakuya Iyazoi)'
+        container = self.client.create_container(
+            TEST_IMG, f'echo {snippet}'
+        )
+        id = container['Id']
+        self.tmp_containers.append(id)
+        self.client.start(id)
+        exitcode = self.client.wait(id)['StatusCode']
+        assert exitcode == 0
+        logs = self.client.logs(container)
+        assert logs == f"{snippet}\n".encode(encoding='ascii')
+
+    def test_logs_with_tail_0(self):
+        snippet = 'Flowering Nights (Sakuya Iyazoi)'
+        container = self.client.create_container(
+            TEST_IMG, f'echo "{snippet}"'
+        )
+        id = container['Id']
+        self.tmp_containers.append(id)
+        self.client.start(id)
+        exitcode = self.client.wait(id)['StatusCode']
+        assert exitcode == 0
+        logs = self.client.logs(id, tail=0)
+        assert logs == ''.encode(encoding='ascii')
+
+    @requires_api_version('1.35')
+    def test_logs_with_until(self):
+        snippet = 'Shanghai Teahouse (Hong Meiling)'
+        container = self.client.create_container(
+            TEST_IMG, f'echo "{snippet}"'
+        )
+
+        self.tmp_containers.append(container)
+        self.client.start(container)
+        exitcode = self.client.wait(container)['StatusCode']
+        assert exitcode == 0
+        logs_until_1 = self.client.logs(container, until=1)
+        assert logs_until_1 == b''
+        logs_until_now = self.client.logs(container, until=datetime.now())
+        assert logs_until_now == f"{snippet}\n".encode(encoding='ascii')
+
+
+class DiffTest(BaseAPIIntegrationTest):
+    def test_diff(self):
+        container = self.client.create_container(TEST_IMG, ['touch', '/test'])
+        id = container['Id']
+        self.client.start(id)
+        self.tmp_containers.append(id)
+        exitcode = self.client.wait(id)['StatusCode']
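+        # In the diff below, a 'Kind' of 0 means modified, 1 added, 2 deleted
+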
assert exitcode == 0 + diff = self.client.diff(id) + test_diff = [x for x in diff if x.get('Path', None) == '/test'] + assert len(test_diff) == 1 + assert 'Kind' in test_diff[0] + assert test_diff[0]['Kind'] == 1 + + def test_diff_with_dict_instead_of_id(self): + container = self.client.create_container(TEST_IMG, ['touch', '/test']) + id = container['Id'] + self.client.start(id) + self.tmp_containers.append(id) + exitcode = self.client.wait(id)['StatusCode'] + assert exitcode == 0 + diff = self.client.diff(container) + test_diff = [x for x in diff if x.get('Path', None) == '/test'] + assert len(test_diff) == 1 + assert 'Kind' in test_diff[0] + assert test_diff[0]['Kind'] == 1 + + +class StopTest(BaseAPIIntegrationTest): + def test_stop(self): + container = self.client.create_container(TEST_IMG, ['sleep', '9999']) + id = container['Id'] + self.client.start(id) + self.tmp_containers.append(id) + self.client.stop(id, timeout=2) + container_info = self.client.inspect_container(id) + assert 'State' in container_info + state = container_info['State'] + assert 'Running' in state + assert state['Running'] is False + + def test_stop_with_dict_instead_of_id(self): + container = self.client.create_container(TEST_IMG, ['sleep', '9999']) + assert 'Id' in container + id = container['Id'] + self.client.start(container) + self.tmp_containers.append(id) + self.client.stop(container, timeout=2) + container_info = self.client.inspect_container(id) + assert 'State' in container_info + state = container_info['State'] + assert 'Running' in state + assert state['Running'] is False + + +class KillTest(BaseAPIIntegrationTest): + def test_kill(self): + container = self.client.create_container(TEST_IMG, ['sleep', '9999']) + id = container['Id'] + self.client.start(id) + self.tmp_containers.append(id) + self.client.kill(id) + container_info = self.client.inspect_container(id) + assert 'State' in container_info + state = container_info['State'] + assert 'ExitCode' in state + assert state['ExitCode'] != 0 + assert 'Running' in state + assert state['Running'] is False + + def test_kill_with_dict_instead_of_id(self): + container = self.client.create_container(TEST_IMG, ['sleep', '9999']) + id = container['Id'] + self.client.start(id) + self.tmp_containers.append(id) + self.client.kill(container) + container_info = self.client.inspect_container(id) + assert 'State' in container_info + state = container_info['State'] + assert 'ExitCode' in state + assert state['ExitCode'] != 0 + assert 'Running' in state + assert state['Running'] is False + + def test_kill_with_signal(self): + id = self.client.create_container(TEST_IMG, ['sleep', '60']) + self.tmp_containers.append(id) + self.client.start(id) + self.client.kill( + id, signal=signal.SIGKILL if not IS_WINDOWS_PLATFORM else 9 + ) + exitcode = self.client.wait(id)['StatusCode'] + assert exitcode != 0 + container_info = self.client.inspect_container(id) + assert 'State' in container_info + state = container_info['State'] + assert 'ExitCode' in state + assert state['ExitCode'] != 0 + assert 'Running' in state + assert state['Running'] is False, state + + def test_kill_with_signal_name(self): + id = self.client.create_container(TEST_IMG, ['sleep', '60']) + self.client.start(id) + self.tmp_containers.append(id) + self.client.kill(id, signal='SIGKILL') + exitcode = self.client.wait(id)['StatusCode'] + assert exitcode != 0 + container_info = self.client.inspect_container(id) + assert 'State' in container_info + state = container_info['State'] + assert 'ExitCode' in state + assert 
state['ExitCode'] != 0
+ assert 'Running' in state
+ assert state['Running'] is False, state
+
+ def test_kill_with_signal_integer(self):
+ id = self.client.create_container(TEST_IMG, ['sleep', '60'])
+ self.client.start(id)
+ self.tmp_containers.append(id)
+ self.client.kill(id, signal=9)
+ exitcode = self.client.wait(id)['StatusCode']
+ assert exitcode != 0
+ container_info = self.client.inspect_container(id)
+ assert 'State' in container_info
+ state = container_info['State']
+ assert 'ExitCode' in state
+ assert state['ExitCode'] != 0
+ assert 'Running' in state
+ assert state['Running'] is False, state
+
+
+class PortTest(BaseAPIIntegrationTest):
+ def test_port(self):
+ port_bindings = {
+ '1111': ('127.0.0.1', '4567'),
+ '2222': ('127.0.0.1', '4568'),
+ '3333/udp': ('127.0.0.1', '4569'),
+ }
+ ports = [
+ 1111,
+ 2222,
+ (3333, 'udp'),
+ ]
+
+ container = self.client.create_container(
+ TEST_IMG, ['sleep', '60'], ports=ports,
+ host_config=self.client.create_host_config(
+ port_bindings=port_bindings, network_mode='bridge'
+ )
+ )
+ id = container['Id']
+
+ self.client.start(container)
+
+ # Call the port function on each binding and compare expected vs actual
+ for port in port_bindings:
+ port, _, protocol = port.partition('/')
+ actual_bindings = self.client.port(container, port)
+ port_binding = actual_bindings.pop()
+
+ ip, host_port = port_binding['HostIp'], port_binding['HostPort']
+
+ port_binding = port if not protocol else f"{port}/{protocol}"
+ assert ip == port_bindings[port_binding][0]
+ assert host_port == port_bindings[port_binding][1]
+
+ self.client.kill(id)
+
+
+class ContainerTopTest(BaseAPIIntegrationTest):
+ @pytest.mark.xfail(reason='Output of docker top depends on host distro, '
+ 'and is not formalized.')
+ def test_top(self):
+ container = self.client.create_container(
+ TEST_IMG, ['sleep', '60']
+ )
+
+ self.tmp_containers.append(container)
+
+ self.client.start(container)
+ res = self.client.top(container)
+ if not IS_WINDOWS_PLATFORM:
+ assert res['Titles'] == ['PID', 'USER', 'TIME', 'COMMAND']
+ assert len(res['Processes']) == 1
+ assert res['Processes'][0][-1] == 'sleep 60'
+ self.client.kill(container)
+
+ @pytest.mark.skipif(
+ IS_WINDOWS_PLATFORM, reason='No psargs support on windows'
+ )
+ @pytest.mark.xfail(reason='Output of docker top depends on host distro, '
+ 'and is not formalized.')
+ def test_top_with_psargs(self):
+ container = self.client.create_container(
+ TEST_IMG, ['sleep', '60'])
+
+ self.tmp_containers.append(container)
+
+ self.client.start(container)
+ res = self.client.top(container, '-eopid,user')
+ assert res['Titles'] == ['PID', 'USER']
+ assert len(res['Processes']) == 1
+ assert res['Processes'][0][10] == 'sleep 60'
+
+
+class RestartContainerTest(BaseAPIIntegrationTest):
+ def test_restart(self):
+ container = self.client.create_container(TEST_IMG, ['sleep', '9999'])
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+ info = self.client.inspect_container(id)
+ assert 'State' in info
+ assert 'StartedAt' in info['State']
+ start_time1 = info['State']['StartedAt']
+ self.client.restart(id, timeout=2)
+ info2 = self.client.inspect_container(id)
+ assert 'State' in info2
+ assert 'StartedAt' in info2['State']
+ start_time2 = info2['State']['StartedAt']
+ assert start_time1 != start_time2
+ assert 'Running' in info2['State']
+ assert info2['State']['Running'] is True
+ self.client.kill(id)
+
+ def test_restart_with_low_timeout(self):
+ container = self.client.create_container(TEST_IMG, ['sleep',
'9999']) + self.client.start(container) + self.client.timeout = 3 + self.client.restart(container, timeout=1) + self.client.timeout = None + self.client.restart(container, timeout=1) + self.client.kill(container) + + def test_restart_with_dict_instead_of_id(self): + container = self.client.create_container(TEST_IMG, ['sleep', '9999']) + assert 'Id' in container + id = container['Id'] + self.client.start(container) + self.tmp_containers.append(id) + info = self.client.inspect_container(id) + assert 'State' in info + assert 'StartedAt' in info['State'] + start_time1 = info['State']['StartedAt'] + self.client.restart(container, timeout=2) + info2 = self.client.inspect_container(id) + assert 'State' in info2 + assert 'StartedAt' in info2['State'] + start_time2 = info2['State']['StartedAt'] + assert start_time1 != start_time2 + assert 'Running' in info2['State'] + assert info2['State']['Running'] is True + self.client.kill(id) + + +class RemoveContainerTest(BaseAPIIntegrationTest): + def test_remove(self): + container = self.client.create_container(TEST_IMG, ['true']) + id = container['Id'] + self.client.start(id) + self.client.wait(id) + self.client.remove_container(id) + containers = self.client.containers(all=True) + res = [x for x in containers if 'Id' in x and x['Id'].startswith(id)] + assert len(res) == 0 + + def test_remove_with_dict_instead_of_id(self): + container = self.client.create_container(TEST_IMG, ['true']) + id = container['Id'] + self.client.start(id) + self.client.wait(id) + self.client.remove_container(container) + containers = self.client.containers(all=True) + res = [x for x in containers if 'Id' in x and x['Id'].startswith(id)] + assert len(res) == 0 + + +class AttachContainerTest(BaseAPIIntegrationTest): + def test_run_container_streaming(self): + container = self.client.create_container(TEST_IMG, '/bin/sh', + detach=True, stdin_open=True) + id = container['Id'] + self.tmp_containers.append(id) + self.client.start(id) + sock = self.client.attach_socket(container, ws=False) + assert sock.fileno() > -1 + + def test_run_container_reading_socket_http(self): + line = 'hi there and stuff and things, words!' + # `echo` appends CRLF, `printf` doesn't + command = f"printf '{line}'" + container = self.client.create_container(TEST_IMG, command, + detach=True, tty=False) + self.tmp_containers.append(container) + + opts = {"stdout": 1, "stream": 1, "logs": 1} + pty_stdout = self.client.attach_socket(container, opts) + self.addCleanup(pty_stdout.close) + + self.client.start(container) + + (stream, next_size) = next_frame_header(pty_stdout) + assert stream == 1 # correspond to stdout + assert next_size == len(line) + data = read_exactly(pty_stdout, next_size) + assert data.decode('utf-8') == line + + @pytest.mark.xfail(condition=bool(os.environ.get('DOCKER_CERT_PATH', '')), + reason='DOCKER_CERT_PATH not respected for websockets') + def test_run_container_reading_socket_ws(self): + line = 'hi there and stuff and things, words!' 
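+ # Descriptive note (not in the original patch): with ws=False the
+ # attach endpoint hands back a raw socket whose frames must be parsed,
+ # as in the test above; the ws=True variant used here returns a
+ # websocket connection whose recv() yields the payload directly.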
+ # `echo` appends CRLF, `printf` doesn't + command = f"printf '{line}'" + container = self.client.create_container(TEST_IMG, command, + detach=True, tty=False) + self.tmp_containers.append(container) + + opts = {"stdout": 1, "stream": 1, "logs": 1} + pty_stdout = self.client.attach_socket(container, opts, ws=True) + self.addCleanup(pty_stdout.close) + + self.client.start(container) + + data = pty_stdout.recv() + assert data.decode('utf-8') == line + + @pytest.mark.timeout(10) + def test_attach_no_stream(self): + container = self.client.create_container( + TEST_IMG, 'echo hello' + ) + self.tmp_containers.append(container) + self.client.start(container) + self.client.wait(container, condition='not-running') + output = self.client.attach(container, stream=False, logs=True) + assert output == 'hello\n'.encode(encoding='ascii') + + @pytest.mark.timeout(10) + @pytest.mark.skipif(os.environ.get('DOCKER_HOST', '').startswith('ssh://'), + reason='No cancellable streams over SSH') + @pytest.mark.xfail(condition=os.environ.get('DOCKER_TLS_VERIFY') or + os.environ.get('DOCKER_CERT_PATH'), + reason='Flaky test on TLS') + def test_attach_stream_and_cancel(self): + container = self.client.create_container( + TEST_IMG, 'sh -c "sleep 2 && echo hello && sleep 60"', + tty=True + ) + self.tmp_containers.append(container) + self.client.start(container) + output = self.client.attach(container, stream=True, logs=True) + + threading.Timer(3, output.close).start() + + lines = [] + for line in output: + lines.append(line) + + assert len(lines) == 1 + assert lines[0] == 'hello\r\n'.encode(encoding='ascii') + + def test_detach_with_default(self): + container = self.client.create_container( + TEST_IMG, 'cat', + detach=True, stdin_open=True, tty=True + ) + self.tmp_containers.append(container) + self.client.start(container) + + sock = self.client.attach_socket( + container, + {'stdin': True, 'stream': True} + ) + + assert_cat_socket_detached_with_keys( + sock, [ctrl_with('p'), ctrl_with('q')] + ) + + def test_detach_with_config_file(self): + self.client._general_configs['detachKeys'] = 'ctrl-p' + + container = self.client.create_container( + TEST_IMG, 'cat', + detach=True, stdin_open=True, tty=True + ) + self.tmp_containers.append(container) + self.client.start(container) + + sock = self.client.attach_socket( + container, + {'stdin': True, 'stream': True} + ) + + assert_cat_socket_detached_with_keys(sock, [ctrl_with('p')]) + + def test_detach_with_arg(self): + self.client._general_configs['detachKeys'] = 'ctrl-p' + + container = self.client.create_container( + TEST_IMG, 'cat', + detach=True, stdin_open=True, tty=True + ) + self.tmp_containers.append(container) + self.client.start(container) + + sock = self.client.attach_socket( + container, + {'stdin': True, 'stream': True, 'detachKeys': 'ctrl-x'} + ) + + assert_cat_socket_detached_with_keys(sock, [ctrl_with('x')]) + + +class PauseTest(BaseAPIIntegrationTest): + def test_pause_unpause(self): + container = self.client.create_container(TEST_IMG, ['sleep', '9999']) + id = container['Id'] + self.tmp_containers.append(id) + self.client.start(container) + self.client.pause(id) + container_info = self.client.inspect_container(id) + assert 'State' in container_info + state = container_info['State'] + assert 'ExitCode' in state + assert state['ExitCode'] == 0 + assert 'Running' in state + assert state['Running'] is True + assert 'Paused' in state + assert state['Paused'] is True + + self.client.unpause(id) + container_info = self.client.inspect_container(id) + assert 'State' 
in container_info + state = container_info['State'] + assert 'ExitCode' in state + assert state['ExitCode'] == 0 + assert 'Running' in state + assert state['Running'] is True + assert 'Paused' in state + assert state['Paused'] is False + + +class PruneTest(BaseAPIIntegrationTest): + @requires_api_version('1.25') + def test_prune_containers(self): + container1 = self.client.create_container( + TEST_IMG, ['sh', '-c', 'echo hello > /data.txt'] + ) + container2 = self.client.create_container(TEST_IMG, ['sleep', '9999']) + self.client.start(container1) + self.client.start(container2) + self.client.wait(container1) + result = self.client.prune_containers() + assert container1['Id'] in result['ContainersDeleted'] + assert result['SpaceReclaimed'] > 0 + assert container2['Id'] not in result['ContainersDeleted'] + + +class GetContainerStatsTest(BaseAPIIntegrationTest): + def test_get_container_stats_no_stream(self): + container = self.client.create_container( + TEST_IMG, ['sleep', '60'], + ) + self.tmp_containers.append(container) + self.client.start(container) + response = self.client.stats(container, stream=0) + self.client.kill(container) + + assert isinstance(response, dict) + for key in ['read', 'networks', 'precpu_stats', 'cpu_stats', + 'memory_stats', 'blkio_stats']: + assert key in response + + def test_get_container_stats_stream(self): + container = self.client.create_container( + TEST_IMG, ['sleep', '60'], + ) + self.tmp_containers.append(container) + self.client.start(container) + stream = self.client.stats(container) + for chunk in stream: + assert isinstance(chunk, dict) + for key in ['read', 'network', 'precpu_stats', 'cpu_stats', + 'memory_stats', 'blkio_stats']: + assert key in chunk + + +class ContainerUpdateTest(BaseAPIIntegrationTest): + @requires_api_version('1.22') + def test_update_container(self): + old_mem_limit = 400 * 1024 * 1024 + new_mem_limit = 300 * 1024 * 1024 + container = self.client.create_container( + TEST_IMG, 'top', host_config=self.client.create_host_config( + mem_limit=old_mem_limit + ) + ) + self.tmp_containers.append(container) + self.client.start(container) + self.client.update_container(container, mem_limit=new_mem_limit) + inspect_data = self.client.inspect_container(container) + assert inspect_data['HostConfig']['Memory'] == new_mem_limit + + @requires_api_version('1.23') + def test_restart_policy_update(self): + old_restart_policy = { + 'MaximumRetryCount': 0, + 'Name': 'always' + } + new_restart_policy = { + 'MaximumRetryCount': 42, + 'Name': 'on-failure' + } + container = self.client.create_container( + TEST_IMG, ['sleep', '60'], + host_config=self.client.create_host_config( + restart_policy=old_restart_policy + ) + ) + self.tmp_containers.append(container) + self.client.start(container) + self.client.update_container(container, + restart_policy=new_restart_policy) + inspect_data = self.client.inspect_container(container) + assert ( + inspect_data['HostConfig']['RestartPolicy']['MaximumRetryCount'] == + new_restart_policy['MaximumRetryCount'] + ) + assert ( + inspect_data['HostConfig']['RestartPolicy']['Name'] == + new_restart_policy['Name'] + ) + + +class ContainerCPUTest(BaseAPIIntegrationTest): + def test_container_cpu_shares(self): + cpu_shares = 512 + container = self.client.create_container( + TEST_IMG, 'ls', host_config=self.client.create_host_config( + cpu_shares=cpu_shares + ) + ) + self.tmp_containers.append(container) + self.client.start(container) + inspect_data = self.client.inspect_container(container) + assert 
inspect_data['HostConfig']['CpuShares'] == 512 + + def test_container_cpuset(self): + cpuset_cpus = "0,1" + container = self.client.create_container( + TEST_IMG, 'ls', host_config=self.client.create_host_config( + cpuset_cpus=cpuset_cpus + ) + ) + self.tmp_containers.append(container) + self.client.start(container) + inspect_data = self.client.inspect_container(container) + assert inspect_data['HostConfig']['CpusetCpus'] == cpuset_cpus + + @requires_api_version('1.25') + def test_create_with_runtime(self): + container = self.client.create_container( + TEST_IMG, ['echo', 'test'], runtime='runc' + ) + self.tmp_containers.append(container['Id']) + config = self.client.inspect_container(container) + assert config['HostConfig']['Runtime'] == 'runc' + + +class LinkTest(BaseAPIIntegrationTest): + def test_remove_link(self): + # Create containers + container1 = self.client.create_container( + TEST_IMG, 'cat', detach=True, stdin_open=True + ) + container1_id = container1['Id'] + self.tmp_containers.append(container1_id) + self.client.start(container1_id) + + # Create Link + # we don't want the first / + link_path = self.client.inspect_container(container1_id)['Name'][1:] + link_alias = 'mylink' + + container2 = self.client.create_container( + TEST_IMG, 'cat', host_config=self.client.create_host_config( + links={link_path: link_alias} + ) + ) + container2_id = container2['Id'] + self.tmp_containers.append(container2_id) + self.client.start(container2_id) + + # Remove link + linked_name = self.client.inspect_container(container2_id)['Name'][1:] + link_name = f'{linked_name}/{link_alias}' + self.client.remove_container(link_name, link=True) + + # Link is gone + containers = self.client.containers(all=True) + retrieved = [x for x in containers if link_name in x['Names']] + assert len(retrieved) == 0 + + # Containers are still there + retrieved = [ + x for x in containers if x['Id'].startswith(container1_id) or + x['Id'].startswith(container2_id) + ] + assert len(retrieved) == 2 diff --git a/tests/integration/api_exec_test.py b/tests/integration/api_exec_test.py new file mode 100644 index 0000000000..5b829e2875 --- /dev/null +++ b/tests/integration/api_exec_test.py @@ -0,0 +1,313 @@ +from docker.utils.proxy import ProxyConfig +from docker.utils.socket import next_frame_header, read_exactly + +from ..helpers import ( + assert_cat_socket_detached_with_keys, + ctrl_with, + requires_api_version, +) +from .base import TEST_IMG, BaseAPIIntegrationTest + + +class ExecTest(BaseAPIIntegrationTest): + def test_execute_command_with_proxy_env(self): + # Set a custom proxy config on the client + self.client._proxy_configs = ProxyConfig( + ftp='a', https='b', http='c', no_proxy='d' + ) + + container = self.client.create_container( + TEST_IMG, 'cat', detach=True, stdin_open=True, + ) + self.client.start(container) + self.tmp_containers.append(container) + + cmd = 'sh -c "env | grep -i proxy"' + + # First, just make sure the environment variables from the custom + # config are set + + res = self.client.exec_create(container, cmd=cmd) + output = self.client.exec_start(res).decode('utf-8').split('\n') + expected = [ + 'ftp_proxy=a', 'https_proxy=b', 'http_proxy=c', 'no_proxy=d', + 'FTP_PROXY=a', 'HTTPS_PROXY=b', 'HTTP_PROXY=c', 'NO_PROXY=d' + ] + for item in expected: + assert item in output + + # Overwrite some variables with a custom environment + env = {'https_proxy': 'xxx', 'HTTPS_PROXY': 'XXX'} + + res = self.client.exec_create(container, cmd=cmd, environment=env) + output = 
self.client.exec_start(res).decode('utf-8').split('\n') + expected = [ + 'ftp_proxy=a', 'https_proxy=xxx', 'http_proxy=c', 'no_proxy=d', + 'FTP_PROXY=a', 'HTTPS_PROXY=XXX', 'HTTP_PROXY=c', 'NO_PROXY=d' + ] + for item in expected: + assert item in output + + def test_execute_command(self): + container = self.client.create_container(TEST_IMG, 'cat', + detach=True, stdin_open=True) + id = container['Id'] + self.client.start(id) + self.tmp_containers.append(id) + + res = self.client.exec_create(id, ['echo', 'hello']) + assert 'Id' in res + + exec_log = self.client.exec_start(res) + assert exec_log == b'hello\n' + + def test_exec_command_string(self): + container = self.client.create_container(TEST_IMG, 'cat', + detach=True, stdin_open=True) + id = container['Id'] + self.client.start(id) + self.tmp_containers.append(id) + + res = self.client.exec_create(id, 'echo hello world') + assert 'Id' in res + + exec_log = self.client.exec_start(res) + assert exec_log == b'hello world\n' + + def test_exec_command_as_user(self): + container = self.client.create_container(TEST_IMG, 'cat', + detach=True, stdin_open=True) + id = container['Id'] + self.client.start(id) + self.tmp_containers.append(id) + + res = self.client.exec_create(id, 'whoami', user='postgres') + assert 'Id' in res + + exec_log = self.client.exec_start(res) + assert exec_log == b'postgres\n' + + def test_exec_command_as_root(self): + container = self.client.create_container(TEST_IMG, 'cat', + detach=True, stdin_open=True) + id = container['Id'] + self.client.start(id) + self.tmp_containers.append(id) + + res = self.client.exec_create(id, 'whoami') + assert 'Id' in res + + exec_log = self.client.exec_start(res) + assert exec_log == b'root\n' + + def test_exec_command_streaming(self): + container = self.client.create_container(TEST_IMG, 'cat', + detach=True, stdin_open=True) + id = container['Id'] + self.tmp_containers.append(id) + self.client.start(id) + + exec_id = self.client.exec_create(id, ['echo', 'hello\nworld']) + assert 'Id' in exec_id + + res = b'' + for chunk in self.client.exec_start(exec_id, stream=True): + res += chunk + assert res == b'hello\nworld\n' + + def test_exec_start_socket(self): + container = self.client.create_container(TEST_IMG, 'cat', + detach=True, stdin_open=True) + container_id = container['Id'] + self.client.start(container_id) + self.tmp_containers.append(container_id) + + line = 'yay, interactive exec!' 
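+ # Descriptive note (not in the original patch): frames on this raw
+ # exec socket use Docker's stream-multiplexing format — an 8-byte
+ # header (one byte for the stream type, three zero bytes, then a
+ # 4-byte big-endian payload length) followed by the payload itself;
+ # next_frame_header() and read_exactly() below decode exactly that.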
+ # `echo` appends CRLF, `printf` doesn't
+ exec_id = self.client.exec_create(
+ container_id, ['printf', line], tty=True)
+ assert 'Id' in exec_id
+
+ socket = self.client.exec_start(exec_id, socket=True)
+ self.addCleanup(socket.close)
+
+ (stream, next_size) = next_frame_header(socket)
+ assert stream == 1 # stdout (0 = stdin, 1 = stdout, 2 = stderr)
+ assert next_size == len(line)
+ data = read_exactly(socket, next_size)
+ assert data.decode('utf-8') == line
+
+ def test_exec_start_detached(self):
+ container = self.client.create_container(TEST_IMG, 'cat',
+ detach=True, stdin_open=True)
+ container_id = container['Id']
+ self.client.start(container_id)
+ self.tmp_containers.append(container_id)
+
+ exec_id = self.client.exec_create(
+ container_id, ['printf', "asdqwe"])
+ assert 'Id' in exec_id
+
+ response = self.client.exec_start(exec_id, detach=True)
+
+ assert response == ""
+
+ def test_exec_inspect(self):
+ container = self.client.create_container(TEST_IMG, 'cat',
+ detach=True, stdin_open=True)
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+
+ exec_id = self.client.exec_create(id, ['mkdir', '/does/not/exist'])
+ assert 'Id' in exec_id
+ self.client.exec_start(exec_id)
+ exec_info = self.client.exec_inspect(exec_id)
+ assert 'ExitCode' in exec_info
+ assert exec_info['ExitCode'] != 0
+
+ @requires_api_version('1.25')
+ def test_exec_command_with_env(self):
+ container = self.client.create_container(TEST_IMG, 'cat',
+ detach=True, stdin_open=True)
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+
+ res = self.client.exec_create(id, 'env', environment=["X=Y"])
+ assert 'Id' in res
+
+ exec_log = self.client.exec_start(res)
+ assert b'X=Y\n' in exec_log
+
+ @requires_api_version('1.35')
+ def test_exec_command_with_workdir(self):
+ container = self.client.create_container(
+ TEST_IMG, 'cat', detach=True, stdin_open=True
+ )
+ self.tmp_containers.append(container)
+ self.client.start(container)
+
+ res = self.client.exec_create(container, 'pwd', workdir='/var/opt')
+ exec_log = self.client.exec_start(res)
+ assert exec_log == b'/var/opt\n'
+
+ def test_detach_with_default(self):
+ container = self.client.create_container(
+ TEST_IMG, 'cat', detach=True, stdin_open=True
+ )
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+
+ exec_id = self.client.exec_create(
+ id, 'cat', stdin=True, tty=True, stdout=True
+ )
+ sock = self.client.exec_start(exec_id, tty=True, socket=True)
+ self.addCleanup(sock.close)
+
+ assert_cat_socket_detached_with_keys(
+ sock, [ctrl_with('p'), ctrl_with('q')]
+ )
+
+ def test_detach_with_config_file(self):
+ self.client._general_configs['detachKeys'] = 'ctrl-p'
+ container = self.client.create_container(
+ TEST_IMG, 'cat', detach=True, stdin_open=True
+ )
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+
+ exec_id = self.client.exec_create(
+ id, 'cat', stdin=True, tty=True, stdout=True
+ )
+ sock = self.client.exec_start(exec_id, tty=True, socket=True)
+ self.addCleanup(sock.close)
+
+ assert_cat_socket_detached_with_keys(sock, [ctrl_with('p')])
+
+
+class ExecDemuxTest(BaseAPIIntegrationTest):
+ cmd = 'sh -c "{}"'.format(' ; '.join([
+ # Write something on stdout
+ 'echo hello out',
+ # Busybox's sleep does not handle sub-second times.
+ # This loop takes ~0.3 second to execute on my machine.
+ 'sleep 0.5', + # Write something on stderr + 'echo hello err >&2']) + ) + + def setUp(self): + super().setUp() + self.container = self.client.create_container( + TEST_IMG, 'cat', detach=True, stdin_open=True + ) + self.client.start(self.container) + self.tmp_containers.append(self.container) + + def test_exec_command_no_stream_no_demux(self): + # tty=False, stream=False, demux=False + res = self.client.exec_create(self.container, self.cmd) + exec_log = self.client.exec_start(res) + assert b'hello out\n' in exec_log + assert b'hello err\n' in exec_log + + def test_exec_command_stream_no_demux(self): + # tty=False, stream=True, demux=False + res = self.client.exec_create(self.container, self.cmd) + exec_log = list(self.client.exec_start(res, stream=True)) + assert len(exec_log) == 2 + assert b'hello out\n' in exec_log + assert b'hello err\n' in exec_log + + def test_exec_command_no_stream_demux(self): + # tty=False, stream=False, demux=True + res = self.client.exec_create(self.container, self.cmd) + exec_log = self.client.exec_start(res, demux=True) + assert exec_log == (b'hello out\n', b'hello err\n') + + def test_exec_command_stream_demux(self): + # tty=False, stream=True, demux=True + res = self.client.exec_create(self.container, self.cmd) + exec_log = list(self.client.exec_start(res, demux=True, stream=True)) + assert len(exec_log) == 2 + assert (b'hello out\n', None) in exec_log + assert (None, b'hello err\n') in exec_log + + def test_exec_command_tty_no_stream_no_demux(self): + # tty=True, stream=False, demux=False + res = self.client.exec_create(self.container, self.cmd, tty=True) + exec_log = self.client.exec_start(res) + assert exec_log == b'hello out\r\nhello err\r\n' + + def test_exec_command_tty_stream_no_demux(self): + # tty=True, stream=True, demux=False + res = self.client.exec_create(self.container, self.cmd, tty=True) + exec_log = list(self.client.exec_start(res, stream=True)) + assert b'hello out\r\n' in exec_log + if len(exec_log) == 2: + assert b'hello err\r\n' in exec_log + else: + assert len(exec_log) == 3 + assert b'hello err' in exec_log + assert b'\r\n' in exec_log + + def test_exec_command_tty_no_stream_demux(self): + # tty=True, stream=False, demux=True + res = self.client.exec_create(self.container, self.cmd, tty=True) + exec_log = self.client.exec_start(res, demux=True) + assert exec_log == (b'hello out\r\nhello err\r\n', None) + + def test_exec_command_tty_stream_demux(self): + # tty=True, stream=True, demux=True + res = self.client.exec_create(self.container, self.cmd, tty=True) + exec_log = list(self.client.exec_start(res, demux=True, stream=True)) + assert (b'hello out\r\n', None) in exec_log + if len(exec_log) == 2: + assert (b'hello err\r\n', None) in exec_log + else: + assert len(exec_log) == 3 + assert (b'hello err', None) in exec_log + assert (b'\r\n', None) in exec_log diff --git a/tests/integration/api_healthcheck_test.py b/tests/integration/api_healthcheck_test.py new file mode 100644 index 0000000000..f00d804b44 --- /dev/null +++ b/tests/integration/api_healthcheck_test.py @@ -0,0 +1,68 @@ +from .. 
import helpers +from .base import TEST_IMG, BaseAPIIntegrationTest + +SECOND = 1000000000 + + +def wait_on_health_status(client, container, status): + def condition(): + res = client.inspect_container(container) + return res['State']['Health']['Status'] == status + return helpers.wait_on_condition(condition) + + +class HealthcheckTest(BaseAPIIntegrationTest): + + @helpers.requires_api_version('1.24') + def test_healthcheck_shell_command(self): + container = self.client.create_container( + TEST_IMG, 'top', healthcheck={'test': 'echo "hello world"'}) + self.tmp_containers.append(container) + + res = self.client.inspect_container(container) + assert res['Config']['Healthcheck']['Test'] == [ + 'CMD-SHELL', 'echo "hello world"' + ] + + @helpers.requires_api_version('1.24') + def test_healthcheck_passes(self): + container = self.client.create_container( + TEST_IMG, 'top', healthcheck={ + 'test': "true", + 'interval': 1 * SECOND, + 'timeout': 1 * SECOND, + 'retries': 1, + }) + self.tmp_containers.append(container) + self.client.start(container) + wait_on_health_status(self.client, container, "healthy") + + @helpers.requires_api_version('1.24') + def test_healthcheck_fails(self): + container = self.client.create_container( + TEST_IMG, 'top', healthcheck={ + 'test': "false", + 'interval': 1 * SECOND, + 'timeout': 1 * SECOND, + 'retries': 1, + }) + self.tmp_containers.append(container) + self.client.start(container) + wait_on_health_status(self.client, container, "unhealthy") + + @helpers.requires_api_version('1.29') + def test_healthcheck_start_period(self): + container = self.client.create_container( + TEST_IMG, 'top', healthcheck={ + 'test': "echo 'x' >> /counter.txt && " + "test `cat /counter.txt | wc -l` -ge 3", + 'interval': 1 * SECOND, + 'timeout': 1 * SECOND, + 'retries': 1, + 'start_period': 3 * SECOND + } + ) + + self.tmp_containers.append(container) + self.client.start(container) + wait_on_health_status(self.client, container, "healthy") diff --git a/tests/integration/api_image_test.py b/tests/integration/api_image_test.py new file mode 100644 index 0000000000..d3915c9b51 --- /dev/null +++ b/tests/integration/api_image_test.py @@ -0,0 +1,359 @@ +import contextlib +import json +import shutil +import socket +import socketserver +import tarfile +import tempfile +import threading +from http.server import SimpleHTTPRequestHandler + +import pytest + +import docker + +from ..helpers import requires_api_version, requires_experimental +from .base import TEST_IMG, BaseAPIIntegrationTest + + +class ListImagesTest(BaseAPIIntegrationTest): + def test_images(self): + res1 = self.client.images(all=True) + assert 'Id' in res1[0] + res10 = res1[0] + assert 'Created' in res10 + assert 'RepoTags' in res10 + distinct = [] + for img in res1: + if img['Id'] not in distinct: + distinct.append(img['Id']) + assert len(distinct) == self.client.info()['Images'] + + def test_images_quiet(self): + res1 = self.client.images(quiet=True) + assert isinstance(res1[0], str) + + +class PullImageTest(BaseAPIIntegrationTest): + def test_pull(self): + try: + self.client.remove_image('hello-world') + except docker.errors.APIError: + pass + res = self.client.pull('hello-world') + self.tmp_imgs.append('hello-world') + assert isinstance(res, str) + assert len(self.client.images('hello-world')) >= 1 + img_info = self.client.inspect_image('hello-world') + assert 'Id' in img_info + + def test_pull_streaming(self): + try: + self.client.remove_image('hello-world') + except docker.errors.APIError: + pass + stream = self.client.pull( + 
'hello-world', stream=True, decode=True)
+ self.tmp_imgs.append('hello-world')
+ for chunk in stream:
+ assert isinstance(chunk, dict)
+ assert len(self.client.images('hello-world')) >= 1
+ img_info = self.client.inspect_image('hello-world')
+ assert 'Id' in img_info
+
+ @requires_api_version('1.32')
+ @requires_experimental(until=None)
+ def test_pull_invalid_platform(self):
+ with pytest.raises(docker.errors.APIError) as excinfo:
+ self.client.pull('hello-world', platform='foobar')
+
+ # Some API versions incorrectly return a 500 status; assert 4xx or 5xx
+ assert excinfo.value.is_error()
+ assert 'unknown operating system' in excinfo.exconly() \
+ or 'invalid platform' in excinfo.exconly()
+
+
+class CommitTest(BaseAPIIntegrationTest):
+ def test_commit(self):
+ container = self.client.create_container(TEST_IMG, ['touch', '/test'])
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+ res = self.client.commit(id)
+ assert 'Id' in res
+ img_id = res['Id']
+ self.tmp_imgs.append(img_id)
+ img = self.client.inspect_image(img_id)
+ assert 'Parent' in img
+ busybox_id = self.client.inspect_image(TEST_IMG)['Id']
+ assert img['Parent'] == busybox_id
+
+ def test_commit_with_changes(self):
+ cid = self.client.create_container(TEST_IMG, ['touch', '/test'])
+ self.tmp_containers.append(cid)
+ self.client.start(cid)
+ img_id = self.client.commit(
+ cid, changes=['EXPOSE 8000', 'CMD ["bash"]']
+ )
+ self.tmp_imgs.append(img_id)
+ img = self.client.inspect_image(img_id)
+ assert '8000/tcp' in img['Config']['ExposedPorts']
+ assert img['Config']['Cmd'] == ['bash']
+
+
+class RemoveImageTest(BaseAPIIntegrationTest):
+ def test_remove(self):
+ container = self.client.create_container(TEST_IMG, ['touch', '/test'])
+ id = container['Id']
+ self.client.start(id)
+ self.tmp_containers.append(id)
+ res = self.client.commit(id)
+ assert 'Id' in res
+ img_id = res['Id']
+ self.tmp_imgs.append(img_id)
+ logs = self.client.remove_image(img_id, force=True)
+ assert {"Deleted": img_id} in logs
+ images = self.client.images(all=True)
+ res = [x for x in images if x['Id'].startswith(img_id)]
+ assert len(res) == 0
+
+
+class ImportImageTest(BaseAPIIntegrationTest):
+ '''Base class for `docker import` test cases.'''
+
+ TAR_SIZE = 512 * 1024
+
+ def write_dummy_tar_content(self, n_bytes, tar_fd):
+ def extend_file(f, n_bytes):
+ f.seek(n_bytes - 1)
+ f.write(bytearray([65]))
+ f.seek(0)
+
+ tar = tarfile.TarFile(fileobj=tar_fd, mode='w')
+
+ with tempfile.NamedTemporaryFile() as f:
+ extend_file(f, n_bytes)
+ tarinfo = tar.gettarinfo(name=f.name, arcname='testdata')
+ tar.addfile(tarinfo, fileobj=f)
+
+ tar.close()
+
+ @contextlib.contextmanager
+ def dummy_tar_stream(self, n_bytes):
+ '''Yields a stream that is valid tar data of size n_bytes.'''
+ with tempfile.NamedTemporaryFile() as tar_file:
+ self.write_dummy_tar_content(n_bytes, tar_file)
+ tar_file.seek(0)
+ yield tar_file
+
+ @contextlib.contextmanager
+ def dummy_tar_file(self, n_bytes):
+ '''Yields the name of a valid tar file of size n_bytes.'''
+ with tempfile.NamedTemporaryFile(delete=False) as tar_file:
+ self.write_dummy_tar_content(n_bytes, tar_file)
+ tar_file.seek(0)
+ yield tar_file.name
+
+ def test_import_from_bytes(self):
+ with self.dummy_tar_stream(n_bytes=500) as f:
+ content = f.read()
+
+ # The generic import_image() function cannot import in-memory bytes
+ # data that happens to be represented as a string type, because
+ # import_image() will try to use it as a filename and usually then
+ # trigger an exception. 
So we test the import_image_from_data() + # function instead. + statuses = self.client.import_image_from_data( + content, repository='test/import-from-bytes') + + result_text = statuses.splitlines()[-1] + result = json.loads(result_text) + + assert 'error' not in result + + img_id = result['status'] + self.tmp_imgs.append(img_id) + + def test_import_from_file(self): + with self.dummy_tar_file(n_bytes=self.TAR_SIZE) as tar_filename: + # statuses = self.client.import_image( + # src=tar_filename, repository='test/import-from-file') + statuses = self.client.import_image_from_file( + tar_filename, repository='test/import-from-file') + + result_text = statuses.splitlines()[-1] + result = json.loads(result_text) + + assert 'error' not in result + + assert 'status' in result + img_id = result['status'] + self.tmp_imgs.append(img_id) + + def test_import_from_stream(self): + with self.dummy_tar_stream(n_bytes=self.TAR_SIZE) as tar_stream: + statuses = self.client.import_image( + src=tar_stream, repository='test/import-from-stream') + # statuses = self.client.import_image_from_stream( + # tar_stream, repository='test/import-from-stream') + result_text = statuses.splitlines()[-1] + result = json.loads(result_text) + + assert 'error' not in result + + assert 'status' in result + img_id = result['status'] + self.tmp_imgs.append(img_id) + + def test_import_image_from_data_with_changes(self): + with self.dummy_tar_stream(n_bytes=500) as f: + content = f.read() + + statuses = self.client.import_image_from_data( + content, repository='test/import-from-bytes', + changes=['USER foobar', 'CMD ["echo"]'] + ) + + result_text = statuses.splitlines()[-1] + result = json.loads(result_text) + + assert 'error' not in result + + img_id = result['status'] + self.tmp_imgs.append(img_id) + + img_data = self.client.inspect_image(img_id) + assert img_data is not None + assert img_data['Config']['Cmd'] == ['echo'] + assert img_data['Config']['User'] == 'foobar' + + def test_import_image_with_changes(self): + with self.dummy_tar_file(n_bytes=self.TAR_SIZE) as tar_filename: + statuses = self.client.import_image( + src=tar_filename, repository='test/import-from-file', + changes=['USER foobar', 'CMD ["echo"]'] + ) + + result_text = statuses.splitlines()[-1] + result = json.loads(result_text) + + assert 'error' not in result + + img_id = result['status'] + self.tmp_imgs.append(img_id) + + img_data = self.client.inspect_image(img_id) + assert img_data is not None + assert img_data['Config']['Cmd'] == ['echo'] + assert img_data['Config']['User'] == 'foobar' + + # Docs say output is available in 1.23, but this test fails on 1.12.0 + @requires_api_version('1.24') + def test_get_load_image(self): + test_img = 'hello-world:latest' + self.client.pull(test_img) + data = self.client.get_image(test_img) + assert data + output = self.client.load_image(data) + assert any(line for line in output + if f'Loaded image: {test_img}' in line.get('stream', '')) + + @contextlib.contextmanager + def temporary_http_file_server(self, stream): + '''Serve data from an IO stream over HTTP.''' + + class Handler(SimpleHTTPRequestHandler): + def do_GET(self): + self.send_response(200) + self.send_header('Content-Type', 'application/x-tar') + self.end_headers() + shutil.copyfileobj(stream, self.wfile) + + server = socketserver.TCPServer(('', 0), Handler) + thread = threading.Thread(target=server.serve_forever) + thread.daemon = True + thread.start() + + yield f'http://{socket.gethostname()}:{server.server_address[1]}' + + server.shutdown() + + 
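# Descriptive note (not in the original patch) on the helper above:
+ # binding to port 0 lets the OS pick a free port, and advertising
+ # socket.gethostname() rather than localhost keeps the URL reachable
+ # from a daemon that does not share the test runner's loopback
+ # interface; serve_forever() runs on a daemon thread and is stopped
+ # via server.shutdown() once the caller is done with the URL.
+ 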
@pytest.mark.skipif(True, reason="Doesn't work inside a container - FIXME") + def test_import_from_url(self): + # The crappy test HTTP server doesn't handle large files well, so use + # a small file. + tar_size = 10240 + + with self.dummy_tar_stream(n_bytes=tar_size) as tar_data: + with self.temporary_http_file_server(tar_data) as url: + statuses = self.client.import_image( + src=url, repository='test/import-from-url') + + result_text = statuses.splitlines()[-1] + result = json.loads(result_text) + + assert 'error' not in result + + assert 'status' in result + img_id = result['status'] + self.tmp_imgs.append(img_id) + + +@requires_api_version('1.25') +class PruneImagesTest(BaseAPIIntegrationTest): + def test_prune_images(self): + try: + self.client.remove_image('hello-world') + except docker.errors.APIError: + pass + + # Ensure busybox does not get pruned + ctnr = self.client.create_container(TEST_IMG, ['sleep', '9999']) + self.tmp_containers.append(ctnr) + + self.client.pull('hello-world', tag='latest') + self.tmp_imgs.append('hello-world') + img_id = self.client.inspect_image('hello-world')['Id'] + result = self.client.prune_images() + assert img_id not in [ + img.get('Deleted') for img in result.get('ImagesDeleted') or [] + ] + result = self.client.prune_images({'dangling': False}) + assert result['SpaceReclaimed'] > 0 + assert 'hello-world:latest' in [ + img.get('Untagged') for img in result['ImagesDeleted'] + ] + assert img_id in [ + img.get('Deleted') for img in result['ImagesDeleted'] + ] + + +class SaveLoadImagesTest(BaseAPIIntegrationTest): + @requires_api_version('1.23') + def test_get_image_load_image(self): + with tempfile.TemporaryFile() as f: + stream = self.client.get_image(TEST_IMG) + for chunk in stream: + f.write(chunk) + + f.seek(0) + result = self.client.load_image(f.read()) + + success = False + result_line = f'Loaded image: {TEST_IMG}\n' + for data in result: + print(data) + if 'stream' in data: + if data['stream'] == result_line: + success = True + break + assert success is True + + +@requires_api_version('1.30') +class InspectDistributionTest(BaseAPIIntegrationTest): + def test_inspect_distribution(self): + data = self.client.inspect_distribution('busybox:latest') + assert data is not None + assert 'Platforms' in data + assert {'os': 'linux', 'architecture': 'amd64'} in data['Platforms'] diff --git a/tests/integration/api_network_test.py b/tests/integration/api_network_test.py new file mode 100644 index 0000000000..ce2e8ea4c3 --- /dev/null +++ b/tests/integration/api_network_test.py @@ -0,0 +1,510 @@ +import pytest + +import docker +from docker.types import IPAMConfig, IPAMPool + +from ..helpers import random_name, requires_api_version +from .base import TEST_IMG, BaseAPIIntegrationTest + + +class TestNetworks(BaseAPIIntegrationTest): + def tearDown(self): + self.client.leave_swarm(force=True) + super().tearDown() + + def create_network(self, *args, **kwargs): + net_name = random_name() + net_id = self.client.create_network(net_name, *args, **kwargs)['Id'] + self.tmp_networks.append(net_id) + return (net_name, net_id) + + def test_list_networks(self): + networks = self.client.networks() + + net_name, net_id = self.create_network() + + networks = self.client.networks() + assert net_id in [n['Id'] for n in networks] + + networks_by_name = self.client.networks(names=[net_name]) + assert [n['Id'] for n in networks_by_name] == [net_id] + + networks_by_partial_id = self.client.networks(ids=[net_id[:8]]) + assert [n['Id'] for n in networks_by_partial_id] == [net_id] + + 
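# Descriptive note (not in the original patch): the `ids` filter used
+ # above matches unambiguous ID prefixes as well as full IDs, which is
+ # what the eight-character partial-ID lookup relies on.
+ 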
def test_inspect_network(self): + net_name, net_id = self.create_network() + + net = self.client.inspect_network(net_id) + assert net['Id'] == net_id + assert net['Name'] == net_name + assert net['Driver'] == 'bridge' + assert net['Scope'] == 'local' + assert net['IPAM']['Driver'] == 'default' + + def test_create_network_with_ipam_config(self): + _, net_id = self.create_network( + ipam=IPAMConfig( + driver='default', + pool_configs=[ + IPAMPool( + subnet="172.28.0.0/16", + iprange="172.28.5.0/24", + gateway="172.28.5.254", + aux_addresses={ + "a": "172.28.1.5", + "b": "172.28.1.6", + "c": "172.28.1.7", + }, + ), + ], + ), + ) + + net = self.client.inspect_network(net_id) + ipam = net['IPAM'] + + assert ipam.pop('Options', None) is None + + assert ipam['Driver'] == 'default' + + assert ipam['Config'] == [{ + 'Subnet': "172.28.0.0/16", + 'IPRange': "172.28.5.0/24", + 'Gateway': "172.28.5.254", + 'AuxiliaryAddresses': { + "a": "172.28.1.5", + "b": "172.28.1.6", + "c": "172.28.1.7", + }, + }] + + def test_create_network_with_host_driver_fails(self): + with pytest.raises(docker.errors.APIError): + self.client.create_network(random_name(), driver='host') + + def test_remove_network(self): + net_name, net_id = self.create_network() + assert net_name in [n['Name'] for n in self.client.networks()] + + self.client.remove_network(net_id) + assert net_name not in [n['Name'] for n in self.client.networks()] + + def test_connect_and_disconnect_container(self): + net_name, net_id = self.create_network() + + container = self.client.create_container(TEST_IMG, 'top') + self.tmp_containers.append(container) + self.client.start(container) + + network_data = self.client.inspect_network(net_id) + assert not network_data.get('Containers') + + self.client.connect_container_to_network(container, net_id) + network_data = self.client.inspect_network(net_id) + assert list(network_data['Containers'].keys()) == [ + container['Id'] + ] + + with pytest.raises(docker.errors.APIError): + self.client.connect_container_to_network(container, net_id) + + self.client.disconnect_container_from_network(container, net_id) + network_data = self.client.inspect_network(net_id) + assert not network_data.get('Containers') + + with pytest.raises(docker.errors.APIError): + self.client.disconnect_container_from_network(container, net_id) + + @requires_api_version('1.22') + def test_connect_and_force_disconnect_container(self): + net_name, net_id = self.create_network() + + container = self.client.create_container(TEST_IMG, 'top') + self.tmp_containers.append(container) + self.client.start(container) + + network_data = self.client.inspect_network(net_id) + assert not network_data.get('Containers') + + self.client.connect_container_to_network(container, net_id) + network_data = self.client.inspect_network(net_id) + assert list(network_data['Containers'].keys()) == \ + [container['Id']] + + self.client.disconnect_container_from_network(container, net_id, True) + network_data = self.client.inspect_network(net_id) + assert not network_data.get('Containers') + + with pytest.raises(docker.errors.APIError): + self.client.disconnect_container_from_network( + container, net_id, force=True + ) + + @requires_api_version('1.22') + def test_connect_with_aliases(self): + net_name, net_id = self.create_network() + + container = self.client.create_container(TEST_IMG, 'top') + self.tmp_containers.append(container) + self.client.start(container) + + self.client.connect_container_to_network( + container, net_id, aliases=['foo', 'bar']) + container_data = 
self.client.inspect_container(container) + aliases = ( + container_data['NetworkSettings']['Networks'][net_name]['Aliases'] + ) + assert 'foo' in aliases + assert 'bar' in aliases + + def test_connect_on_container_create(self): + net_name, net_id = self.create_network() + + container = self.client.create_container( + image=TEST_IMG, + command='top', + host_config=self.client.create_host_config(network_mode=net_name), + ) + self.tmp_containers.append(container) + self.client.start(container) + + network_data = self.client.inspect_network(net_id) + assert list(network_data['Containers'].keys()) == \ + [container['Id']] + + self.client.disconnect_container_from_network(container, net_id) + network_data = self.client.inspect_network(net_id) + assert not network_data.get('Containers') + + @requires_api_version('1.22') + def test_create_with_aliases(self): + net_name, net_id = self.create_network() + + container = self.client.create_container( + image=TEST_IMG, + command='top', + host_config=self.client.create_host_config( + network_mode=net_name, + ), + networking_config=self.client.create_networking_config({ + net_name: self.client.create_endpoint_config( + aliases=['foo', 'bar'], + ), + }), + ) + self.tmp_containers.append(container) + self.client.start(container) + + container_data = self.client.inspect_container(container) + aliases = ( + container_data['NetworkSettings']['Networks'][net_name]['Aliases'] + ) + assert 'foo' in aliases + assert 'bar' in aliases + + @requires_api_version('1.22') + def test_create_with_ipv4_address(self): + net_name, net_id = self.create_network( + ipam=IPAMConfig( + driver='default', + pool_configs=[IPAMPool(subnet="132.124.0.0/16")], + ), + ) + container = self.client.create_container( + image=TEST_IMG, command='top', + host_config=self.client.create_host_config(network_mode=net_name), + networking_config=self.client.create_networking_config({ + net_name: self.client.create_endpoint_config( + ipv4_address='132.124.0.23' + ) + }) + ) + self.tmp_containers.append(container) + self.client.start(container) + + net_settings = self.client.inspect_container(container)[ + 'NetworkSettings' + ] + assert net_settings['Networks'][net_name]['IPAMConfig']['IPv4Address']\ + == '132.124.0.23' + + @requires_api_version('1.22') + def test_create_with_ipv6_address(self): + net_name, net_id = self.create_network( + ipam=IPAMConfig( + driver='default', + pool_configs=[IPAMPool(subnet="2001:389::/64")], + ), + ) + container = self.client.create_container( + image=TEST_IMG, command='top', + host_config=self.client.create_host_config(network_mode=net_name), + networking_config=self.client.create_networking_config({ + net_name: self.client.create_endpoint_config( + ipv6_address='2001:389::f00d' + ) + }) + ) + self.tmp_containers.append(container) + self.client.start(container) + + net_settings = self.client.inspect_container(container)[ + 'NetworkSettings' + ] + assert net_settings['Networks'][net_name]['IPAMConfig']['IPv6Address']\ + == '2001:389::f00d' + + @requires_api_version('1.24') + def test_create_with_linklocal_ips(self): + container = self.client.create_container( + TEST_IMG, 'top', + networking_config=self.client.create_networking_config( + { + 'bridge': self.client.create_endpoint_config( + link_local_ips=['169.254.8.8'] + ) + } + ), + host_config=self.client.create_host_config(network_mode='bridge') + ) + self.tmp_containers.append(container) + self.client.start(container) + container_data = self.client.inspect_container(container) + net_cfg = 
container_data['NetworkSettings']['Networks']['bridge'] + assert 'IPAMConfig' in net_cfg + assert 'LinkLocalIPs' in net_cfg['IPAMConfig'] + assert net_cfg['IPAMConfig']['LinkLocalIPs'] == ['169.254.8.8'] + + @requires_api_version('1.32') + def test_create_with_driveropt(self): + container = self.client.create_container( + TEST_IMG, 'top', + networking_config=self.client.create_networking_config( + { + 'bridge': self.client.create_endpoint_config( + driver_opt={'com.docker-py.setting': 'on'} + ) + } + ), + host_config=self.client.create_host_config(network_mode='bridge') + ) + self.tmp_containers.append(container) + self.client.start(container) + container_data = self.client.inspect_container(container) + net_cfg = container_data['NetworkSettings']['Networks']['bridge'] + assert 'DriverOpts' in net_cfg + assert 'com.docker-py.setting' in net_cfg['DriverOpts'] + assert net_cfg['DriverOpts']['com.docker-py.setting'] == 'on' + + @requires_api_version('1.22') + def test_create_with_links(self): + net_name, net_id = self.create_network() + + container = self.create_and_start( + host_config=self.client.create_host_config(network_mode=net_name), + networking_config=self.client.create_networking_config({ + net_name: self.client.create_endpoint_config( + links=[('docker-py-test-upstream', 'bar')], + ), + }), + ) + + net_settings = self.client.inspect_container(container)[ + 'NetworkSettings' + ] + assert net_settings['Networks'][net_name]['Links'] == [ + 'docker-py-test-upstream:bar' + ] + + self.create_and_start( + name='docker-py-test-upstream', + host_config=self.client.create_host_config(network_mode=net_name), + ) + + self.execute(container, ['nslookup', 'bar']) + + def test_create_check_duplicate(self): + net_name, net_id = self.create_network() + with pytest.raises(docker.errors.APIError): + self.client.create_network(net_name, check_duplicate=True) + + @requires_api_version('1.22') + def test_connect_with_links(self): + net_name, net_id = self.create_network() + + container = self.create_and_start( + host_config=self.client.create_host_config(network_mode=net_name)) + + self.client.disconnect_container_from_network(container, net_name) + self.client.connect_container_to_network( + container, net_name, + links=[('docker-py-test-upstream', 'bar')]) + + net_settings = self.client.inspect_container(container)[ + 'NetworkSettings' + ] + assert net_settings['Networks'][net_name]['Links'] == [ + 'docker-py-test-upstream:bar' + ] + + self.create_and_start( + name='docker-py-test-upstream', + host_config=self.client.create_host_config(network_mode=net_name), + ) + + self.execute(container, ['nslookup', 'bar']) + + @requires_api_version('1.22') + def test_connect_with_ipv4_address(self): + net_name, net_id = self.create_network( + ipam=IPAMConfig( + driver='default', + pool_configs=[ + IPAMPool( + subnet="172.28.0.0/16", iprange="172.28.5.0/24", + gateway="172.28.5.254" + ) + ] + ) + ) + + container = self.create_and_start( + host_config=self.client.create_host_config(network_mode=net_name)) + + self.client.disconnect_container_from_network(container, net_name) + self.client.connect_container_to_network( + container, net_name, ipv4_address='172.28.5.24' + ) + + container_data = self.client.inspect_container(container) + net_data = container_data['NetworkSettings']['Networks'][net_name] + assert net_data['IPAMConfig']['IPv4Address'] == '172.28.5.24' + + @requires_api_version('1.22') + def test_connect_with_ipv6_address(self): + net_name, net_id = self.create_network( + ipam=IPAMConfig( + 
driver='default', + pool_configs=[ + IPAMPool( + subnet="2001:389::/64", iprange="2001:389::0/96", + gateway="2001:389::ffff" + ) + ] + ) + ) + + container = self.create_and_start( + host_config=self.client.create_host_config(network_mode=net_name)) + + self.client.disconnect_container_from_network(container, net_name) + self.client.connect_container_to_network( + container, net_name, ipv6_address='2001:389::f00d' + ) + + container_data = self.client.inspect_container(container) + net_data = container_data['NetworkSettings']['Networks'][net_name] + assert net_data['IPAMConfig']['IPv6Address'] == '2001:389::f00d' + + @requires_api_version('1.25') + def test_connect_with_mac_address(self): + net_name, net_id = self.create_network() + + container = self.client.create_container(TEST_IMG, 'top') + self.tmp_containers.append(container) + + self.client.connect_container_to_network( + container, net_name, mac_address='02:42:ac:11:00:02' + ) + + container_data = self.client.inspect_container(container) + + net_data = container_data['NetworkSettings']['Networks'][net_name] + assert net_data['MacAddress'] == '02:42:ac:11:00:02' + + @requires_api_version('1.23') + def test_create_internal_networks(self): + _, net_id = self.create_network(internal=True) + net = self.client.inspect_network(net_id) + assert net['Internal'] is True + + @requires_api_version('1.23') + def test_create_network_with_labels(self): + _, net_id = self.create_network(labels={ + 'com.docker.py.test': 'label' + }) + + net = self.client.inspect_network(net_id) + assert 'Labels' in net + assert len(net['Labels']) == 1 + assert net['Labels'] == { + 'com.docker.py.test': 'label' + } + + @requires_api_version('1.23') + def test_create_network_with_labels_wrong_type(self): + with pytest.raises(TypeError): + self.create_network(labels=['com.docker.py.test=label', ]) + + @requires_api_version('1.23') + def test_create_network_ipv6_enabled(self): + _, net_id = self.create_network( + enable_ipv6=True, ipam=IPAMConfig( + driver='default', + pool_configs=[ + IPAMPool( + subnet="2001:389::/64", iprange="2001:389::0/96", + gateway="2001:389::ffff" + ) + ] + ) + ) + net = self.client.inspect_network(net_id) + assert net['EnableIPv6'] is True + + @requires_api_version('1.25') + def test_create_network_attachable(self): + assert self.init_swarm() + _, net_id = self.create_network(driver='overlay', attachable=True) + net = self.client.inspect_network(net_id) + assert net['Attachable'] is True + + @requires_api_version('1.29') + def test_create_network_ingress(self): + assert self.init_swarm() + self.client.remove_network('ingress') + _, net_id = self.create_network(driver='overlay', ingress=True) + net = self.client.inspect_network(net_id) + assert net['Ingress'] is True + + @requires_api_version('1.25') + def test_prune_networks(self): + net_name, _ = self.create_network() + result = self.client.prune_networks() + assert net_name in result['NetworksDeleted'] + + @requires_api_version('1.31') + def test_create_inspect_network_with_scope(self): + assert self.init_swarm() + net_name_loc, net_id_loc = self.create_network(scope='local') + + assert self.client.inspect_network(net_name_loc) + assert self.client.inspect_network(net_name_loc, scope='local') + with pytest.raises(docker.errors.NotFound): + self.client.inspect_network(net_name_loc, scope='global') + + net_name_swarm, net_id_swarm = self.create_network( + driver='overlay', scope='swarm' + ) + + assert self.client.inspect_network(net_name_swarm) + assert 
self.client.inspect_network(net_name_swarm, scope='swarm') + with pytest.raises(docker.errors.NotFound): + self.client.inspect_network(net_name_swarm, scope='local') + + def test_create_remove_network_with_space_in_name(self): + net_id = self.client.create_network('test 01') + self.tmp_networks.append(net_id) + assert self.client.inspect_network('test 01') + assert self.client.remove_network('test 01') is None # does not raise diff --git a/tests/integration/api_plugin_test.py b/tests/integration/api_plugin_test.py new file mode 100644 index 0000000000..168c81b231 --- /dev/null +++ b/tests/integration/api_plugin_test.py @@ -0,0 +1,146 @@ +import os + +import pytest + +import docker + +from ..helpers import requires_api_version +from .base import BaseAPIIntegrationTest + +SSHFS = 'vieux/sshfs:latest' + + +@requires_api_version('1.25') +class PluginTest(BaseAPIIntegrationTest): + @classmethod + def teardown_class(cls): + client = cls.get_client_instance() + try: + client.remove_plugin(SSHFS, force=True) + except docker.errors.APIError: + pass + + def teardown_method(self, method): + client = self.get_client_instance() + try: + client.disable_plugin(SSHFS, True) + except docker.errors.APIError: + pass + + for p in self.tmp_plugins: + try: + client.remove_plugin(p) + except docker.errors.APIError: + pass + + client.close() + + def ensure_plugin_installed(self, plugin_name): + try: + return self.client.inspect_plugin(plugin_name) + except docker.errors.NotFound: + prv = self.client.plugin_privileges(plugin_name) + for _d in self.client.pull_plugin(plugin_name, prv): + pass + return self.client.inspect_plugin(plugin_name) + + def test_enable_plugin(self): + pl_data = self.ensure_plugin_installed(SSHFS) + assert pl_data['Enabled'] is False + assert self.client.enable_plugin(SSHFS) + pl_data = self.client.inspect_plugin(SSHFS) + assert pl_data['Enabled'] is True + with pytest.raises(docker.errors.APIError): + self.client.enable_plugin(SSHFS) + + def test_disable_plugin(self): + pl_data = self.ensure_plugin_installed(SSHFS) + assert pl_data['Enabled'] is False + assert self.client.enable_plugin(SSHFS) + pl_data = self.client.inspect_plugin(SSHFS) + assert pl_data['Enabled'] is True + self.client.disable_plugin(SSHFS) + pl_data = self.client.inspect_plugin(SSHFS) + assert pl_data['Enabled'] is False + with pytest.raises(docker.errors.APIError): + self.client.disable_plugin(SSHFS) + + def test_inspect_plugin(self): + self.ensure_plugin_installed(SSHFS) + data = self.client.inspect_plugin(SSHFS) + assert 'Config' in data + assert 'Name' in data + assert data['Name'] == SSHFS + + def test_plugin_privileges(self): + prv = self.client.plugin_privileges(SSHFS) + assert isinstance(prv, list) + for item in prv: + assert 'Name' in item + assert 'Value' in item + assert 'Description' in item + + def test_list_plugins(self): + self.ensure_plugin_installed(SSHFS) + data = self.client.plugins() + assert len(data) > 0 + plugin = [p for p in data if p['Name'] == SSHFS][0] + assert 'Config' in plugin + + def test_configure_plugin(self): + pl_data = self.ensure_plugin_installed(SSHFS) + assert pl_data['Enabled'] is False + self.client.configure_plugin(SSHFS, { + 'DEBUG': '1' + }) + pl_data = self.client.inspect_plugin(SSHFS) + assert 'Env' in pl_data['Settings'] + assert 'DEBUG=1' in pl_data['Settings']['Env'] + + self.client.configure_plugin(SSHFS, ['DEBUG=0']) + pl_data = self.client.inspect_plugin(SSHFS) + assert 'DEBUG=0' in pl_data['Settings']['Env'] + + def test_remove_plugin(self): + pl_data = 
self.ensure_plugin_installed(SSHFS) + assert pl_data['Enabled'] is False + assert self.client.remove_plugin(SSHFS) is True + + def test_force_remove_plugin(self): + self.ensure_plugin_installed(SSHFS) + self.client.enable_plugin(SSHFS) + assert self.client.inspect_plugin(SSHFS)['Enabled'] is True + assert self.client.remove_plugin(SSHFS, force=True) is True + + def test_install_plugin(self): + try: + self.client.remove_plugin(SSHFS, force=True) + except docker.errors.APIError: + pass + + prv = self.client.plugin_privileges(SSHFS) + logs = list(self.client.pull_plugin(SSHFS, prv)) + assert any(log.get('status') == 'Download complete' for log in logs) + assert self.client.inspect_plugin(SSHFS) + assert self.client.enable_plugin(SSHFS) + + @requires_api_version('1.26') + def test_upgrade_plugin(self): + pl_data = self.ensure_plugin_installed(SSHFS) + assert pl_data['Enabled'] is False + prv = self.client.plugin_privileges(SSHFS) + logs = list(self.client.upgrade_plugin(SSHFS, SSHFS, prv)) + assert any(log.get('status') == 'Download complete' for log in logs) + assert self.client.inspect_plugin(SSHFS) + assert self.client.enable_plugin(SSHFS) + + def test_create_plugin(self): + plugin_data_dir = os.path.join( + os.path.dirname(__file__), os.path.join('testdata', 'dummy-plugin') + ) + assert self.client.create_plugin( + 'docker-sdk-py/dummy', plugin_data_dir + ) + self.tmp_plugins.append('docker-sdk-py/dummy') + data = self.client.inspect_plugin('docker-sdk-py/dummy') + assert data['Config']['Entrypoint'] == ['/dummy'] diff --git a/tests/integration/api_secret_test.py b/tests/integration/api_secret_test.py new file mode 100644 index 0000000000..588aaeb99d --- /dev/null +++ b/tests/integration/api_secret_test.py @@ -0,0 +1,71 @@ +import pytest + +import docker + +from ..helpers import force_leave_swarm, requires_api_version +from .base import BaseAPIIntegrationTest + + +@requires_api_version('1.25') +class SecretAPITest(BaseAPIIntegrationTest): + @classmethod + def setup_class(cls): + client = cls.get_client_instance() + force_leave_swarm(client) + cls._init_swarm(client) + + @classmethod + def teardown_class(cls): + client = cls.get_client_instance() + force_leave_swarm(client) + + def test_create_secret(self): + secret_id = self.client.create_secret( + 'favorite_character', 'sakuya izayoi' + ) + self.tmp_secrets.append(secret_id) + assert 'ID' in secret_id + data = self.client.inspect_secret(secret_id) + assert data['Spec']['Name'] == 'favorite_character' + + def test_create_secret_unicode_data(self): + secret_id = self.client.create_secret( + 'favorite_character', 'いざよいさくや' + ) + self.tmp_secrets.append(secret_id) + assert 'ID' in secret_id + data = self.client.inspect_secret(secret_id) + assert data['Spec']['Name'] == 'favorite_character' + + def test_inspect_secret(self): + secret_name = 'favorite_character' + secret_id = self.client.create_secret( + secret_name, 'sakuya izayoi' + ) + self.tmp_secrets.append(secret_id) + data = self.client.inspect_secret(secret_id) + assert data['Spec']['Name'] == secret_name + assert 'ID' in data + assert 'Version' in data + + def test_remove_secret(self): + secret_name = 'favorite_character' + secret_id = self.client.create_secret( + secret_name, 'sakuya izayoi' + ) + self.tmp_secrets.append(secret_id) + + assert self.client.remove_secret(secret_id) + with pytest.raises(docker.errors.NotFound): + self.client.inspect_secret(secret_id) + + def test_list_secrets(self): + secret_name = 'favorite_character' + secret_id = self.client.create_secret( + secret_name, 
'sakuya izayoi' + ) + self.tmp_secrets.append(secret_id) + + data = self.client.secrets(filters={'names': ['favorite_character']}) + assert len(data) == 1 + assert data[0]['ID'] == secret_id['ID'] diff --git a/tests/integration/api_service_test.py b/tests/integration/api_service_test.py new file mode 100644 index 0000000000..ba67c6d538 --- /dev/null +++ b/tests/integration/api_service_test.py @@ -0,0 +1,1467 @@ +import random +import time + +import pytest + +import docker + +from ..helpers import force_leave_swarm, requires_api_version +from .base import TEST_IMG, BaseAPIIntegrationTest + + +class ServiceTest(BaseAPIIntegrationTest): + @classmethod + def setup_class(cls): + client = cls.get_client_instance() + force_leave_swarm(client) + cls._init_swarm(client) + + @classmethod + def teardown_class(cls): + client = cls.get_client_instance() + force_leave_swarm(client) + + def tearDown(self): + for service in self.client.services(filters={'name': 'dockerpytest_'}): + try: + self.client.remove_service(service['ID']) + except docker.errors.APIError: + pass + super().tearDown() + + def get_service_name(self): + return f'dockerpytest_{random.getrandbits(64):x}' + + def get_service_container(self, service_name, attempts=20, interval=0.5, + include_stopped=False): + # There is some delay between the service's creation and the creation + # of the service's containers. This method deals with the uncertainty + # when trying to retrieve the container associated with a service. + while True: + containers = self.client.containers( + filters={'name': [service_name]}, quiet=True, + all=include_stopped + ) + if len(containers) > 0: + return containers[0] + attempts -= 1 + if attempts <= 0: + return None + time.sleep(interval) + + def create_simple_service(self, name=None, labels=None): + if name: + name = f'dockerpytest_{name}' + else: + name = self.get_service_name() + + container_spec = docker.types.ContainerSpec( + TEST_IMG, ['echo', 'hello'] + ) + task_tmpl = docker.types.TaskTemplate(container_spec) + return name, self.client.create_service( + task_tmpl, name=name, labels=labels + ) + + @requires_api_version('1.24') + def test_list_services(self): + services = self.client.services() + assert isinstance(services, list) + + test_services = self.client.services(filters={'name': 'dockerpytest_'}) + assert len(test_services) == 0 + self.create_simple_service() + test_services = self.client.services(filters={'name': 'dockerpytest_'}) + assert len(test_services) == 1 + assert 'dockerpytest_' in test_services[0]['Spec']['Name'] + + @requires_api_version('1.24') + def test_list_services_filter_by_label(self): + test_services = self.client.services(filters={'label': 'test_label'}) + assert len(test_services) == 0 + self.create_simple_service(labels={'test_label': 'testing'}) + test_services = self.client.services(filters={'label': 'test_label'}) + assert len(test_services) == 1 + assert test_services[0]['Spec']['Labels']['test_label'] == 'testing' + + @requires_api_version('1.41') + def test_list_services_with_status(self): + test_services = self.client.services() + assert len(test_services) == 0 + self.create_simple_service() + test_services = self.client.services( + filters={'name': 'dockerpytest_'}, status=False + ) + assert 'ServiceStatus' not in test_services[0] + test_services = self.client.services( + filters={'name': 'dockerpytest_'}, status=True + ) + assert 'ServiceStatus' in test_services[0] + + def test_inspect_service_by_id(self): + svc_name, svc_id = self.create_simple_service() + svc_info = 
self.client.inspect_service(svc_id) + assert 'ID' in svc_info + assert svc_info['ID'] == svc_id['ID'] + + def test_inspect_service_by_name(self): + svc_name, svc_id = self.create_simple_service() + svc_info = self.client.inspect_service(svc_name) + assert 'ID' in svc_info + assert svc_info['ID'] == svc_id['ID'] + + @requires_api_version('1.29') + def test_inspect_service_insert_defaults(self): + svc_name, svc_id = self.create_simple_service() + svc_info = self.client.inspect_service(svc_id) + svc_info_defaults = self.client.inspect_service( + svc_id, insert_defaults=True + ) + assert svc_info != svc_info_defaults + assert 'RollbackConfig' in svc_info_defaults['Spec'] + assert 'RollbackConfig' not in svc_info['Spec'] + + def test_remove_service_by_id(self): + svc_name, svc_id = self.create_simple_service() + assert self.client.remove_service(svc_id) + test_services = self.client.services(filters={'name': 'dockerpytest_'}) + assert len(test_services) == 0 + + def test_remove_service_by_name(self): + svc_name, svc_id = self.create_simple_service() + assert self.client.remove_service(svc_name) + test_services = self.client.services(filters={'name': 'dockerpytest_'}) + assert len(test_services) == 0 + + def test_create_service_simple(self): + name, svc_id = self.create_simple_service() + assert self.client.inspect_service(svc_id) + services = self.client.services(filters={'name': name}) + assert len(services) == 1 + assert services[0]['ID'] == svc_id['ID'] + + @requires_api_version('1.29') + def test_service_logs(self): + name, svc_id = self.create_simple_service() + assert self.get_service_container(name, include_stopped=True) + attempts = 20 + while True: + if attempts == 0: + self.fail('No service logs produced by endpoint') + return + logs = self.client.service_logs(svc_id, stdout=True, is_tty=False) + try: + log_line = next(logs) + except StopIteration: + attempts -= 1 + time.sleep(0.1) + continue + else: + break + + if log_line is not None: + log_line = log_line.decode('utf-8') + assert 'hello\n' in log_line + + def test_create_service_custom_log_driver(self): + container_spec = docker.types.ContainerSpec( + TEST_IMG, ['echo', 'hello'] + ) + log_cfg = docker.types.DriverConfig('none') + task_tmpl = docker.types.TaskTemplate( + container_spec, log_driver=log_cfg + ) + name = self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert 'TaskTemplate' in svc_info['Spec'] + res_template = svc_info['Spec']['TaskTemplate'] + assert 'LogDriver' in res_template + assert 'Name' in res_template['LogDriver'] + assert res_template['LogDriver']['Name'] == 'none' + + def test_create_service_with_volume_mount(self): + vol_name = self.get_service_name() + container_spec = docker.types.ContainerSpec( + TEST_IMG, ['ls'], + mounts=[ + docker.types.Mount(target='/test', source=vol_name) + ] + ) + self.tmp_volumes.append(vol_name) + task_tmpl = docker.types.TaskTemplate(container_spec) + name = self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate'] + cspec = svc_info['Spec']['TaskTemplate']['ContainerSpec'] + assert 'Mounts' in cspec + assert len(cspec['Mounts']) == 1 + mount = cspec['Mounts'][0] + assert mount['Target'] == '/test' + assert mount['Source'] == vol_name + assert mount['Type'] == 'volume' + + def test_create_service_with_resources_constraints(self): + container_spec = 
docker.types.ContainerSpec(TEST_IMG, ['true']) + resources = docker.types.Resources( + cpu_limit=4000000, mem_limit=3 * 1024 * 1024 * 1024, + cpu_reservation=3500000, mem_reservation=2 * 1024 * 1024 * 1024 + ) + task_tmpl = docker.types.TaskTemplate( + container_spec, resources=resources + ) + name = self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert 'TaskTemplate' in svc_info['Spec'] + res_template = svc_info['Spec']['TaskTemplate'] + assert 'Resources' in res_template + assert res_template['Resources']['Limits'] == resources['Limits'] + assert res_template['Resources']['Reservations'] == resources[ + 'Reservations' + ] + + def _create_service_with_generic_resources(self, generic_resources): + container_spec = docker.types.ContainerSpec(TEST_IMG, ['true']) + + resources = docker.types.Resources( + generic_resources=generic_resources + ) + task_tmpl = docker.types.TaskTemplate( + container_spec, resources=resources + ) + name = self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + return resources, self.client.inspect_service(svc_id) + + @requires_api_version('1.32') + def test_create_service_with_generic_resources(self): + successful = [{ + 'input': [ + {'DiscreteResourceSpec': {'Kind': 'gpu', 'Value': 1}}, + {'NamedResourceSpec': {'Kind': 'gpu', 'Value': 'test'}} + ]}, { + 'input': {'gpu': 2, 'mpi': 'latest'}, + 'expected': [ + {'DiscreteResourceSpec': {'Kind': 'gpu', 'Value': 2}}, + {'NamedResourceSpec': {'Kind': 'mpi', 'Value': 'latest'}} + ]} + ] + + for test in successful: + t = test['input'] + resrcs, svc_info = self._create_service_with_generic_resources(t) + + assert 'TaskTemplate' in svc_info['Spec'] + res_template = svc_info['Spec']['TaskTemplate'] + assert 'Resources' in res_template + res_reservations = res_template['Resources']['Reservations'] + assert res_reservations == resrcs['Reservations'] + assert 'GenericResources' in res_reservations + + def _key(d, specs=('DiscreteResourceSpec', 'NamedResourceSpec')): + return [d.get(s, {}).get('Kind', '') for s in specs] + + actual = res_reservations['GenericResources'] + expected = test.get('expected', test['input']) + assert sorted(actual, key=_key) == sorted(expected, key=_key) + + @requires_api_version('1.32') + def test_create_service_with_invalid_generic_resources(self): + for test_input in ['1', 1.0, lambda: '1', {1, 2}]: + with pytest.raises(docker.errors.InvalidArgument): + self._create_service_with_generic_resources(test_input) + + def test_create_service_with_update_config(self): + container_spec = docker.types.ContainerSpec(TEST_IMG, ['true']) + task_tmpl = docker.types.TaskTemplate(container_spec) + update_config = docker.types.UpdateConfig( + parallelism=10, delay=5, failure_action='pause' + ) + name = self.get_service_name() + svc_id = self.client.create_service( + task_tmpl, update_config=update_config, name=name + ) + svc_info = self.client.inspect_service(svc_id) + assert 'UpdateConfig' in svc_info['Spec'] + uc = svc_info['Spec']['UpdateConfig'] + assert update_config['Parallelism'] == uc['Parallelism'] + assert update_config['Delay'] == uc['Delay'] + assert update_config['FailureAction'] == uc['FailureAction'] + + @requires_api_version('1.28') + def test_create_service_with_failure_action_rollback(self): + container_spec = docker.types.ContainerSpec(TEST_IMG, ['true']) + task_tmpl = docker.types.TaskTemplate(container_spec) + update_config = docker.types.UpdateConfig(failure_action='rollback') 
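+ # only the failure action is specified; the engine supplies defaults + # for the remaining update settings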
+ name = self.get_service_name() + svc_id = self.client.create_service( + task_tmpl, update_config=update_config, name=name + ) + svc_info = self.client.inspect_service(svc_id) + assert 'UpdateConfig' in svc_info['Spec'] + uc = svc_info['Spec']['UpdateConfig'] + assert update_config['FailureAction'] == uc['FailureAction'] + + @requires_api_version('1.25') + def test_create_service_with_update_config_monitor(self): + container_spec = docker.types.ContainerSpec('busybox', ['true']) + task_tmpl = docker.types.TaskTemplate(container_spec) + update_config = docker.types.UpdateConfig( + monitor=300000000, max_failure_ratio=0.4 + ) + name = self.get_service_name() + svc_id = self.client.create_service( + task_tmpl, update_config=update_config, name=name + ) + svc_info = self.client.inspect_service(svc_id) + assert 'UpdateConfig' in svc_info['Spec'] + uc = svc_info['Spec']['UpdateConfig'] + assert update_config['Monitor'] == uc['Monitor'] + assert update_config['MaxFailureRatio'] == uc['MaxFailureRatio'] + + @requires_api_version('1.28') + def test_create_service_with_rollback_config(self): + container_spec = docker.types.ContainerSpec(TEST_IMG, ['true']) + task_tmpl = docker.types.TaskTemplate(container_spec) + rollback_cfg = docker.types.RollbackConfig( + parallelism=10, delay=5, failure_action='pause', + monitor=300000000, max_failure_ratio=0.4 + ) + name = self.get_service_name() + svc_id = self.client.create_service( + task_tmpl, rollback_config=rollback_cfg, name=name + ) + svc_info = self.client.inspect_service(svc_id) + assert 'RollbackConfig' in svc_info['Spec'] + rc = svc_info['Spec']['RollbackConfig'] + assert rollback_cfg['Parallelism'] == rc['Parallelism'] + assert rollback_cfg['Delay'] == rc['Delay'] + assert rollback_cfg['FailureAction'] == rc['FailureAction'] + assert rollback_cfg['Monitor'] == rc['Monitor'] + assert rollback_cfg['MaxFailureRatio'] == rc['MaxFailureRatio'] + + def test_create_service_with_restart_policy(self): + container_spec = docker.types.ContainerSpec(TEST_IMG, ['true']) + policy = docker.types.RestartPolicy( + docker.types.RestartPolicy.condition_types.ANY, + delay=5, max_attempts=5 + ) + task_tmpl = docker.types.TaskTemplate( + container_spec, restart_policy=policy + ) + name = self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert 'RestartPolicy' in svc_info['Spec']['TaskTemplate'] + assert policy == svc_info['Spec']['TaskTemplate']['RestartPolicy'] + + def test_create_service_with_custom_networks(self): + net1 = self.client.create_network( + 'dockerpytest_1', driver='overlay', ipam={'Driver': 'default'} + ) + self.tmp_networks.append(net1['Id']) + net2 = self.client.create_network( + 'dockerpytest_2', driver='overlay', ipam={'Driver': 'default'} + ) + self.tmp_networks.append(net2['Id']) + container_spec = docker.types.ContainerSpec(TEST_IMG, ['true']) + task_tmpl = docker.types.TaskTemplate( + container_spec, networks=[ + 'dockerpytest_1', {'Target': 'dockerpytest_2'} + ] + ) + name = self.get_service_name() + svc_id = self.client.create_service( + task_tmpl, name=name + ) + svc_info = self.client.inspect_service(svc_id) + assert 'Networks' in svc_info['Spec']['TaskTemplate'] + assert svc_info['Spec']['TaskTemplate']['Networks'] == [ + {'Target': net1['Id']}, {'Target': net2['Id']} + ] + + def test_create_service_with_network_attachment_config(self): + network = self.client.create_network( + 'dockerpytest_1', driver='overlay', ipam={'Driver': 'default'} + ) + 
self.tmp_networks.append(network['Id']) + container_spec = docker.types.ContainerSpec(TEST_IMG, ['true']) + network_config = docker.types.NetworkAttachmentConfig( + target='dockerpytest_1', + aliases=['dockerpytest_1_alias'], + options={ + 'foo': 'bar' + } + ) + task_tmpl = docker.types.TaskTemplate( + container_spec, + networks=[network_config] + ) + name = self.get_service_name() + svc_id = self.client.create_service( + task_tmpl, name=name + ) + svc_info = self.client.inspect_service(svc_id) + assert 'Networks' in svc_info['Spec']['TaskTemplate'] + service_networks_info = svc_info['Spec']['TaskTemplate']['Networks'] + assert len(service_networks_info) == 1 + assert service_networks_info[0]['Target'] == network['Id'] + assert service_networks_info[0]['Aliases'] == ['dockerpytest_1_alias'] + assert service_networks_info[0]['DriverOpts'] == {'foo': 'bar'} + + def test_create_service_with_placement(self): + node_id = self.client.nodes()[0]['ID'] + container_spec = docker.types.ContainerSpec(TEST_IMG, ['true']) + task_tmpl = docker.types.TaskTemplate( + container_spec, placement=[f'node.id=={node_id}'] + ) + name = self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert 'Placement' in svc_info['Spec']['TaskTemplate'] + assert (svc_info['Spec']['TaskTemplate']['Placement'] == + {'Constraints': [f'node.id=={node_id}']}) + + def test_create_service_with_placement_object(self): + node_id = self.client.nodes()[0]['ID'] + container_spec = docker.types.ContainerSpec(TEST_IMG, ['true']) + placemt = docker.types.Placement( + constraints=[f'node.id=={node_id}'] + ) + task_tmpl = docker.types.TaskTemplate( + container_spec, placement=placemt + ) + name = self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert 'Placement' in svc_info['Spec']['TaskTemplate'] + assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt + + @requires_api_version('1.30') + def test_create_service_with_placement_platform(self): + container_spec = docker.types.ContainerSpec(TEST_IMG, ['true']) + placemt = docker.types.Placement(platforms=[('x86_64', 'linux')]) + task_tmpl = docker.types.TaskTemplate( + container_spec, placement=placemt + ) + name = self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert 'Placement' in svc_info['Spec']['TaskTemplate'] + assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt + + @requires_api_version('1.27') + def test_create_service_with_placement_preferences(self): + container_spec = docker.types.ContainerSpec(TEST_IMG, ['true']) + placemt = docker.types.Placement(preferences=[ + {'Spread': {'SpreadDescriptor': 'com.dockerpy.test'}} + ]) + task_tmpl = docker.types.TaskTemplate( + container_spec, placement=placemt + ) + name = self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert 'Placement' in svc_info['Spec']['TaskTemplate'] + assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt + + @requires_api_version('1.27') + def test_create_service_with_placement_preferences_tuple(self): + container_spec = docker.types.ContainerSpec(TEST_IMG, ['true']) + placemt = docker.types.Placement(preferences=( + ('spread', 'com.dockerpy.test'), + )) + task_tmpl = docker.types.TaskTemplate( + container_spec, placement=placemt + ) + name = 
self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert 'Placement' in svc_info['Spec']['TaskTemplate'] + assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt + + @requires_api_version('1.40') + def test_create_service_with_placement_maxreplicas(self): + container_spec = docker.types.ContainerSpec(TEST_IMG, ['true']) + placemt = docker.types.Placement(maxreplicas=1) + task_tmpl = docker.types.TaskTemplate( + container_spec, placement=placemt + ) + name = self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert 'Placement' in svc_info['Spec']['TaskTemplate'] + assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt + + def test_create_service_with_endpoint_spec(self): + container_spec = docker.types.ContainerSpec(TEST_IMG, ['true']) + task_tmpl = docker.types.TaskTemplate(container_spec) + name = self.get_service_name() + endpoint_spec = docker.types.EndpointSpec(ports={ + 12357: (1990, 'udp'), + 12562: (678,), + 53243: 8080, + }) + svc_id = self.client.create_service( + task_tmpl, name=name, endpoint_spec=endpoint_spec + ) + svc_info = self.client.inspect_service(svc_id) + ports = svc_info['Spec']['EndpointSpec']['Ports'] + for port in ports: + if port['PublishedPort'] == 12562: + assert port['TargetPort'] == 678 + assert port['Protocol'] == 'tcp' + elif port['PublishedPort'] == 53243: + assert port['TargetPort'] == 8080 + assert port['Protocol'] == 'tcp' + elif port['PublishedPort'] == 12357: + assert port['TargetPort'] == 1990 + assert port['Protocol'] == 'udp' + else: + self.fail(f'Invalid port specification: {port}') + + assert len(ports) == 3 + + @requires_api_version('1.32') + def test_create_service_with_endpoint_spec_host_publish_mode(self): + container_spec = docker.types.ContainerSpec(TEST_IMG, ['true']) + task_tmpl = docker.types.TaskTemplate(container_spec) + name = self.get_service_name() + endpoint_spec = docker.types.EndpointSpec(ports={ + 12357: (1990, None, 'host'), + }) + svc_id = self.client.create_service( + task_tmpl, name=name, endpoint_spec=endpoint_spec + ) + svc_info = self.client.inspect_service(svc_id) + ports = svc_info['Spec']['EndpointSpec']['Ports'] + assert len(ports) == 1 + port = ports[0] + assert port['PublishedPort'] == 12357 + assert port['TargetPort'] == 1990 + assert port['Protocol'] == 'tcp' + assert port['PublishMode'] == 'host' + + def test_create_service_with_env(self): + container_spec = docker.types.ContainerSpec( + TEST_IMG, ['true'], env={'DOCKER_PY_TEST': 1} + ) + task_tmpl = docker.types.TaskTemplate( + container_spec, + ) + name = self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert 'TaskTemplate' in svc_info['Spec'] + assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate'] + con_spec = svc_info['Spec']['TaskTemplate']['ContainerSpec'] + assert 'Env' in con_spec + assert con_spec['Env'] == ['DOCKER_PY_TEST=1'] + + @requires_api_version('1.29') + def test_create_service_with_update_order(self): + container_spec = docker.types.ContainerSpec(TEST_IMG, ['true']) + task_tmpl = docker.types.TaskTemplate(container_spec) + update_config = docker.types.UpdateConfig( + parallelism=10, delay=5, order='start-first' + ) + name = self.get_service_name() + svc_id = self.client.create_service( + task_tmpl, update_config=update_config, name=name + ) + svc_info = 
self.client.inspect_service(svc_id) + assert 'UpdateConfig' in svc_info['Spec'] + uc = svc_info['Spec']['UpdateConfig'] + assert update_config['Parallelism'] == uc['Parallelism'] + assert update_config['Delay'] == uc['Delay'] + assert update_config['Order'] == uc['Order'] + + @requires_api_version('1.25') + def test_create_service_with_tty(self): + container_spec = docker.types.ContainerSpec( + TEST_IMG, ['true'], tty=True + ) + task_tmpl = docker.types.TaskTemplate( + container_spec, + ) + name = self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert 'TaskTemplate' in svc_info['Spec'] + assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate'] + con_spec = svc_info['Spec']['TaskTemplate']['ContainerSpec'] + assert 'TTY' in con_spec + assert con_spec['TTY'] is True + + @requires_api_version('1.25') + def test_create_service_with_tty_dict(self): + container_spec = { + 'Image': TEST_IMG, + 'Command': ['true'], + 'TTY': True + } + task_tmpl = docker.types.TaskTemplate(container_spec) + name = self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert 'TaskTemplate' in svc_info['Spec'] + assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate'] + con_spec = svc_info['Spec']['TaskTemplate']['ContainerSpec'] + assert 'TTY' in con_spec + assert con_spec['TTY'] is True + + def test_create_service_global_mode(self): + container_spec = docker.types.ContainerSpec( + TEST_IMG, ['echo', 'hello'] + ) + task_tmpl = docker.types.TaskTemplate(container_spec) + name = self.get_service_name() + svc_id = self.client.create_service( + task_tmpl, name=name, mode='global' + ) + svc_info = self.client.inspect_service(svc_id) + assert 'Mode' in svc_info['Spec'] + assert 'Global' in svc_info['Spec']['Mode'] + + def test_create_service_replicated_mode(self): + container_spec = docker.types.ContainerSpec( + TEST_IMG, ['echo', 'hello'] + ) + task_tmpl = docker.types.TaskTemplate(container_spec) + name = self.get_service_name() + svc_id = self.client.create_service( + task_tmpl, name=name, + mode=docker.types.ServiceMode('replicated', 5) + ) + svc_info = self.client.inspect_service(svc_id) + assert 'Mode' in svc_info['Spec'] + assert 'Replicated' in svc_info['Spec']['Mode'] + assert svc_info['Spec']['Mode']['Replicated'] == {'Replicas': 5} + + @requires_api_version('1.41') + def test_create_service_global_job_mode(self): + container_spec = docker.types.ContainerSpec( + TEST_IMG, ['echo', 'hello'] + ) + task_tmpl = docker.types.TaskTemplate(container_spec) + name = self.get_service_name() + svc_id = self.client.create_service( + task_tmpl, name=name, mode='global-job' + ) + svc_info = self.client.inspect_service(svc_id) + assert 'Mode' in svc_info['Spec'] + assert 'GlobalJob' in svc_info['Spec']['Mode'] + + @requires_api_version('1.41') + def test_create_service_replicated_job_mode(self): + container_spec = docker.types.ContainerSpec( + TEST_IMG, ['echo', 'hello'] + ) + task_tmpl = docker.types.TaskTemplate(container_spec) + name = self.get_service_name() + svc_id = self.client.create_service( + task_tmpl, name=name, + mode=docker.types.ServiceMode('replicated-job', 5) + ) + svc_info = self.client.inspect_service(svc_id) + assert 'Mode' in svc_info['Spec'] + assert 'ReplicatedJob' in svc_info['Spec']['Mode'] + assert svc_info['Spec']['Mode']['ReplicatedJob'] == { + 'MaxConcurrent': 1, + 'TotalCompletions': 5 + } + + @requires_api_version('1.25') + def 
test_update_service_force_update(self): + container_spec = docker.types.ContainerSpec( + 'busybox', ['echo', 'hello'] + ) + task_tmpl = docker.types.TaskTemplate(container_spec) + name = self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert 'TaskTemplate' in svc_info['Spec'] + assert 'ForceUpdate' in svc_info['Spec']['TaskTemplate'] + assert svc_info['Spec']['TaskTemplate']['ForceUpdate'] == 0 + version_index = svc_info['Version']['Index'] + + task_tmpl = docker.types.TaskTemplate(container_spec, force_update=10) + self.client.update_service(name, version_index, task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + new_index = svc_info['Version']['Index'] + assert new_index > version_index + assert svc_info['Spec']['TaskTemplate']['ForceUpdate'] == 10 + + @requires_api_version('1.25') + def test_create_service_with_secret(self): + secret_name = 'favorite_touhou' + secret_data = b'phantasmagoria of flower view' + secret_id = self.client.create_secret(secret_name, secret_data) + self.tmp_secrets.append(secret_id) + secret_ref = docker.types.SecretReference(secret_id, secret_name) + container_spec = docker.types.ContainerSpec( + 'busybox', ['sleep', '999'], secrets=[secret_ref] + ) + task_tmpl = docker.types.TaskTemplate(container_spec) + name = self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert 'Secrets' in svc_info['Spec']['TaskTemplate']['ContainerSpec'] + secrets = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Secrets'] + assert secrets[0] == secret_ref + + container = self.get_service_container(name) + assert container is not None + exec_id = self.client.exec_create( + container, f'cat /run/secrets/{secret_name}' + ) + assert self.client.exec_start(exec_id) == secret_data + + @requires_api_version('1.25') + def test_create_service_with_unicode_secret(self): + secret_name = 'favorite_touhou' + secret_data = '東方花映塚' + secret_id = self.client.create_secret(secret_name, secret_data) + self.tmp_secrets.append(secret_id) + secret_ref = docker.types.SecretReference(secret_id, secret_name) + container_spec = docker.types.ContainerSpec( + 'busybox', ['sleep', '999'], secrets=[secret_ref] + ) + task_tmpl = docker.types.TaskTemplate(container_spec) + name = self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert 'Secrets' in svc_info['Spec']['TaskTemplate']['ContainerSpec'] + secrets = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Secrets'] + assert secrets[0] == secret_ref + + container = self.get_service_container(name) + assert container is not None + exec_id = self.client.exec_create( + container, f'cat /run/secrets/{secret_name}' + ) + container_secret = self.client.exec_start(exec_id) + container_secret = container_secret.decode('utf-8') + assert container_secret == secret_data + + @requires_api_version('1.30') + def test_create_service_with_config(self): + config_name = 'favorite_touhou' + config_data = b'phantasmagoria of flower view' + config_id = self.client.create_config(config_name, config_data) + self.tmp_configs.append(config_id) + config_ref = docker.types.ConfigReference(config_id, config_name) + container_spec = docker.types.ContainerSpec( + 'busybox', ['sleep', '999'], configs=[config_ref] + ) + task_tmpl = docker.types.TaskTemplate(container_spec) + name = self.get_service_name() + svc_id = 
self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert 'Configs' in svc_info['Spec']['TaskTemplate']['ContainerSpec'] + configs = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Configs'] + assert configs[0] == config_ref + + container = self.get_service_container(name) + assert container is not None + exec_id = self.client.exec_create( + container, f'cat /{config_name}' + ) + assert self.client.exec_start(exec_id) == config_data + + @requires_api_version('1.30') + def test_create_service_with_unicode_config(self): + config_name = 'favorite_touhou' + config_data = '東方花映塚' + config_id = self.client.create_config(config_name, config_data) + self.tmp_configs.append(config_id) + config_ref = docker.types.ConfigReference(config_id, config_name) + container_spec = docker.types.ContainerSpec( + 'busybox', ['sleep', '999'], configs=[config_ref] + ) + task_tmpl = docker.types.TaskTemplate(container_spec) + name = self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert 'Configs' in svc_info['Spec']['TaskTemplate']['ContainerSpec'] + configs = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Configs'] + assert configs[0] == config_ref + + container = self.get_service_container(name) + assert container is not None + exec_id = self.client.exec_create( + container, f'cat /{config_name}' + ) + container_config = self.client.exec_start(exec_id) + container_config = container_config.decode('utf-8') + assert container_config == config_data + + @requires_api_version('1.25') + def test_create_service_with_hosts(self): + container_spec = docker.types.ContainerSpec( + 'busybox', ['sleep', '999'], hosts={ + 'foobar': '127.0.0.1', + 'baz': '8.8.8.8', + } + ) + task_tmpl = docker.types.TaskTemplate(container_spec) + name = self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert 'Hosts' in svc_info['Spec']['TaskTemplate']['ContainerSpec'] + hosts = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Hosts'] + assert len(hosts) == 2 + assert '127.0.0.1 foobar' in hosts + assert '8.8.8.8 baz' in hosts + + @requires_api_version('1.25') + def test_create_service_with_hostname(self): + container_spec = docker.types.ContainerSpec( + 'busybox', ['sleep', '999'], hostname='foobar.baz.com' + ) + task_tmpl = docker.types.TaskTemplate(container_spec) + name = self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert 'Hostname' in svc_info['Spec']['TaskTemplate']['ContainerSpec'] + assert ( + svc_info['Spec']['TaskTemplate']['ContainerSpec']['Hostname'] == + 'foobar.baz.com' + ) + + @requires_api_version('1.25') + def test_create_service_with_groups(self): + container_spec = docker.types.ContainerSpec( + 'busybox', ['sleep', '999'], groups=['shrinemaidens', 'youkais'] + ) + task_tmpl = docker.types.TaskTemplate(container_spec) + name = self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert 'Groups' in svc_info['Spec']['TaskTemplate']['ContainerSpec'] + groups = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Groups'] + assert len(groups) == 2 + assert 'shrinemaidens' in groups + assert 'youkais' in groups + + @requires_api_version('1.25') + def test_create_service_with_dns_config(self): + dns_config = docker.types.DNSConfig( + 
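# resolv.conf-style settings (nameservers, search domains, resolver + # options) applied to the service's tasks + 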
nameservers=['8.8.8.8', '8.8.4.4'], + search=['local'], options=['debug'] + ) + container_spec = docker.types.ContainerSpec( + TEST_IMG, ['sleep', '999'], dns_config=dns_config + ) + task_tmpl = docker.types.TaskTemplate(container_spec) + name = self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert 'DNSConfig' in svc_info['Spec']['TaskTemplate']['ContainerSpec'] + assert ( + dns_config == + svc_info['Spec']['TaskTemplate']['ContainerSpec']['DNSConfig'] + ) + + @requires_api_version('1.25') + def test_create_service_with_healthcheck(self): + second = 1000000000 + hc = docker.types.Healthcheck( + test='true', retries=3, timeout=1 * second, + start_period=3 * second, interval=int(second / 2), + ) + container_spec = docker.types.ContainerSpec( + TEST_IMG, ['sleep', '999'], healthcheck=hc + ) + task_tmpl = docker.types.TaskTemplate(container_spec) + name = self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert ( + 'Healthcheck' in svc_info['Spec']['TaskTemplate']['ContainerSpec'] + ) + assert ( + hc == + svc_info['Spec']['TaskTemplate']['ContainerSpec']['Healthcheck'] + ) + + @requires_api_version('1.28') + def test_create_service_with_readonly(self): + container_spec = docker.types.ContainerSpec( + TEST_IMG, ['sleep', '999'], read_only=True + ) + task_tmpl = docker.types.TaskTemplate(container_spec) + name = self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert ( + 'ReadOnly' in svc_info['Spec']['TaskTemplate']['ContainerSpec'] + ) + assert svc_info['Spec']['TaskTemplate']['ContainerSpec']['ReadOnly'] + + @requires_api_version('1.28') + def test_create_service_with_stop_signal(self): + container_spec = docker.types.ContainerSpec( + TEST_IMG, ['sleep', '999'], stop_signal='SIGINT' + ) + task_tmpl = docker.types.TaskTemplate(container_spec) + name = self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert ( + 'StopSignal' in svc_info['Spec']['TaskTemplate']['ContainerSpec'] + ) + assert ( + svc_info['Spec']['TaskTemplate']['ContainerSpec']['StopSignal'] == + 'SIGINT' + ) + + @requires_api_version('1.30') + def test_create_service_with_privileges(self): + priv = docker.types.Privileges(selinux_disable=True) + container_spec = docker.types.ContainerSpec( + TEST_IMG, ['sleep', '999'], privileges=priv + ) + task_tmpl = docker.types.TaskTemplate(container_spec) + name = self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert ( + 'Privileges' in svc_info['Spec']['TaskTemplate']['ContainerSpec'] + ) + privileges = ( + svc_info['Spec']['TaskTemplate']['ContainerSpec']['Privileges'] + ) + assert privileges['SELinuxContext']['Disable'] is True + + @requires_api_version('1.38') + def test_create_service_with_init(self): + container_spec = docker.types.ContainerSpec( + 'busybox', ['sleep', '999'], init=True + ) + task_tmpl = docker.types.TaskTemplate(container_spec) + name = self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert 'Init' in svc_info['Spec']['TaskTemplate']['ContainerSpec'] + assert ( + svc_info['Spec']['TaskTemplate']['ContainerSpec']['Init'] is True + ) + + 
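# The tests below call update_service() with fetch_current_spec=True, + # verifying that fields omitted from an update keep their current values + # instead of being reset to defaults. + + 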
@requires_api_version('1.25') + def test_update_service_with_defaults_name(self): + container_spec = docker.types.ContainerSpec( + 'busybox', ['echo', 'hello'] + ) + task_tmpl = docker.types.TaskTemplate(container_spec) + name = self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert 'Name' in svc_info['Spec'] + assert svc_info['Spec']['Name'] == name + version_index = svc_info['Version']['Index'] + + task_tmpl = docker.types.TaskTemplate(container_spec, force_update=10) + self._update_service( + svc_id, name, version_index, task_tmpl, fetch_current_spec=True + ) + svc_info = self.client.inspect_service(svc_id) + new_index = svc_info['Version']['Index'] + assert new_index > version_index + assert 'Name' in svc_info['Spec'] + assert svc_info['Spec']['Name'] == name + + @requires_api_version('1.25') + def test_update_service_with_defaults_labels(self): + container_spec = docker.types.ContainerSpec( + 'busybox', ['echo', 'hello'] + ) + task_tmpl = docker.types.TaskTemplate(container_spec) + name = self.get_service_name() + svc_id = self.client.create_service( + task_tmpl, name=name, labels={'service.label': 'SampleLabel'} + ) + svc_info = self.client.inspect_service(svc_id) + assert 'Labels' in svc_info['Spec'] + assert 'service.label' in svc_info['Spec']['Labels'] + assert svc_info['Spec']['Labels']['service.label'] == 'SampleLabel' + version_index = svc_info['Version']['Index'] + + task_tmpl = docker.types.TaskTemplate(container_spec, force_update=10) + self._update_service( + svc_id, name, version_index, task_tmpl, name=name, + fetch_current_spec=True + ) + svc_info = self.client.inspect_service(svc_id) + new_index = svc_info['Version']['Index'] + assert new_index > version_index + assert 'Labels' in svc_info['Spec'] + assert 'service.label' in svc_info['Spec']['Labels'] + assert svc_info['Spec']['Labels']['service.label'] == 'SampleLabel' + + def test_update_service_with_defaults_mode(self): + container_spec = docker.types.ContainerSpec( + 'busybox', ['echo', 'hello'] + ) + task_tmpl = docker.types.TaskTemplate(container_spec) + name = self.get_service_name() + svc_id = self.client.create_service( + task_tmpl, name=name, + mode=docker.types.ServiceMode(mode='replicated', replicas=2) + ) + svc_info = self.client.inspect_service(svc_id) + assert 'Mode' in svc_info['Spec'] + assert 'Replicated' in svc_info['Spec']['Mode'] + assert 'Replicas' in svc_info['Spec']['Mode']['Replicated'] + assert svc_info['Spec']['Mode']['Replicated']['Replicas'] == 2 + version_index = svc_info['Version']['Index'] + + self._update_service( + svc_id, name, version_index, labels={'force': 'update'}, + fetch_current_spec=True + ) + svc_info = self.client.inspect_service(svc_id) + new_index = svc_info['Version']['Index'] + assert new_index > version_index + assert 'Mode' in svc_info['Spec'] + assert 'Replicated' in svc_info['Spec']['Mode'] + assert 'Replicas' in svc_info['Spec']['Mode']['Replicated'] + assert svc_info['Spec']['Mode']['Replicated']['Replicas'] == 2 + + def test_update_service_with_defaults_container_labels(self): + container_spec = docker.types.ContainerSpec( + 'busybox', ['echo', 'hello'], + labels={'container.label': 'SampleLabel'} + ) + task_tmpl = docker.types.TaskTemplate(container_spec) + name = self.get_service_name() + svc_id = self.client.create_service( + task_tmpl, name=name, labels={'service.label': 'SampleLabel'} + ) + svc_info = self.client.inspect_service(svc_id) + assert 'TaskTemplate' in 
svc_info['Spec'] + assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate'] + assert 'Labels' in svc_info['Spec']['TaskTemplate']['ContainerSpec'] + labels = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Labels'] + assert labels['container.label'] == 'SampleLabel' + version_index = svc_info['Version']['Index'] + + self._update_service( + svc_id, name, version_index, labels={'force': 'update'}, + fetch_current_spec=True + ) + svc_info = self.client.inspect_service(svc_id) + new_index = svc_info['Version']['Index'] + assert new_index > version_index + assert 'TaskTemplate' in svc_info['Spec'] + assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate'] + assert 'Labels' in svc_info['Spec']['TaskTemplate']['ContainerSpec'] + labels = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Labels'] + assert labels['container.label'] == 'SampleLabel' + + container_spec = docker.types.ContainerSpec( + 'busybox', ['echo', 'hello'] + ) + task_tmpl = docker.types.TaskTemplate(container_spec) + self._update_service( + svc_id, name, new_index, task_tmpl, fetch_current_spec=True + ) + svc_info = self.client.inspect_service(svc_id) + newer_index = svc_info['Version']['Index'] + assert newer_index > new_index + assert 'TaskTemplate' in svc_info['Spec'] + assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate'] + assert 'Labels' in svc_info['Spec']['TaskTemplate']['ContainerSpec'] + labels = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Labels'] + assert labels['container.label'] == 'SampleLabel' + + def test_update_service_with_defaults_update_config(self): + container_spec = docker.types.ContainerSpec(TEST_IMG, ['true']) + task_tmpl = docker.types.TaskTemplate(container_spec) + update_config = docker.types.UpdateConfig( + parallelism=10, delay=5, failure_action='pause' + ) + name = self.get_service_name() + svc_id = self.client.create_service( + task_tmpl, update_config=update_config, name=name + ) + svc_info = self.client.inspect_service(svc_id) + assert 'UpdateConfig' in svc_info['Spec'] + uc = svc_info['Spec']['UpdateConfig'] + assert update_config['Parallelism'] == uc['Parallelism'] + assert update_config['Delay'] == uc['Delay'] + assert update_config['FailureAction'] == uc['FailureAction'] + version_index = svc_info['Version']['Index'] + + self._update_service( + svc_id, name, version_index, labels={'force': 'update'}, + fetch_current_spec=True + ) + svc_info = self.client.inspect_service(svc_id) + new_index = svc_info['Version']['Index'] + assert new_index > version_index + assert 'UpdateConfig' in svc_info['Spec'] + uc = svc_info['Spec']['UpdateConfig'] + assert update_config['Parallelism'] == uc['Parallelism'] + assert update_config['Delay'] == uc['Delay'] + assert update_config['FailureAction'] == uc['FailureAction'] + + def test_update_service_with_defaults_networks(self): + net1 = self.client.create_network( + 'dockerpytest_1', driver='overlay', ipam={'Driver': 'default'} + ) + self.tmp_networks.append(net1['Id']) + net2 = self.client.create_network( + 'dockerpytest_2', driver='overlay', ipam={'Driver': 'default'} + ) + self.tmp_networks.append(net2['Id']) + container_spec = docker.types.ContainerSpec(TEST_IMG, ['true']) + task_tmpl = docker.types.TaskTemplate( + container_spec, networks=[ + 'dockerpytest_1', {'Target': 'dockerpytest_2'} + ] + ) + name = self.get_service_name() + svc_id = self.client.create_service( + task_tmpl, name=name + ) + svc_info = self.client.inspect_service(svc_id) + assert 'Networks' in svc_info['Spec']['TaskTemplate'] + assert 
svc_info['Spec']['TaskTemplate']['Networks'] == [ + {'Target': net1['Id']}, {'Target': net2['Id']} + ] + + version_index = svc_info['Version']['Index'] + + self._update_service( + svc_id, name, version_index, labels={'force': 'update'}, + fetch_current_spec=True + ) + svc_info = self.client.inspect_service(svc_id) + new_index = svc_info['Version']['Index'] + assert new_index > version_index + assert 'Networks' in svc_info['Spec']['TaskTemplate'] + assert svc_info['Spec']['TaskTemplate']['Networks'] == [ + {'Target': net1['Id']}, {'Target': net2['Id']} + ] + + task_tmpl = docker.types.TaskTemplate( + container_spec, networks=[net1['Id']] + ) + self._update_service( + svc_id, name, new_index, task_tmpl, + fetch_current_spec=True + ) + svc_info = self.client.inspect_service(svc_id) + assert 'Networks' in svc_info['Spec']['TaskTemplate'] + assert svc_info['Spec']['TaskTemplate']['Networks'] == [ + {'Target': net1['Id']} + ] + + def test_update_service_with_defaults_endpoint_spec(self): + container_spec = docker.types.ContainerSpec(TEST_IMG, ['true']) + task_tmpl = docker.types.TaskTemplate(container_spec) + name = self.get_service_name() + endpoint_spec = docker.types.EndpointSpec(ports={ + 12357: (1990, 'udp'), + 12562: (678,), + 53243: 8080, + }) + svc_id = self.client.create_service( + task_tmpl, name=name, endpoint_spec=endpoint_spec + ) + svc_info = self.client.inspect_service(svc_id) + # the engine echoes the endpoint spec back on inspect + ports = svc_info['Spec']['EndpointSpec']['Ports'] + for port in ports: + if port['PublishedPort'] == 12562: + assert port['TargetPort'] == 678 + assert port['Protocol'] == 'tcp' + elif port['PublishedPort'] == 53243: + assert port['TargetPort'] == 8080 + assert port['Protocol'] == 'tcp' + elif port['PublishedPort'] == 12357: + assert port['TargetPort'] == 1990 + assert port['Protocol'] == 'udp' + else: + self.fail(f'Invalid port specification: {port}') + + assert len(ports) == 3 + + svc_info = self.client.inspect_service(svc_id) + version_index = svc_info['Version']['Index'] + + self._update_service( + svc_id, name, version_index, labels={'force': 'update'}, + fetch_current_spec=True + ) + svc_info = self.client.inspect_service(svc_id) + new_index = svc_info['Version']['Index'] + assert new_index > version_index + + ports = svc_info['Spec']['EndpointSpec']['Ports'] + for port in ports: + if port['PublishedPort'] == 12562: + assert port['TargetPort'] == 678 + assert port['Protocol'] == 'tcp' + elif port['PublishedPort'] == 53243: + assert port['TargetPort'] == 8080 + assert port['Protocol'] == 'tcp' + elif port['PublishedPort'] == 12357: + assert port['TargetPort'] == 1990 + assert port['Protocol'] == 'udp' + else: + self.fail(f'Invalid port specification: {port}') + + assert len(ports) == 3 + + @requires_api_version('1.25') + def test_update_service_remove_healthcheck(self): + second = 1000000000  # Engine API durations are expressed in nanoseconds + hc = docker.types.Healthcheck( + test='true', retries=3, timeout=1 * second, + start_period=3 * second, interval=int(second / 2), + ) + container_spec = docker.types.ContainerSpec( + TEST_IMG, ['sleep', '999'], healthcheck=hc + ) + task_tmpl = docker.types.TaskTemplate(container_spec) + name = self.get_service_name() + svc_id = self.client.create_service(task_tmpl, name=name) + svc_info = self.client.inspect_service(svc_id) + assert ( + 'Healthcheck' in svc_info['Spec']['TaskTemplate']['ContainerSpec'] + ) + assert ( + hc == + svc_info['Spec']['TaskTemplate']['ContainerSpec']['Healthcheck'] + ) + + container_spec = docker.types.ContainerSpec( + TEST_IMG, ['sleep', '999'], healthcheck={} + ) + 
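# updating with an empty healthcheck dict clears the existing healthcheck + 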
task_tmpl = docker.types.TaskTemplate(container_spec) + + version_index = svc_info['Version']['Index'] + + self._update_service( + svc_id, name, version_index, task_tmpl, fetch_current_spec=True + ) + svc_info = self.client.inspect_service(svc_id) + new_index = svc_info['Version']['Index'] + assert new_index > version_index + container_spec = svc_info['Spec']['TaskTemplate']['ContainerSpec'] + assert ( + 'Healthcheck' not in container_spec or + not container_spec['Healthcheck'] + ) + + def test_update_service_remove_labels(self): + container_spec = docker.types.ContainerSpec( + 'busybox', ['echo', 'hello'] + ) + task_tmpl = docker.types.TaskTemplate(container_spec) + name = self.get_service_name() + svc_id = self.client.create_service( + task_tmpl, name=name, labels={'service.label': 'SampleLabel'} + ) + svc_info = self.client.inspect_service(svc_id) + assert 'Labels' in svc_info['Spec'] + assert 'service.label' in svc_info['Spec']['Labels'] + assert svc_info['Spec']['Labels']['service.label'] == 'SampleLabel' + version_index = svc_info['Version']['Index'] + + self._update_service( + svc_id, name, version_index, labels={}, fetch_current_spec=True + ) + svc_info = self.client.inspect_service(svc_id) + new_index = svc_info['Version']['Index'] + assert new_index > version_index + assert not svc_info['Spec'].get('Labels') + + def test_update_service_remove_container_labels(self): + container_spec = docker.types.ContainerSpec( + 'busybox', ['echo', 'hello'], + labels={'container.label': 'SampleLabel'} + ) + task_tmpl = docker.types.TaskTemplate(container_spec) + name = self.get_service_name() + svc_id = self.client.create_service( + task_tmpl, name=name, labels={'service.label': 'SampleLabel'} + ) + svc_info = self.client.inspect_service(svc_id) + assert 'TaskTemplate' in svc_info['Spec'] + assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate'] + assert 'Labels' in svc_info['Spec']['TaskTemplate']['ContainerSpec'] + labels = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Labels'] + assert labels['container.label'] == 'SampleLabel' + version_index = svc_info['Version']['Index'] + + container_spec = docker.types.ContainerSpec( + 'busybox', ['echo', 'hello'], + labels={} + ) + task_tmpl = docker.types.TaskTemplate(container_spec) + self._update_service( + svc_id, name, version_index, task_tmpl, fetch_current_spec=True + ) + svc_info = self.client.inspect_service(svc_id) + new_index = svc_info['Version']['Index'] + assert new_index > version_index + assert 'TaskTemplate' in svc_info['Spec'] + assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate'] + container_spec = svc_info['Spec']['TaskTemplate']['ContainerSpec'] + assert not container_spec.get('Labels') + + @requires_api_version('1.29') + def test_update_service_with_network_change(self): + container_spec = docker.types.ContainerSpec( + 'busybox', ['echo', 'hello'] + ) + net1 = self.client.create_network( + self.get_service_name(), driver='overlay', + ipam={'Driver': 'default'} + ) + self.tmp_networks.append(net1['Id']) + net2 = self.client.create_network( + self.get_service_name(), driver='overlay', + ipam={'Driver': 'default'} + ) + self.tmp_networks.append(net2['Id']) + task_tmpl = docker.types.TaskTemplate( + container_spec, networks=[net1['Id']] + ) + name = self.get_service_name() + svc_id = self.client.create_service( + task_tmpl, name=name + ) + svc_info = self.client.inspect_service(svc_id) + assert 'Networks' in svc_info['Spec']['TaskTemplate'] + assert len(svc_info['Spec']['TaskTemplate']['Networks']) > 0 + assert 
svc_info['Spec']['TaskTemplate']['Networks'][0]['Target'] == net1['Id'] + + svc_info = self.client.inspect_service(svc_id) + version_index = svc_info['Version']['Index'] + + task_tmpl = docker.types.TaskTemplate( + container_spec, networks=[net2['Id']] + ) + self._update_service( + svc_id, name, version_index, task_tmpl, name=name, + fetch_current_spec=True + ) + svc_info = self.client.inspect_service(svc_id) + task_template = svc_info['Spec']['TaskTemplate'] + assert 'Networks' in task_template + assert len(task_template['Networks']) > 0 + assert task_template['Networks'][0]['Target'] == net2['Id'] + + svc_info = self.client.inspect_service(svc_id) + new_index = svc_info['Version']['Index'] + assert new_index > version_index + + task_tmpl = docker.types.TaskTemplate( + container_spec, networks=[net1['Id']] + ) + self._update_service( + svc_id, name, new_index, task_tmpl, name=name, + fetch_current_spec=True + ) + svc_info = self.client.inspect_service(svc_id) + task_template = svc_info['Spec']['TaskTemplate'] + assert 'ContainerSpec' in task_template + new_spec = task_template['ContainerSpec'] + assert 'Image' in new_spec + assert new_spec['Image'].split(':')[0] == 'busybox' + assert 'Command' in new_spec + assert new_spec['Command'] == ['echo', 'hello'] + assert 'Networks' in task_template + assert len(task_template['Networks']) > 0 + assert task_template['Networks'][0]['Target'] == net1['Id'] + + svc_info = self.client.inspect_service(svc_id) + new_index = svc_info['Version']['Index'] + + task_tmpl = docker.types.TaskTemplate( + container_spec, networks=[net2['Id']] + ) + self._update_service( + svc_id, name, new_index, task_tmpl, name=name, + fetch_current_spec=True + ) + svc_info = self.client.inspect_service(svc_id) + task_template = svc_info['Spec']['TaskTemplate'] + assert 'Networks' in task_template + assert len(task_template['Networks']) > 0 + assert task_template['Networks'][0]['Target'] == net2['Id'] + + def _update_service(self, svc_id, *args, **kwargs): + # service update tests seem to be a bit flaky + # give them a chance to retry the update with a new version index + try: + self.client.update_service(*args, **kwargs) + except docker.errors.APIError as e: + if e.explanation.endswith("update out of sequence"): + svc_info = self.client.inspect_service(svc_id) + version_index = svc_info['Version']['Index'] + + if len(args) > 1: + args = (args[0], version_index) + args[2:] + else: + kwargs['version'] = version_index + + self.client.update_service(*args, **kwargs) + else: + raise + + @requires_api_version('1.41') + def test_create_service_cap_add(self): + name = self.get_service_name() + container_spec = docker.types.ContainerSpec( + TEST_IMG, ['echo', 'hello'], cap_add=['CAP_SYSLOG'] + ) + task_tmpl = docker.types.TaskTemplate(container_spec) + svc_id = self.client.create_service(task_tmpl, name=name) + assert self.client.inspect_service(svc_id) + services = self.client.services(filters={'name': name}) + assert len(services) == 1 + assert services[0]['ID'] == svc_id['ID'] + spec = services[0]['Spec']['TaskTemplate']['ContainerSpec'] + assert 'CAP_SYSLOG' in spec['CapabilityAdd'] + + @requires_api_version('1.41') + def test_create_service_cap_drop(self): + name = self.get_service_name() + container_spec = docker.types.ContainerSpec( + TEST_IMG, ['echo', 'hello'], cap_drop=['CAP_SYSLOG'] + ) + task_tmpl = docker.types.TaskTemplate(container_spec) + svc_id = self.client.create_service(task_tmpl, name=name) + assert self.client.inspect_service(svc_id) + services = 
self.client.services(filters={'name': name}) + assert len(services) == 1 + assert services[0]['ID'] == svc_id['ID'] + spec = services[0]['Spec']['TaskTemplate']['ContainerSpec'] + assert 'CAP_SYSLOG' in spec['CapabilityDrop'] + + @requires_api_version('1.40') + def test_create_service_with_sysctl(self): + name = self.get_service_name() + sysctls = { + 'net.core.somaxconn': '1024', + 'net.ipv4.tcp_syncookies': '0', + } + container_spec = docker.types.ContainerSpec( + TEST_IMG, ['echo', 'hello'], sysctls=sysctls + ) + task_tmpl = docker.types.TaskTemplate(container_spec) + svc_id = self.client.create_service(task_tmpl, name=name) + assert self.client.inspect_service(svc_id) + services = self.client.services(filters={'name': name}) + assert len(services) == 1 + assert services[0]['ID'] == svc_id['ID'] + spec = services[0]['Spec']['TaskTemplate']['ContainerSpec'] + assert spec['Sysctls']['net.core.somaxconn'] == '1024' + assert spec['Sysctls']['net.ipv4.tcp_syncookies'] == '0' diff --git a/tests/integration/api_swarm_test.py b/tests/integration/api_swarm_test.py new file mode 100644 index 0000000000..00477e1036 --- /dev/null +++ b/tests/integration/api_swarm_test.py @@ -0,0 +1,262 @@ +import copy + +import pytest + +import docker + +from ..helpers import force_leave_swarm, requires_api_version +from .base import BaseAPIIntegrationTest + + +class SwarmTest(BaseAPIIntegrationTest): + def setUp(self): + super().setUp() + force_leave_swarm(self.client) + self._unlock_key = None + + def tearDown(self): + try: + if self._unlock_key: + self.client.unlock_swarm(self._unlock_key) + except docker.errors.APIError: + pass + force_leave_swarm(self.client) + super().tearDown() + + @requires_api_version('1.24') + def test_init_swarm_simple(self): + assert self.init_swarm() + + @requires_api_version('1.24') + def test_init_swarm_force_new_cluster(self): + pytest.skip('Test stalls the engine on 1.12.0') + + assert self.init_swarm() + version_1 = self.client.inspect_swarm()['Version']['Index'] + assert self.client.init_swarm(force_new_cluster=True) + version_2 = self.client.inspect_swarm()['Version']['Index'] + assert version_2 != version_1 + + @requires_api_version('1.39') + def test_init_swarm_custom_addr_pool_defaults(self): + assert self.init_swarm() + results = self.client.inspect_swarm() + assert set(results['DefaultAddrPool']) == {'10.0.0.0/8'} + assert results['SubnetSize'] == 24 + + @requires_api_version('1.39') + def test_init_swarm_custom_addr_pool_only_pool(self): + assert self.init_swarm(default_addr_pool=['2.0.0.0/16']) + results = self.client.inspect_swarm() + assert set(results['DefaultAddrPool']) == {'2.0.0.0/16'} + assert results['SubnetSize'] == 24 + + @requires_api_version('1.39') + def test_init_swarm_custom_addr_pool_only_subnet_size(self): + assert self.init_swarm(subnet_size=26) + results = self.client.inspect_swarm() + assert set(results['DefaultAddrPool']) == {'10.0.0.0/8'} + assert results['SubnetSize'] == 26 + + @requires_api_version('1.39') + def test_init_swarm_custom_addr_pool_both_args(self): + assert self.init_swarm(default_addr_pool=['2.0.0.0/16', '3.0.0.0/16'], + subnet_size=28) + results = self.client.inspect_swarm() + assert set(results['DefaultAddrPool']) == {'2.0.0.0/16', '3.0.0.0/16'} + assert results['SubnetSize'] == 28 + + @requires_api_version('1.24') + def test_init_already_in_cluster(self): + assert self.init_swarm() + with pytest.raises(docker.errors.APIError): + self.init_swarm() + + @requires_api_version('1.24') + def test_init_swarm_custom_raft_spec(self): + spec 
= self.client.create_swarm_spec( + snapshot_interval=5000, log_entries_for_slow_followers=1200 + ) + assert self.init_swarm(swarm_spec=spec) + swarm_info = self.client.inspect_swarm() + assert swarm_info['Spec']['Raft']['SnapshotInterval'] == 5000 + assert swarm_info['Spec']['Raft']['LogEntriesForSlowFollowers'] == 1200 + + @requires_api_version('1.30') + def test_init_swarm_with_ca_config(self): + spec = self.client.create_swarm_spec( + node_cert_expiry=7776000000000000, ca_force_rotate=6000000000000 + ) + + assert self.init_swarm(swarm_spec=spec) + swarm_info = self.client.inspect_swarm() + assert swarm_info['Spec']['CAConfig']['NodeCertExpiry'] == ( + spec['CAConfig']['NodeCertExpiry'] + ) + assert swarm_info['Spec']['CAConfig']['ForceRotate'] == ( + spec['CAConfig']['ForceRotate'] + ) + + @requires_api_version('1.25') + def test_init_swarm_with_autolock_managers(self): + spec = self.client.create_swarm_spec(autolock_managers=True) + assert self.init_swarm(swarm_spec=spec) + # save unlock key for tearDown + self._unlock_key = self.client.get_unlock_key() + swarm_info = self.client.inspect_swarm() + + assert ( + swarm_info['Spec']['EncryptionConfig']['AutoLockManagers'] is True + ) + + assert self._unlock_key.get('UnlockKey') + + @requires_api_version('1.25') + @pytest.mark.xfail( + reason="This doesn't seem to be taken into account by the engine" + ) + def test_init_swarm_with_log_driver(self): + spec = {'TaskDefaults': {'LogDriver': {'Name': 'syslog'}}} + assert self.init_swarm(swarm_spec=spec) + swarm_info = self.client.inspect_swarm() + + assert swarm_info['Spec']['TaskDefaults']['LogDriver']['Name'] == ( + 'syslog' + ) + + @requires_api_version('1.24') + def test_leave_swarm(self): + assert self.init_swarm() + with pytest.raises(docker.errors.APIError) as exc_info: + self.client.leave_swarm() + assert exc_info.value.response.status_code == 503 + assert self.client.leave_swarm(force=True) + with pytest.raises(docker.errors.APIError) as exc_info: + self.client.inspect_swarm() + assert exc_info.value.response.status_code == 503 + assert self.client.leave_swarm(force=True) + + @requires_api_version('1.24') + def test_update_swarm(self): + assert self.init_swarm() + swarm_info_1 = self.client.inspect_swarm() + spec = self.client.create_swarm_spec( + snapshot_interval=5000, log_entries_for_slow_followers=1200, + node_cert_expiry=7776000000000000 + ) + assert self.client.update_swarm( + version=swarm_info_1['Version']['Index'], + swarm_spec=spec, rotate_worker_token=True + ) + swarm_info_2 = self.client.inspect_swarm() + + assert ( + swarm_info_1['Version']['Index'] != + swarm_info_2['Version']['Index'] + ) + assert swarm_info_2['Spec']['Raft']['SnapshotInterval'] == 5000 + assert ( + swarm_info_2['Spec']['Raft']['LogEntriesForSlowFollowers'] == 1200 + ) + assert ( + swarm_info_1['JoinTokens']['Manager'] == + swarm_info_2['JoinTokens']['Manager'] + ) + assert ( + swarm_info_1['JoinTokens']['Worker'] != + swarm_info_2['JoinTokens']['Worker'] + ) + + @requires_api_version('1.24') + def test_list_nodes(self): + assert self.init_swarm() + nodes_list = self.client.nodes() + assert len(nodes_list) == 1 + node = nodes_list[0] + assert 'ID' in node + assert 'Spec' in node + assert node['Spec']['Role'] == 'manager' + + filtered_list = self.client.nodes(filters={ + 'id': node['ID'] + }) + assert len(filtered_list) == 1 + filtered_list = self.client.nodes(filters={ + 'role': 'worker' + }) + assert len(filtered_list) == 0 + + @requires_api_version('1.24') + def test_inspect_node(self): + node_id = 
self.init_swarm() + assert node_id + nodes_list = self.client.nodes() + assert len(nodes_list) == 1 + node = nodes_list[0] + node_data = self.client.inspect_node(node['ID']) + assert node['ID'] == node_data['ID'] + assert node_id == node['ID'] + assert node['Version'] == node_data['Version'] + + @requires_api_version('1.24') + def test_update_node(self): + assert self.init_swarm() + nodes_list = self.client.nodes() + node = nodes_list[0] + orig_spec = node['Spec'] + + # add a new label + new_spec = copy.deepcopy(orig_spec) + new_spec['Labels'] = {'new.label': 'new value'} + self.client.update_node(node_id=node['ID'], + version=node['Version']['Index'], + node_spec=new_spec) + updated_node = self.client.inspect_node(node['ID']) + assert new_spec == updated_node['Spec'] + + # Revert the changes + self.client.update_node(node_id=node['ID'], + version=updated_node['Version']['Index'], + node_spec=orig_spec) + reverted_node = self.client.inspect_node(node['ID']) + assert orig_spec == reverted_node['Spec'] + + @requires_api_version('1.24') + def test_remove_main_node(self): + assert self.init_swarm() + nodes_list = self.client.nodes() + node_id = nodes_list[0]['ID'] + with pytest.raises(docker.errors.NotFound): + self.client.remove_node('foobar01') + with pytest.raises(docker.errors.APIError) as e: + self.client.remove_node(node_id) + + assert e.value.response.status_code >= 400 + + with pytest.raises(docker.errors.APIError) as e: + self.client.remove_node(node_id, True) + + assert e.value.response.status_code >= 400 + + @requires_api_version('1.25') + def test_rotate_manager_unlock_key(self): + spec = self.client.create_swarm_spec(autolock_managers=True) + assert self.init_swarm(swarm_spec=spec) + swarm_info = self.client.inspect_swarm() + key_1 = self.client.get_unlock_key() + assert self.client.update_swarm( + version=swarm_info['Version']['Index'], + rotate_manager_unlock_key=True + ) + key_2 = self.client.get_unlock_key() + assert key_1['UnlockKey'] != key_2['UnlockKey'] + + @requires_api_version('1.30') + @pytest.mark.xfail(reason='Can fail if eth0 has multiple IP addresses') + def test_init_swarm_data_path_addr(self): + assert self.init_swarm(data_path_addr='eth0') + + @requires_api_version('1.40') + def test_init_swarm_data_path_port(self): + assert self.init_swarm(data_path_port=4242) + assert self.client.inspect_swarm()['DataPathPort'] == 4242 diff --git a/tests/integration/api_volume_test.py b/tests/integration/api_volume_test.py new file mode 100644 index 0000000000..413b1d9bc6 --- /dev/null +++ b/tests/integration/api_volume_test.py @@ -0,0 +1,75 @@ +import pytest + +import docker + +from ..helpers import requires_api_version +from .base import BaseAPIIntegrationTest + + +class TestVolumes(BaseAPIIntegrationTest): + def test_create_volume(self): + name = 'perfectcherryblossom' + self.tmp_volumes.append(name) + result = self.client.create_volume(name) + assert 'Name' in result + assert result['Name'] == name + assert 'Driver' in result + assert result['Driver'] == 'local' + + def test_create_volume_invalid_driver(self): + # special name to avoid exponential timeout loop + # https://github.com/moby/moby/blob/9e00a63d65434cdedc444e79a2b33a7c202b10d8/pkg/plugins/client.go#L253-L254 + driver_name = 'this-plugin-does-not-exist' + + with pytest.raises(docker.errors.APIError) as cm: + self.client.create_volume('perfectcherryblossom', driver_name) + assert ( + cm.value.response.status_code == 404 or + cm.value.response.status_code == 400 + ) + + def test_list_volumes(self): + 
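# A newly created volume should appear in the volumes() listing. +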
name = 'imperishablenight' + self.tmp_volumes.append(name) + volume_info = self.client.create_volume(name) + result = self.client.volumes() + assert 'Volumes' in result + volumes = result['Volumes'] + assert volume_info in volumes + + def test_inspect_volume(self): + name = 'embodimentofscarletdevil' + self.tmp_volumes.append(name) + volume_info = self.client.create_volume(name) + result = self.client.inspect_volume(name) + assert volume_info == result + + def test_inspect_nonexistent_volume(self): + name = 'embodimentofscarletdevil' + with pytest.raises(docker.errors.NotFound): + self.client.inspect_volume(name) + + def test_remove_volume(self): + name = 'shootthebullet' + self.tmp_volumes.append(name) + self.client.create_volume(name) + self.client.remove_volume(name) + + @requires_api_version('1.25') + def test_force_remove_volume(self): + name = 'shootthebullet' + self.tmp_volumes.append(name) + self.client.create_volume(name) + self.client.remove_volume(name, force=True) + + @requires_api_version('1.25') + def test_prune_volumes(self): + v = self.client.create_volume() + self.tmp_volumes.append(v["Name"]) + result = self.client.prune_volumes() + assert v["Name"] in result['VolumesDeleted'] + + def test_remove_nonexistent_volume(self): + name = 'shootthebullet' + with pytest.raises(docker.errors.NotFound): + self.client.remove_volume(name) diff --git a/tests/integration/base.py b/tests/integration/base.py new file mode 100644 index 0000000000..51ee05daa5 --- /dev/null +++ b/tests/integration/base.py @@ -0,0 +1,127 @@ +import os +import shutil +import unittest + +import docker +from docker.utils import kwargs_from_env + +from .. import helpers + +TEST_IMG = 'alpine:3.10' +TEST_API_VERSION = os.environ.get('DOCKER_TEST_API_VERSION') + + +class BaseIntegrationTest(unittest.TestCase): + """ + A base class for integration test cases. It cleans up the Docker server + after itself. + """ + + def setUp(self): + self.tmp_imgs = [] + self.tmp_containers = [] + self.tmp_folders = [] + self.tmp_volumes = [] + self.tmp_networks = [] + self.tmp_plugins = [] + self.tmp_secrets = [] + self.tmp_configs = [] + + def tearDown(self): + client = docker.from_env(version=TEST_API_VERSION) + try: + for img in self.tmp_imgs: + try: + client.api.remove_image(img) + except docker.errors.APIError: + pass + for container in self.tmp_containers: + try: + client.api.remove_container(container, force=True, v=True) + except docker.errors.APIError: + pass + for network in self.tmp_networks: + try: + client.api.remove_network(network) + except docker.errors.APIError: + pass + for volume in self.tmp_volumes: + try: + client.api.remove_volume(volume) + except docker.errors.APIError: + pass + + for secret in self.tmp_secrets: + try: + client.api.remove_secret(secret) + except docker.errors.APIError: + pass + + for config in self.tmp_configs: + try: + client.api.remove_config(config) + except docker.errors.APIError: + pass + + for folder in self.tmp_folders: + shutil.rmtree(folder) + finally: + client.close() + + +class BaseAPIIntegrationTest(BaseIntegrationTest): + """ + A test case for `APIClient` integration tests. It sets up an `APIClient` + as `self.client`. 
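+ It also provides helpers for running containers, execs, and swarm init.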
+ """ + + def setUp(self): + super().setUp() + self.client = self.get_client_instance() + + def tearDown(self): + super().tearDown() + self.client.close() + + @staticmethod + def get_client_instance(): + return docker.APIClient( + version=TEST_API_VERSION, timeout=60, **kwargs_from_env() + ) + + @staticmethod + def _init_swarm(client, **kwargs): + return client.init_swarm( + '127.0.0.1', listen_addr=helpers.swarm_listen_addr(), **kwargs + ) + + def run_container(self, *args, **kwargs): + container = self.client.create_container(*args, **kwargs) + self.tmp_containers.append(container) + self.client.start(container) + exitcode = self.client.wait(container)['StatusCode'] + + if exitcode != 0: + output = self.client.logs(container) + raise Exception( + f"Container exited with code {exitcode}:\n{output}") + + return container + + def create_and_start(self, image=TEST_IMG, command='top', **kwargs): + container = self.client.create_container( + image=image, command=command, **kwargs) + self.tmp_containers.append(container) + self.client.start(container) + return container + + def execute(self, container, cmd, exit_code=0, **kwargs): + exc = self.client.exec_create(container, cmd, **kwargs) + output = self.client.exec_start(exc) + actual_exit_code = self.client.exec_inspect(exc)['ExitCode'] + msg = "Expected `{}` to exit with code {} but returned {}:\n{}".format( + " ".join(cmd), exit_code, actual_exit_code, output) + assert actual_exit_code == exit_code, msg + + def init_swarm(self, **kwargs): + return self._init_swarm(self.client, **kwargs) diff --git a/tests/integration/client_test.py b/tests/integration/client_test.py new file mode 100644 index 0000000000..1d1be077e0 --- /dev/null +++ b/tests/integration/client_test.py @@ -0,0 +1,48 @@ +import threading +import unittest +from datetime import datetime, timedelta + +import docker + +from ..helpers import requires_api_version +from .base import TEST_API_VERSION + + +class ClientTest(unittest.TestCase): + client = docker.from_env(version=TEST_API_VERSION) + + def test_info(self): + info = self.client.info() + assert 'ID' in info + assert 'Name' in info + + def test_ping(self): + assert self.client.ping() is True + + def test_version(self): + assert 'Version' in self.client.version() + + @requires_api_version('1.25') + def test_df(self): + data = self.client.df() + assert 'LayersSize' in data + assert 'Containers' in data + assert 'Volumes' in data + assert 'Images' in data + + +class CancellableEventsTest(unittest.TestCase): + client = docker.from_env(version=TEST_API_VERSION) + + def test_cancel_events(self): + start = datetime.now() + + events = self.client.events(until=start + timedelta(seconds=5)) + + cancel_thread = threading.Timer(2, events.close) + cancel_thread.start() + + for _ in events: + pass + + self.assertLess(datetime.now() - start, timedelta(seconds=3)) diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py new file mode 100644 index 0000000000..443c5b7950 --- /dev/null +++ b/tests/integration/conftest.py @@ -0,0 +1,28 @@ +import sys +import warnings + +import pytest + +import docker.errors +from docker.utils import kwargs_from_env + +from .base import TEST_IMG + + +@pytest.fixture(autouse=True, scope='session') +def setup_test_session(): + warnings.simplefilter('error') + c = docker.APIClient(version='auto', **kwargs_from_env()) + try: + c.inspect_image(TEST_IMG) + except docker.errors.NotFound: + print(f"\npulling {TEST_IMG}", file=sys.stderr) + for data in c.pull(TEST_IMG, stream=True, decode=True): + status 
= data.get("status") + progress = data.get("progress") + detail = f"{status} - {progress}" + print(detail, file=sys.stderr) + + # Double-check that the test image is now available locally + c.inspect_image(TEST_IMG) + c.close() diff --git a/tests/integration/context_api_test.py b/tests/integration/context_api_test.py new file mode 100644 index 0000000000..2131ebe749 --- /dev/null +++ b/tests/integration/context_api_test.py @@ -0,0 +1,62 @@ +import os +import tempfile + +import pytest + +from docker import errors +from docker.context import ContextAPI +from docker.tls import TLSConfig + +from .base import BaseAPIIntegrationTest + + +class ContextLifecycleTest(BaseAPIIntegrationTest): + def test_lifecycle(self): + assert ContextAPI.get_context().Name == "default" + assert not ContextAPI.get_context("test") + assert ContextAPI.get_current_context().Name == "default" + + dirpath = tempfile.mkdtemp() + ca = tempfile.NamedTemporaryFile( + prefix=os.path.join(dirpath, "ca.pem"), mode="r") + cert = tempfile.NamedTemporaryFile( + prefix=os.path.join(dirpath, "cert.pem"), mode="r") + key = tempfile.NamedTemporaryFile( + prefix=os.path.join(dirpath, "key.pem"), mode="r") + + # create a context named 'test' + docker_tls = TLSConfig( + client_cert=(cert.name, key.name), + ca_cert=ca.name) + ContextAPI.create_context( + "test", tls_cfg=docker_tls) + + # check for a context 'test' in the context store + assert any(ctx.Name == "test" for ctx in ContextAPI.contexts()) + # retrieve a context object for 'test' + assert ContextAPI.get_context("test") + # remove context + ContextAPI.remove_context("test") + with pytest.raises(errors.ContextNotFound): + ContextAPI.inspect_context("test") + # check there is no 'test' context in store + assert not ContextAPI.get_context("test") + + ca.close() + key.close() + cert.close() + + def test_context_remove(self): + ContextAPI.create_context("test") + assert ContextAPI.inspect_context("test")["Name"] == "test" + + ContextAPI.remove_context("test") + with pytest.raises(errors.ContextNotFound): + ContextAPI.inspect_context("test") + + def test_load_context_without_orchestrator(self): + ContextAPI.create_context("test") + ctx = ContextAPI.get_context("test") + assert ctx + assert ctx.Name == "test" + assert ctx.Orchestrator is None diff --git a/tests/testdata/certs/key.pem b/tests/integration/credentials/__init__.py similarity index 100% rename from tests/testdata/certs/key.pem rename to tests/integration/credentials/__init__.py diff --git a/tests/integration/credentials/create_gpg_key.sh b/tests/integration/credentials/create_gpg_key.sh new file mode 100644 index 0000000000..b276c20dc5 --- /dev/null +++ b/tests/integration/credentials/create_gpg_key.sh @@ -0,0 +1,12 @@ +#!/usr/bin/sh +haveged +gpg --batch --gen-key <<-EOF +%echo Generating a standard key +Key-Type: DSA +Key-Length: 1024 +Subkey-Type: ELG-E +Subkey-Length: 1024 +Name-Real: Sakuya Izayoi +Name-Email: sakuya@gensokyo.jp +Expire-Date: 0 +EOF \ No newline at end of file diff --git a/tests/integration/credentials/store_test.py b/tests/integration/credentials/store_test.py new file mode 100644 index 0000000000..e1eba33a18 --- /dev/null +++ b/tests/integration/credentials/store_test.py @@ -0,0 +1,96 @@ +import os +import random +import shutil +import sys + +import pytest + +from docker.credentials import ( + DEFAULT_LINUX_STORE, + DEFAULT_OSX_STORE, + CredentialsNotFound, + Store, + StoreError, +) + + +class TestStore: + def teardown_method(self): + for server in self.tmp_keys: + try: + self.store.erase(server) + except StoreError: + pass + + 
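# Pick whichever supported credential-store helper is installed on this host. +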
def setup_method(self): + self.tmp_keys = [] + if sys.platform.startswith('linux'): + if shutil.which(f"docker-credential-{DEFAULT_LINUX_STORE}"): + self.store = Store(DEFAULT_LINUX_STORE) + elif shutil.which('docker-credential-pass'): + self.store = Store('pass') + else: + raise Exception('No supported docker-credential store in PATH') + elif sys.platform.startswith('darwin'): + self.store = Store(DEFAULT_OSX_STORE) + + def get_random_servername(self): + res = f'pycreds_test_{random.getrandbits(32):x}' + self.tmp_keys.append(res) + return res + + def test_store_and_get(self): + key = self.get_random_servername() + self.store.store(server=key, username='user', secret='pass') + data = self.store.get(key) + assert data == { + 'ServerURL': key, + 'Username': 'user', + 'Secret': 'pass' + } + + def test_get_nonexistent(self): + key = self.get_random_servername() + with pytest.raises(CredentialsNotFound): + self.store.get(key) + + def test_store_and_erase(self): + key = self.get_random_servername() + self.store.store(server=key, username='user', secret='pass') + self.store.erase(key) + with pytest.raises(CredentialsNotFound): + self.store.get(key) + + def test_unicode_strings(self): + key = self.get_random_servername() + self.store.store(server=key, username='user', secret='pass') + data = self.store.get(key) + assert data + self.store.erase(key) + with pytest.raises(CredentialsNotFound): + self.store.get(key) + + def test_list(self): + names = (self.get_random_servername(), self.get_random_servername()) + self.store.store(names[0], username='sakuya', secret='izayoi') + self.store.store(names[1], username='reimu', secret='hakurei') + data = self.store.list() + assert names[0] in data + assert data[names[0]] == 'sakuya' + assert names[1] in data + assert data[names[1]] == 'reimu' + + def test_execute_with_env_override(self): + self.store.exe = 'env' + self.store.environment = {'FOO': 'bar'} + data = self.store._execute('--null', '') + assert b'\0FOO=bar\0' in data + assert 'FOO' not in os.environ + + def test_unavailable_store(self): + some_unavailable_store = None + with pytest.warns(UserWarning): + some_unavailable_store = Store('that-does-not-exist') + with pytest.raises(StoreError): + some_unavailable_store.get('anything-this-does-not-matter') diff --git a/tests/integration/credentials/utils_test.py b/tests/integration/credentials/utils_test.py new file mode 100644 index 0000000000..75bdea1009 --- /dev/null +++ b/tests/integration/credentials/utils_test.py @@ -0,0 +1,18 @@ +import os +from unittest import mock + +from docker.credentials.utils import create_environment_dict + + +@mock.patch.dict(os.environ) +def test_create_environment_dict(): + base = {'FOO': 'bar', 'BAZ': 'foobar'} + os.environ = base # noqa: B003 + assert create_environment_dict({'FOO': 'baz'}) == { + 'FOO': 'baz', 'BAZ': 'foobar', + } + assert create_environment_dict({'HELLO': 'world'}) == { + 'FOO': 'bar', 'BAZ': 'foobar', 'HELLO': 'world', + } + + assert os.environ == base diff --git a/tests/integration/errors_test.py b/tests/integration/errors_test.py new file mode 100644 index 0000000000..438caacbc4 --- /dev/null +++ b/tests/integration/errors_test.py @@ -0,0 +1,17 @@ +import pytest + +from docker.errors import APIError + +from .base import TEST_IMG, BaseAPIIntegrationTest + + +class ErrorsTest(BaseAPIIntegrationTest): + def test_api_error_parses_json(self): + container = self.client.create_container(TEST_IMG, ['sleep', '10']) + self.client.start(container['Id']) + with pytest.raises(APIError) as cm: + 
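# Removing a running container without force raises an APIError. +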
self.client.remove_container(container['Id']) + explanation = cm.value.explanation.lower() + assert 'stop the container before' in explanation + assert '{"message":' not in explanation + self.client.remove_container(container['Id'], force=True) diff --git a/tests/integration/models_containers_test.py b/tests/integration/models_containers_test.py new file mode 100644 index 0000000000..8727455932 --- /dev/null +++ b/tests/integration/models_containers_test.py @@ -0,0 +1,560 @@ +import os +import tempfile +import threading + +import pytest + +import docker + +from ..helpers import random_name, requires_api_version +from .base import TEST_API_VERSION, BaseIntegrationTest + + +class ContainerCollectionTest(BaseIntegrationTest): + + def test_run(self): + client = docker.from_env(version=TEST_API_VERSION) + assert client.containers.run( + "alpine", "echo hello world", remove=True + ) == b'hello world\n' + + def test_run_detach(self): + client = docker.from_env(version=TEST_API_VERSION) + container = client.containers.run("alpine", "sleep 300", detach=True) + self.tmp_containers.append(container.id) + assert container.attrs['Config']['Image'] == "alpine" + assert container.attrs['Config']['Cmd'] == ['sleep', '300'] + + def test_run_with_error(self): + client = docker.from_env(version=TEST_API_VERSION) + with pytest.raises(docker.errors.ContainerError) as cm: + client.containers.run("alpine", "cat /test", remove=True) + assert cm.value.exit_status == 1 + assert "cat /test" in cm.exconly() + assert "alpine" in cm.exconly() + assert "No such file or directory" in cm.exconly() + + def test_run_with_image_that_does_not_exist(self): + client = docker.from_env(version=TEST_API_VERSION) + with pytest.raises(docker.errors.ImageNotFound): + client.containers.run("dockerpytest_does_not_exist") + + @pytest.mark.skipif( + docker.constants.IS_WINDOWS_PLATFORM, reason="host mounts on Windows" + ) + def test_run_with_volume(self): + client = docker.from_env(version=TEST_API_VERSION) + path = tempfile.mkdtemp() + + container = client.containers.run( + "alpine", "sh -c 'echo \"hello\" > /insidecontainer/test'", + volumes=[f"{path}:/insidecontainer"], + detach=True + ) + self.tmp_containers.append(container.id) + container.wait() + + name = "container_volume_test" + out = client.containers.run( + "alpine", "cat /insidecontainer/test", + volumes=[f"{path}:/insidecontainer"], + name=name + ) + self.tmp_containers.append(name) + assert out == b'hello\n' + + def test_run_with_named_volume(self): + client = docker.from_env(version=TEST_API_VERSION) + volume = client.volumes.create(name="somevolume") + self.tmp_volumes.append(volume.id) + + container = client.containers.run( + "alpine", "sh -c 'echo \"hello\" > /insidecontainer/test'", + volumes=["somevolume:/insidecontainer"], + detach=True + ) + self.tmp_containers.append(container.id) + container.wait() + + name = "container_volume_test" + out = client.containers.run( + "alpine", "cat /insidecontainer/test", + volumes=["somevolume:/insidecontainer"], + name=name + ) + self.tmp_containers.append(name) + assert out == b'hello\n' + + def test_run_with_network(self): + net_name = random_name() + client = docker.from_env(version=TEST_API_VERSION) + client.networks.create(net_name) + self.tmp_networks.append(net_name) + + container = client.containers.run( + 'alpine', 'echo hello world', network=net_name, + detach=True + ) + self.tmp_containers.append(container.id) + + attrs = container.attrs + + assert 'NetworkSettings' in attrs + assert 'Networks' in 
attrs['NetworkSettings'] + assert list(attrs['NetworkSettings']['Networks'].keys()) == [net_name] + + def test_run_with_networking_config(self): + net_name = random_name() + client = docker.from_env(version=TEST_API_VERSION) + client.networks.create(net_name) + self.tmp_networks.append(net_name) + + test_alias = 'hello' + test_driver_opt = {'key1': 'a'} + + networking_config = { + net_name: client.api.create_endpoint_config( + aliases=[test_alias], + driver_opt=test_driver_opt + ) + } + + container = client.containers.run( + 'alpine', 'echo hello world', network=net_name, + networking_config=networking_config, + detach=True + ) + self.tmp_containers.append(container.id) + + attrs = container.attrs + + assert 'NetworkSettings' in attrs + assert 'Networks' in attrs['NetworkSettings'] + assert list(attrs['NetworkSettings']['Networks'].keys()) == [net_name] + # Aliases no longer include the container's short-id in API v1.45. + assert attrs['NetworkSettings']['Networks'][net_name]['Aliases'] \ + == [test_alias] + assert attrs['NetworkSettings']['Networks'][net_name]['DriverOpts'] \ + == test_driver_opt + + def test_run_with_networking_config_with_undeclared_network(self): + net_name = random_name() + client = docker.from_env(version=TEST_API_VERSION) + client.networks.create(net_name) + self.tmp_networks.append(net_name) + + test_aliases = ['hello'] + test_driver_opt = {'key1': 'a'} + + networking_config = { + net_name: client.api.create_endpoint_config( + aliases=test_aliases, + driver_opt=test_driver_opt + ), + 'bar': client.api.create_endpoint_config( + aliases=['test'], + driver_opt={'key2': 'b'} + ), + } + + with pytest.raises(docker.errors.APIError): + container = client.containers.run( + 'alpine', 'echo hello world', network=net_name, + networking_config=networking_config, + detach=True + ) + self.tmp_containers.append(container.id) + + def test_run_with_networking_config_only_undeclared_network(self): + net_name = random_name() + client = docker.from_env(version=TEST_API_VERSION) + client.networks.create(net_name) + self.tmp_networks.append(net_name) + + networking_config = { + 'bar': client.api.create_endpoint_config( + aliases=['hello'], + driver_opt={'key1': 'a'} + ), + } + + container = client.containers.run( + 'alpine', 'echo hello world', network=net_name, + networking_config=networking_config, + detach=True + ) + self.tmp_containers.append(container.id) + + attrs = container.attrs + + assert 'NetworkSettings' in attrs + assert 'Networks' in attrs['NetworkSettings'] + assert list(attrs['NetworkSettings']['Networks'].keys()) == [net_name] + # Aliases no longer include the container's short-id in API v1.45. + assert (attrs['NetworkSettings']['Networks'][net_name]['Aliases'] + is None) + assert (attrs['NetworkSettings']['Networks'][net_name]['DriverOpts'] + is None) + + def test_run_with_none_driver(self): + client = docker.from_env(version=TEST_API_VERSION) + + out = client.containers.run( + "alpine", "echo hello", + log_config={"type": 'none'} + ) + assert out is None + + def test_run_with_json_file_driver(self): + client = docker.from_env(version=TEST_API_VERSION) + + out = client.containers.run( + "alpine", "echo hello", + log_config={"type": 'json-file'} + ) + assert out == b'hello\n' + + @requires_api_version('1.25') + def test_run_with_auto_remove(self): + client = docker.from_env(version=TEST_API_VERSION) + out = client.containers.run( + # sleep(2) to allow any communication with the container + # before it gets removed by the host. 
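+ # auto_remove=True makes the daemon delete the container once it exits.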
+ 'alpine', 'sh -c "echo hello && sleep 2"', auto_remove=True + ) + assert out == b'hello\n' + + @requires_api_version('1.25') + def test_run_with_auto_remove_error(self): + client = docker.from_env(version=TEST_API_VERSION) + with pytest.raises(docker.errors.ContainerError) as e: + client.containers.run( + # sleep(2) to allow any communication with the container + # before it gets removed by the host. + 'alpine', 'sh -c ">&2 echo error && sleep 2 && exit 1"', + auto_remove=True + ) + assert e.value.exit_status == 1 + assert e.value.stderr is None + + def test_run_with_streamed_logs(self): + client = docker.from_env(version=TEST_API_VERSION) + out = client.containers.run( + 'alpine', 'sh -c "echo hello && echo world"', stream=True + ) + logs = list(out) + assert logs[0] == b'hello\n' + assert logs[1] == b'world\n' + + @pytest.mark.timeout(5) + @pytest.mark.skipif(os.environ.get('DOCKER_HOST', '').startswith('ssh://'), + reason='No cancellable streams over SSH') + def test_run_with_streamed_logs_and_cancel(self): + client = docker.from_env(version=TEST_API_VERSION) + out = client.containers.run( + 'alpine', 'sh -c "echo hello && echo world"', stream=True + ) + + threading.Timer(1, out.close).start() + + logs = list(out) + + assert len(logs) == 2 + assert logs[0] == b'hello\n' + assert logs[1] == b'world\n' + + def test_run_with_proxy_config(self): + client = docker.from_env(version=TEST_API_VERSION) + client.api._proxy_configs = docker.utils.proxy.ProxyConfig( + ftp='sakuya.jp:4967' + ) + + out = client.containers.run('alpine', 'sh -c "env"') + + assert b'FTP_PROXY=sakuya.jp:4967\n' in out + assert b'ftp_proxy=sakuya.jp:4967\n' in out + + def test_get(self): + client = docker.from_env(version=TEST_API_VERSION) + container = client.containers.run("alpine", "sleep 300", detach=True) + self.tmp_containers.append(container.id) + assert client.containers.get(container.id).attrs[ + 'Config']['Image'] == "alpine" + + def test_list(self): + client = docker.from_env(version=TEST_API_VERSION) + container_id = client.containers.run( + "alpine", "sleep 300", detach=True).id + self.tmp_containers.append(container_id) + containers = [c for c in client.containers.list() if c.id == + container_id] + assert len(containers) == 1 + + container = containers[0] + assert container.attrs['Config']['Image'] == 'alpine' + assert container.status == 'running' + assert container.image == client.images.get('alpine') + + container.kill() + container.remove() + assert container_id not in [c.id for c in client.containers.list()] + + def test_list_sparse(self): + client = docker.from_env(version=TEST_API_VERSION) + container_id = client.containers.run( + "alpine", "sleep 300", detach=True).id + self.tmp_containers.append(container_id) + containers = [c for c in client.containers.list(sparse=True) if c.id == + container_id] + assert len(containers) == 1 + + container = containers[0] + assert container.attrs['Image'] == 'alpine' + assert container.status == 'running' + assert container.image == client.images.get('alpine') + with pytest.raises(docker.errors.DockerException): + _ = container.labels + + container.kill() + container.remove() + assert container_id not in [c.id for c in client.containers.list()] + + +class ContainerTest(BaseIntegrationTest): + + def test_commit(self): + client = docker.from_env(version=TEST_API_VERSION) + container = client.containers.run( + "alpine", "sh -c 'echo \"hello\" > /test'", + detach=True + ) + self.tmp_containers.append(container.id) + container.wait() + image = container.commit() + 
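# Running the committed image should reproduce the file written above. +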
assert client.containers.run( + image.id, "cat /test", remove=True + ) == b"hello\n" + + def test_diff(self): + client = docker.from_env(version=TEST_API_VERSION) + container = client.containers.run("alpine", "touch /test", detach=True) + self.tmp_containers.append(container.id) + container.wait() + assert container.diff() == [{'Path': '/test', 'Kind': 1}] + + def test_exec_run_success(self): + client = docker.from_env(version=TEST_API_VERSION) + container = client.containers.run( + "alpine", "sh -c 'echo \"hello\" > /test; sleep 60'", detach=True + ) + self.tmp_containers.append(container.id) + exec_output = container.exec_run("cat /test") + assert exec_output[0] == 0 + assert exec_output[1] == b"hello\n" + + def test_exec_run_error_code_from_exec(self): + client = docker.from_env(version=TEST_API_VERSION) + container = client.containers.run( + "alpine", "sh -c 'sleep 20'", detach=True + ) + self.tmp_containers.append(container.id) + exec_output = container.exec_run("sh -c 'exit 42'") + assert exec_output[0] == 42 + + def test_exec_run_failed(self): + client = docker.from_env(version=TEST_API_VERSION) + container = client.containers.run( + "alpine", "sh -c 'sleep 60'", detach=True + ) + self.tmp_containers.append(container.id) + exec_output = container.exec_run("non-existent") + # older versions of docker return `126` in the case that an exec cannot + # be started due to a missing executable. We're fixing this for the + # future, so accept both for now. + assert exec_output[0] == 127 or exec_output[0] == 126 + + def test_kill(self): + client = docker.from_env(version=TEST_API_VERSION) + container = client.containers.run("alpine", "sleep 300", detach=True) + self.tmp_containers.append(container.id) + while container.status != 'running': + container.reload() + assert container.status == 'running' + container.kill() + container.reload() + assert container.status == 'exited' + + def test_logs(self): + client = docker.from_env(version=TEST_API_VERSION) + container = client.containers.run("alpine", "echo hello world", + detach=True) + self.tmp_containers.append(container.id) + container.wait() + assert container.logs() == b"hello world\n" + + def test_pause(self): + client = docker.from_env(version=TEST_API_VERSION) + container = client.containers.run("alpine", "sleep 300", detach=True) + self.tmp_containers.append(container.id) + container.pause() + container.reload() + assert container.status == "paused" + container.unpause() + container.reload() + assert container.status == "running" + + def test_remove(self): + client = docker.from_env(version=TEST_API_VERSION) + container = client.containers.run("alpine", "echo hello", detach=True) + self.tmp_containers.append(container.id) + assert container.id in [c.id for c in client.containers.list(all=True)] + container.wait() + container.remove() + containers = client.containers.list(all=True) + assert container.id not in [c.id for c in containers] + + def test_rename(self): + client = docker.from_env(version=TEST_API_VERSION) + container = client.containers.run("alpine", "echo hello", name="test1", + detach=True) + self.tmp_containers.append(container.id) + assert container.name == "test1" + container.rename("test2") + container.reload() + assert container.name == "test2" + + def test_restart(self): + client = docker.from_env(version=TEST_API_VERSION) + container = client.containers.run("alpine", "sleep 100", detach=True) + self.tmp_containers.append(container.id) + first_started_at = container.attrs['State']['StartedAt'] + container.restart() + 
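# reload() refreshes attrs so the new StartedAt value is visible. +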
container.reload() + second_started_at = container.attrs['State']['StartedAt'] + assert first_started_at != second_started_at + + def test_start(self): + client = docker.from_env(version=TEST_API_VERSION) + container = client.containers.create("alpine", "sleep 50", detach=True) + self.tmp_containers.append(container.id) + assert container.status == "created" + container.start() + container.reload() + assert container.status == "running" + + def test_stats(self): + client = docker.from_env(version=TEST_API_VERSION) + container = client.containers.run("alpine", "sleep 100", detach=True) + self.tmp_containers.append(container.id) + stats = container.stats(stream=False) + for key in ['read', 'networks', 'precpu_stats', 'cpu_stats', + 'memory_stats', 'blkio_stats']: + assert key in stats + + def test_ports_target_none(self): + client = docker.from_env(version=TEST_API_VERSION) + ports = None + target_ports = {'2222/tcp': ports} + container = client.containers.run( + "alpine", "sleep 100", detach=True, + ports=target_ports + ) + self.tmp_containers.append(container.id) + container.reload() # required to get auto-assigned ports + actual_ports = container.ports + assert sorted(target_ports.keys()) == sorted(actual_ports.keys()) + for target_client, target_host in target_ports.items(): + for actual_port in actual_ports[target_client]: + actual_keys = sorted(actual_port.keys()) + assert sorted(['HostIp', 'HostPort']) == actual_keys + assert target_host is ports + assert int(actual_port['HostPort']) > 0 + client.close() + + def test_ports_target_tuple(self): + client = docker.from_env(version=TEST_API_VERSION) + ports = ('127.0.0.1', 1111) + target_ports = {'2222/tcp': ports} + container = client.containers.run( + "alpine", "sleep 100", detach=True, + ports=target_ports + ) + self.tmp_containers.append(container.id) + container.reload() # required to get auto-assigned ports + actual_ports = container.ports + assert sorted(target_ports.keys()) == sorted(actual_ports.keys()) + for target_client, target_host in target_ports.items(): + for actual_port in actual_ports[target_client]: + actual_keys = sorted(actual_port.keys()) + assert sorted(['HostIp', 'HostPort']) == actual_keys + assert target_host == ports + assert int(actual_port['HostPort']) > 0 + client.close() + + def test_ports_target_list(self): + client = docker.from_env(version=TEST_API_VERSION) + ports = [1234, 4567] + target_ports = {'2222/tcp': ports} + container = client.containers.run( + "alpine", "sleep 100", detach=True, + ports=target_ports + ) + self.tmp_containers.append(container.id) + container.reload() # required to get auto-assigned ports + actual_ports = container.ports + assert sorted(target_ports.keys()) == sorted(actual_ports.keys()) + for target_client, target_host in target_ports.items(): + for actual_port in actual_ports[target_client]: + actual_keys = sorted(actual_port.keys()) + assert sorted(['HostIp', 'HostPort']) == actual_keys + assert target_host == ports + assert int(actual_port['HostPort']) > 0 + client.close() + + def test_stop(self): + client = docker.from_env(version=TEST_API_VERSION) + container = client.containers.run("alpine", "top", detach=True) + self.tmp_containers.append(container.id) + assert container.status in ("running", "created") + container.stop(timeout=2) + container.reload() + assert container.status == "exited" + + def test_top(self): + client = docker.from_env(version=TEST_API_VERSION) + container = client.containers.run("alpine", "sleep 60", detach=True) + 
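# Track the container for cleanup before inspecting its process list. +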
self.tmp_containers.append(container.id) + top = container.top() + assert len(top['Processes']) == 1 + assert 'sleep 60' in top['Processes'][0] + + def test_update(self): + client = docker.from_env(version=TEST_API_VERSION) + container = client.containers.run("alpine", "sleep 60", detach=True, + cpu_shares=2) + self.tmp_containers.append(container.id) + assert container.attrs['HostConfig']['CpuShares'] == 2 + container.update(cpu_shares=3) + container.reload() + assert container.attrs['HostConfig']['CpuShares'] == 3 + + def test_wait(self): + client = docker.from_env(version=TEST_API_VERSION) + container = client.containers.run("alpine", "sh -c 'exit 0'", + detach=True) + self.tmp_containers.append(container.id) + assert container.wait()['StatusCode'] == 0 + container = client.containers.run("alpine", "sh -c 'exit 1'", + detach=True) + self.tmp_containers.append(container.id) + assert container.wait()['StatusCode'] == 1 + + def test_create_with_volume_driver(self): + client = docker.from_env(version=TEST_API_VERSION) + container = client.containers.create( + 'alpine', + 'sleep 300', + volume_driver='foo' + ) + self.tmp_containers.append(container.id) + assert container.attrs['HostConfig']['VolumeDriver'] == 'foo' diff --git a/tests/integration/models_images_test.py b/tests/integration/models_images_test.py new file mode 100644 index 0000000000..9d42cc48f2 --- /dev/null +++ b/tests/integration/models_images_test.py @@ -0,0 +1,164 @@ +import io +import tempfile + +import pytest + +import docker + +from ..helpers import random_name +from .base import TEST_API_VERSION, TEST_IMG, BaseIntegrationTest + + +class ImageCollectionTest(BaseIntegrationTest): + + def test_build(self): + client = docker.from_env(version=TEST_API_VERSION) + image, _ = client.images.build(fileobj=io.BytesIO( + b"FROM alpine\n" + b"CMD echo hello world" + )) + self.tmp_imgs.append(image.id) + assert client.containers.run(image) == b"hello world\n" + + # @pytest.mark.xfail(reason='Engine 1.13 responds with status 500') + def test_build_with_error(self): + client = docker.from_env(version=TEST_API_VERSION) + with pytest.raises(docker.errors.BuildError) as cm: + client.images.build(fileobj=io.BytesIO( + b"FROM alpine\n" + b"RUN exit 1" + )) + assert ( + "The command '/bin/sh -c exit 1' returned a non-zero code: 1" + ) in cm.exconly() + assert cm.value.build_log + + def test_build_with_multiple_success(self): + client = docker.from_env(version=TEST_API_VERSION) + image, _ = client.images.build( + tag='some-tag', fileobj=io.BytesIO( + b"FROM alpine\n" + b"CMD echo hello world" + ) + ) + self.tmp_imgs.append(image.id) + assert client.containers.run(image) == b"hello world\n" + + def test_build_with_success_build_output(self): + client = docker.from_env(version=TEST_API_VERSION) + image, _ = client.images.build( + tag='dup-txt-tag', fileobj=io.BytesIO( + b"FROM alpine\n" + b"CMD echo Successfully built abcd1234" + ) + ) + self.tmp_imgs.append(image.id) + assert client.containers.run(image) == b"Successfully built abcd1234\n" + + def test_list(self): + client = docker.from_env(version=TEST_API_VERSION) + image = client.images.pull('alpine:latest') + assert image.id in get_ids(client.images.list()) + + def test_list_with_repository(self): + client = docker.from_env(version=TEST_API_VERSION) + image = client.images.pull('alpine:latest') + assert image.id in get_ids(client.images.list('alpine')) + assert image.id in get_ids(client.images.list('alpine:latest')) + + def test_pull(self): + client = 
docker.from_env(version=TEST_API_VERSION) + image = client.images.pull('alpine:latest') + assert 'alpine:latest' in image.attrs['RepoTags'] + + def test_pull_with_tag(self): + client = docker.from_env(version=TEST_API_VERSION) + image = client.images.pull('alpine', tag='3.10') + assert 'alpine:3.10' in image.attrs['RepoTags'] + + def test_pull_with_sha(self): + image_ref = ( + 'hello-world@sha256:083de497cff944f969d8499ab94f07134c50bcf5e6b95' + '59b27182d3fa80ce3f7' + ) + client = docker.from_env(version=TEST_API_VERSION) + image = client.images.pull(image_ref) + assert image_ref in image.attrs['RepoDigests'] + + def test_pull_multiple(self): + client = docker.from_env(version=TEST_API_VERSION) + images = client.images.pull('hello-world', all_tags=True) + assert len(images) >= 1 + assert any('hello-world:latest' in img.attrs['RepoTags'] for img in images) + + def test_load_error(self): + client = docker.from_env(version=TEST_API_VERSION) + with pytest.raises(docker.errors.ImageLoadError): + client.images.load('abc') + + def test_save_and_load(self): + client = docker.from_env(version=TEST_API_VERSION) + image = client.images.get(TEST_IMG) + with tempfile.TemporaryFile() as f: + stream = image.save() + for chunk in stream: + f.write(chunk) + + f.seek(0) + result = client.images.load(f.read()) + + assert len(result) == 1 + assert result[0].id == image.id + + def test_save_and_load_repo_name(self): + client = docker.from_env(version=TEST_API_VERSION) + image = client.images.get(TEST_IMG) + additional_tag = random_name() + image.tag(additional_tag) + self.tmp_imgs.append(additional_tag) + image.reload() + with tempfile.TemporaryFile() as f: + stream = image.save(named=f'{additional_tag}:latest') + for chunk in stream: + f.write(chunk) + + f.seek(0) + client.images.remove(additional_tag, force=True) + result = client.images.load(f.read()) + + assert len(result) == 1 + assert result[0].id == image.id + assert f'{additional_tag}:latest' in result[0].tags + + def test_save_name_error(self): + client = docker.from_env(version=TEST_API_VERSION) + image = client.images.get(TEST_IMG) + with pytest.raises(docker.errors.InvalidArgument): + image.save(named='sakuya/izayoi') + + +class ImageTest(BaseIntegrationTest): + + def test_tag_and_remove(self): + repo = 'dockersdk.tests.images.test_tag' + tag = 'some-tag' + identifier = f'{repo}:{tag}' + + client = docker.from_env(version=TEST_API_VERSION) + image = client.images.pull('alpine:latest') + + result = image.tag(repo, tag) + assert result is True + self.tmp_imgs.append(identifier) + assert image.id in get_ids(client.images.list(repo)) + assert image.id in get_ids(client.images.list(identifier)) + + client.images.remove(identifier) + assert image.id not in get_ids(client.images.list(repo)) + assert image.id not in get_ids(client.images.list(identifier)) + + assert image.id in get_ids(client.images.list('alpine:latest')) + + +def get_ids(images): + return [i.id for i in images] diff --git a/tests/integration/models_networks_test.py b/tests/integration/models_networks_test.py new file mode 100644 index 0000000000..f5e6fcf573 --- /dev/null +++ b/tests/integration/models_networks_test.py @@ -0,0 +1,71 @@ +import docker + +from .. 
import helpers +from .base import TEST_API_VERSION, BaseIntegrationTest + + +class NetworkCollectionTest(BaseIntegrationTest): + + def test_create(self): + client = docker.from_env(version=TEST_API_VERSION) + name = helpers.random_name() + network = client.networks.create(name, labels={'foo': 'bar'}) + self.tmp_networks.append(network.id) + assert network.name == name + assert network.attrs['Labels']['foo'] == "bar" + + def test_get(self): + client = docker.from_env(version=TEST_API_VERSION) + name = helpers.random_name() + network_id = client.networks.create(name).id + self.tmp_networks.append(network_id) + network = client.networks.get(network_id) + assert network.name == name + + def test_list_remove(self): + client = docker.from_env(version=TEST_API_VERSION) + name = helpers.random_name() + network = client.networks.create(name) + self.tmp_networks.append(network.id) + assert network.id in [n.id for n in client.networks.list()] + assert network.id not in [ + n.id for n in + client.networks.list(ids=["fdhjklfdfdshjkfds"]) + ] + assert network.id in [ + n.id for n in + client.networks.list(ids=[network.id]) + ] + assert network.id not in [ + n.id for n in + client.networks.list(names=["fdshjklfdsjhkl"]) + ] + assert network.id in [ + n.id for n in + client.networks.list(names=[name]) + ] + network.remove() + assert network.id not in [n.id for n in client.networks.list()] + + +class NetworkTest(BaseIntegrationTest): + + def test_connect_disconnect(self): + client = docker.from_env(version=TEST_API_VERSION) + network = client.networks.create(helpers.random_name()) + self.tmp_networks.append(network.id) + container = client.containers.create("alpine", "sleep 300") + self.tmp_containers.append(container.id) + assert network.containers == [] + network.connect(container) + container.start() + assert client.networks.get(network.id).containers == [container] + network_containers = [ + c + for net in client.networks.list(ids=[network.id], greedy=True) + for c in net.containers + ] + assert network_containers == [container] + network.disconnect(container) + assert network.containers == [] + assert client.networks.get(network.id).containers == [] diff --git a/tests/integration/models_nodes_test.py b/tests/integration/models_nodes_test.py new file mode 100644 index 0000000000..3c8d48adb5 --- /dev/null +++ b/tests/integration/models_nodes_test.py @@ -0,0 +1,37 @@ +import unittest + +import docker + +from .. 
import helpers +from .base import TEST_API_VERSION + + +class NodesTest(unittest.TestCase): + def setUp(self): + helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION)) + + def tearDown(self): + helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION)) + + def test_list_get_update(self): + client = docker.from_env(version=TEST_API_VERSION) + client.swarm.init('127.0.0.1', listen_addr=helpers.swarm_listen_addr()) + nodes = client.nodes.list() + assert len(nodes) == 1 + assert nodes[0].attrs['Spec']['Role'] == 'manager' + + node = client.nodes.get(nodes[0].id) + assert node.id == nodes[0].id + assert node.attrs['Spec']['Role'] == 'manager' + assert node.version > 0 + + node = client.nodes.list()[0] + assert not node.attrs['Spec'].get('Labels') + node.update({ + 'Availability': 'active', + 'Name': 'node-name', + 'Role': 'manager', + 'Labels': {'foo': 'bar'} + }) + node.reload() + assert node.attrs['Spec']['Labels'] == {'foo': 'bar'} diff --git a/tests/integration/models_resources_test.py b/tests/integration/models_resources_test.py new file mode 100644 index 0000000000..7d9762702f --- /dev/null +++ b/tests/integration/models_resources_test.py @@ -0,0 +1,17 @@ +import docker + +from .base import TEST_API_VERSION, BaseIntegrationTest + + +class ModelTest(BaseIntegrationTest): + + def test_reload(self): + client = docker.from_env(version=TEST_API_VERSION) + container = client.containers.run("alpine", "sleep 300", detach=True) + self.tmp_containers.append(container.id) + first_started_at = container.attrs['State']['StartedAt'] + container.kill() + container.start() + assert container.attrs['State']['StartedAt'] == first_started_at + container.reload() + assert container.attrs['State']['StartedAt'] != first_started_at diff --git a/tests/integration/models_services_test.py b/tests/integration/models_services_test.py new file mode 100644 index 0000000000..947ba46d27 --- /dev/null +++ b/tests/integration/models_services_test.py @@ -0,0 +1,379 @@ +import unittest + +import pytest + +import docker +from docker.errors import InvalidArgument +from docker.types.services import ServiceMode + +from .. 
import helpers +from .base import TEST_API_VERSION + + +class ServiceTest(unittest.TestCase): + @classmethod + def setUpClass(cls): + client = docker.from_env(version=TEST_API_VERSION) + helpers.force_leave_swarm(client) + client.swarm.init('127.0.0.1', listen_addr=helpers.swarm_listen_addr()) + + @classmethod + def tearDownClass(cls): + helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION)) + + def test_create(self): + client = docker.from_env(version=TEST_API_VERSION) + name = helpers.random_name() + service = client.services.create( + # create arguments + name=name, + labels={'foo': 'bar'}, + # ContainerSpec arguments + image="alpine", + command="sleep 300", + container_labels={'container': 'label'}, + rollback_config={'order': 'start-first'} + ) + assert service.name == name + assert service.attrs['Spec']['Labels']['foo'] == 'bar' + container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec'] + assert "alpine" in container_spec['Image'] + assert container_spec['Labels'] == {'container': 'label'} + spec_rollback = service.attrs['Spec'].get('RollbackConfig', None) + assert spec_rollback is not None + assert ('Order' in spec_rollback and + spec_rollback['Order'] == 'start-first') + + def test_create_with_network(self): + client = docker.from_env(version=TEST_API_VERSION) + name = helpers.random_name() + network = client.networks.create( + helpers.random_name(), driver='overlay' + ) + service = client.services.create( + # create arguments + name=name, + # ContainerSpec arguments + image="alpine", + command="sleep 300", + networks=[network.id] + ) + assert 'Networks' in service.attrs['Spec']['TaskTemplate'] + networks = service.attrs['Spec']['TaskTemplate']['Networks'] + assert len(networks) == 1 + assert networks[0]['Target'] == network.id + + def test_get(self): + client = docker.from_env(version=TEST_API_VERSION) + name = helpers.random_name() + service = client.services.create( + name=name, + image="alpine", + command="sleep 300" + ) + service = client.services.get(service.id) + assert service.name == name + + def test_list_remove(self): + client = docker.from_env(version=TEST_API_VERSION) + service = client.services.create( + name=helpers.random_name(), + image="alpine", + command="sleep 300" + ) + assert service in client.services.list() + service.remove() + assert service not in client.services.list() + + def test_tasks(self): + client = docker.from_env(version=TEST_API_VERSION) + service1 = client.services.create( + name=helpers.random_name(), + image="alpine", + command="sleep 300" + ) + service2 = client.services.create( + name=helpers.random_name(), + image="alpine", + command="sleep 300" + ) + tasks = [] + while len(tasks) == 0: + tasks = service1.tasks() + assert len(tasks) == 1 + assert tasks[0]['ServiceID'] == service1.id + + tasks = [] + while len(tasks) == 0: + tasks = service2.tasks() + assert len(tasks) == 1 + assert tasks[0]['ServiceID'] == service2.id + + def test_update(self): + client = docker.from_env(version=TEST_API_VERSION) + service = client.services.create( + # create arguments + name=helpers.random_name(), + # ContainerSpec arguments + image="alpine", + command="sleep 300" + ) + service.update( + # create argument + name=service.name, + # ContainerSpec argument + command="sleep 600" + ) + service.reload() + container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec'] + assert container_spec['Command'] == ["sleep", "600"] + + def test_update_retains_service_labels(self): + client = docker.from_env(version=TEST_API_VERSION) + 
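# Labels set on the service at create time should survive an update. +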
service = client.services.create( + # create arguments + name=helpers.random_name(), + labels={'service.label': 'SampleLabel'}, + # ContainerSpec arguments + image="alpine", + command="sleep 300" + ) + service.update( + # create argument + name=service.name, + # ContainerSpec argument + command="sleep 600" + ) + service.reload() + labels = service.attrs['Spec']['Labels'] + assert labels == {'service.label': 'SampleLabel'} + + def test_update_retains_container_labels(self): + client = docker.from_env(version=TEST_API_VERSION) + service = client.services.create( + # create arguments + name=helpers.random_name(), + # ContainerSpec arguments + image="alpine", + command="sleep 300", + container_labels={'container.label': 'SampleLabel'} + ) + service.update( + # create argument + name=service.name, + # ContainerSpec argument + command="sleep 600" + ) + service.reload() + container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec'] + assert container_spec['Labels'] == {'container.label': 'SampleLabel'} + + def test_update_remove_service_labels(self): + client = docker.from_env(version=TEST_API_VERSION) + service = client.services.create( + # create arguments + name=helpers.random_name(), + labels={'service.label': 'SampleLabel'}, + # ContainerSpec arguments + image="alpine", + command="sleep 300" + ) + service.update( + # create argument + name=service.name, + labels={}, + # ContainerSpec argument + command="sleep 600" + ) + service.reload() + assert not service.attrs['Spec'].get('Labels') + + @pytest.mark.xfail(reason='Flaky test') + def test_update_retains_networks(self): + client = docker.from_env(version=TEST_API_VERSION) + network_name = helpers.random_name() + network = client.networks.create( + network_name, driver='overlay' + ) + service = client.services.create( + # create arguments + name=helpers.random_name(), + networks=[network.id], + # ContainerSpec arguments + image="alpine", + command="sleep 300" + ) + service.reload() + service.update( + # create argument + name=service.name, + # ContainerSpec argument + command="sleep 600" + ) + service.reload() + networks = service.attrs['Spec']['TaskTemplate']['Networks'] + assert networks == [{'Target': network.id}] + + def test_scale_service(self): + client = docker.from_env(version=TEST_API_VERSION) + service = client.services.create( + # create arguments + name=helpers.random_name(), + # ContainerSpec arguments + image="alpine", + command="sleep 300" + ) + tasks = [] + while len(tasks) == 0: + tasks = service.tasks() + assert len(tasks) == 1 + service.update( + mode=docker.types.ServiceMode('replicated', replicas=2), + ) + while len(tasks) == 1: + tasks = service.tasks() + assert len(tasks) >= 2 + # check that the container spec is not overridden with None + service.reload() + spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec'] + assert spec.get('Command') == ['sleep', '300'] + + def test_scale_method_service(self): + client = docker.from_env(version=TEST_API_VERSION) + service = client.services.create( + # create arguments + name=helpers.random_name(), + # ContainerSpec arguments + image="alpine", + command="sleep 300", + ) + tasks = [] + while len(tasks) == 0: + tasks = service.tasks() + assert len(tasks) == 1 + service.scale(2) + while len(tasks) == 1: + tasks = service.tasks() + assert len(tasks) >= 2 + # check that the container spec is not overridden with None + service.reload() + spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec'] + assert spec.get('Command') == ['sleep', '300'] + + def 
test_scale_method_global_service(self): + client = docker.from_env(version=TEST_API_VERSION) + mode = ServiceMode('global') + service = client.services.create( + name=helpers.random_name(), + image="alpine", + command="sleep 300", + mode=mode + ) + tasks = [] + while len(tasks) == 0: + tasks = service.tasks() + assert len(tasks) == 1 + with pytest.raises(InvalidArgument): + service.scale(2) + + assert len(tasks) == 1 + service.reload() + spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec'] + assert spec.get('Command') == ['sleep', '300'] + + @helpers.requires_api_version('1.25') + def test_force_update_service(self): + client = docker.from_env(version=TEST_API_VERSION) + service = client.services.create( + # create arguments + name=helpers.random_name(), + # ContainerSpec arguments + image="alpine", + command="sleep 300" + ) + initial_version = service.version + assert service.update( + # create argument + name=service.name, + # task template argument + force_update=10, + # ContainerSpec argument + command="sleep 600" + ) + service.reload() + assert service.version > initial_version + + @helpers.requires_api_version('1.25') + def test_force_update_service_using_bool(self): + client = docker.from_env(version=TEST_API_VERSION) + service = client.services.create( + # create arguments + name=helpers.random_name(), + # ContainerSpec arguments + image="alpine", + command="sleep 300" + ) + initial_version = service.version + assert service.update( + # create argument + name=service.name, + # task template argument + force_update=True, + # ContainerSpec argument + command="sleep 600" + ) + service.reload() + assert service.version > initial_version + + @helpers.requires_api_version('1.25') + def test_force_update_service_using_shorthand_method(self): + client = docker.from_env(version=TEST_API_VERSION) + service = client.services.create( + # create arguments + name=helpers.random_name(), + # ContainerSpec arguments + image="alpine", + command="sleep 300" + ) + initial_version = service.version + assert service.force_update() + service.reload() + assert service.version > initial_version + + @helpers.requires_api_version('1.41') + def test_create_cap_add(self): + client = docker.from_env(version=TEST_API_VERSION) + name = helpers.random_name() + service = client.services.create( + name=name, + labels={'foo': 'bar'}, + image="alpine", + command="sleep 300", + container_labels={'container': 'label'}, + cap_add=["CAP_SYSLOG"] + ) + assert service.name == name + assert service.attrs['Spec']['Labels']['foo'] == 'bar' + container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec'] + assert "alpine" in container_spec['Image'] + assert container_spec['Labels'] == {'container': 'label'} + assert "CAP_SYSLOG" in container_spec["CapabilityAdd"] + + @helpers.requires_api_version('1.41') + def test_create_cap_drop(self): + client = docker.from_env(version=TEST_API_VERSION) + name = helpers.random_name() + service = client.services.create( + name=name, + labels={'foo': 'bar'}, + image="alpine", + command="sleep 300", + container_labels={'container': 'label'}, + cap_drop=["CAP_SYSLOG"] + ) + assert service.name == name + assert service.attrs['Spec']['Labels']['foo'] == 'bar' + container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec'] + assert "alpine" in container_spec['Image'] + assert container_spec['Labels'] == {'container': 'label'} + assert "CAP_SYSLOG" in container_spec["CapabilityDrop"] diff --git a/tests/integration/models_swarm_test.py 
b/tests/integration/models_swarm_test.py new file mode 100644 index 0000000000..f43824c75f --- /dev/null +++ b/tests/integration/models_swarm_test.py @@ -0,0 +1,46 @@ +import unittest + +import pytest + +import docker + +from .. import helpers +from .base import TEST_API_VERSION + + +class SwarmTest(unittest.TestCase): + def setUp(self): + helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION)) + + def tearDown(self): + helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION)) + + def test_init_update_leave(self): + client = docker.from_env(version=TEST_API_VERSION) + client.swarm.init( + advertise_addr='127.0.0.1', snapshot_interval=5000, + listen_addr=helpers.swarm_listen_addr() + ) + assert client.swarm.attrs['Spec']['Raft']['SnapshotInterval'] == 5000 + client.swarm.update(snapshot_interval=10000) + assert client.swarm.attrs['Spec']['Raft']['SnapshotInterval'] == 10000 + assert client.swarm.id + assert client.swarm.leave(force=True) + with pytest.raises(docker.errors.APIError) as cm: + client.swarm.reload() + assert ( + cm.value.response.status_code == 406 or + cm.value.response.status_code == 503 + ) + + def test_join_on_already_joined_swarm(self): + client = docker.from_env(version=TEST_API_VERSION) + client.swarm.init() + join_token = client.swarm.attrs['JoinTokens']['Manager'] + with pytest.raises(docker.errors.APIError) as cm: + client.swarm.join( + remote_addrs=['127.0.0.1'], + join_token=join_token, + ) + assert cm.value.response.status_code == 503 + assert 'This node is already part of a swarm.' in cm.value.explanation diff --git a/tests/integration/models_volumes_test.py b/tests/integration/models_volumes_test.py new file mode 100644 index 0000000000..7d3ffda99d --- /dev/null +++ b/tests/integration/models_volumes_test.py @@ -0,0 +1,31 @@ +import docker + +from .base import TEST_API_VERSION, BaseIntegrationTest + + +class VolumesTest(BaseIntegrationTest): + def test_create_get(self): + client = docker.from_env(version=TEST_API_VERSION) + volume = client.volumes.create( + 'dockerpytest_1', + driver='local', + labels={'labelkey': 'labelvalue'} + ) + self.tmp_volumes.append(volume.id) + assert volume.id + assert volume.name == 'dockerpytest_1' + assert volume.attrs['Labels'] == {'labelkey': 'labelvalue'} + + volume = client.volumes.get(volume.id) + assert volume.name == 'dockerpytest_1' + + def test_list_remove(self): + client = docker.from_env(version=TEST_API_VERSION) + volume = client.volumes.create('dockerpytest_1') + self.tmp_volumes.append(volume.id) + assert volume in client.volumes.list() + assert volume in client.volumes.list(filters={'name': 'dockerpytest_'}) + assert volume not in client.volumes.list(filters={'name': 'foobar'}) + + volume.remove() + assert volume not in client.volumes.list() diff --git a/tests/integration/regression_test.py b/tests/integration/regression_test.py new file mode 100644 index 0000000000..5df9d31210 --- /dev/null +++ b/tests/integration/regression_test.py @@ -0,0 +1,65 @@ +import io +import random + +import pytest + +import docker + +from .base import TEST_IMG, BaseAPIIntegrationTest + + +class TestRegressions(BaseAPIIntegrationTest): + @pytest.mark.xfail(True, reason='Docker API always returns chunked resp') + def test_443_handle_nonchunked_response_in_stream(self): + dfile = io.BytesIO() + with pytest.raises(docker.errors.APIError) as exc: + for _line in self.client.build(fileobj=dfile, tag="a/b/c"): + pass + assert exc.value.is_error() + dfile.close() + + def test_542_truncate_ids_client_side(self): + 
self.client.start( + self.client.create_container(TEST_IMG, ['true']) + ) + result = self.client.containers(all=True, trunc=True) + assert len(result[0]['Id']) == 12 + + def test_647_support_doubleslash_in_image_names(self): + with pytest.raises(docker.errors.APIError): + self.client.inspect_image('gensokyo.jp//kirisame') + + def test_649_handle_timeout_value_none(self): + self.client.timeout = None + ctnr = self.client.create_container(TEST_IMG, ['sleep', '2']) + self.client.start(ctnr) + self.client.stop(ctnr) + + def test_715_handle_user_param_as_int_value(self): + ctnr = self.client.create_container(TEST_IMG, ['id', '-u'], user=1000) + self.client.start(ctnr) + self.client.wait(ctnr) + logs = self.client.logs(ctnr) + logs = logs.decode('utf-8') + assert logs == '1000\n' + + def test_792_explicit_port_protocol(self): + + tcp_port, udp_port = random.sample(range(9999, 32000), 2) + ctnr = self.client.create_container( + TEST_IMG, ['sleep', '9999'], ports=[2000, (2000, 'udp')], + host_config=self.client.create_host_config( + port_bindings={'2000/tcp': tcp_port, '2000/udp': udp_port} + ) + ) + self.tmp_containers.append(ctnr) + self.client.start(ctnr) + assert self.client.port( + ctnr, 2000 + )[0]['HostPort'] == str(tcp_port) + assert self.client.port( + ctnr, '2000/tcp' + )[0]['HostPort'] == str(tcp_port) + assert self.client.port( + ctnr, '2000/udp' + )[0]['HostPort'] == str(udp_port) diff --git a/tests/integration/testdata/dummy-plugin/config.json b/tests/integration/testdata/dummy-plugin/config.json new file mode 100644 index 0000000000..53b4e7aa98 --- /dev/null +++ b/tests/integration/testdata/dummy-plugin/config.json @@ -0,0 +1,19 @@ +{ + "description": "Dummy test plugin for docker python SDK", + "documentation": "https://github.com/docker/docker-py", + "entrypoint": ["/dummy"], + "network": { + "type": "host" + }, + "interface" : { + "types": ["docker.volumedriver/1.0"], + "socket": "dummy.sock" + }, + "env": [ + { + "name":"DEBUG", + "settable":["value"], + "value":"0" + } + ] +} diff --git a/tests/integration/testdata/dummy-plugin/rootfs/dummy/file.txt b/tests/integration/testdata/dummy-plugin/rootfs/dummy/file.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/integration_test.py b/tests/integration_test.py deleted file mode 100644 index ac4a871753..0000000000 --- a/tests/integration_test.py +++ /dev/null @@ -1,1487 +0,0 @@ -# Copyright 2013 dotCloud inc. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import base64 -import contextlib -import json -import io -import os -import shutil -import signal -import socket -import tarfile -import tempfile -import threading -import time -import unittest -import warnings - -import docker -import six - -from six.moves import BaseHTTPServer -from six.moves import socketserver - -from test import Cleanup - -# FIXME: missing tests for -# export; history; insert; port; push; tag; get; load; stats -DEFAULT_BASE_URL = os.environ.get('DOCKER_HOST') -EXEC_DRIVER_IS_NATIVE = True -NOT_ON_HOST = os.environ.get('NOT_ON_HOST', False) - -warnings.simplefilter('error') -create_host_config = docker.utils.create_host_config -compare_version = docker.utils.compare_version - - -class BaseTestCase(unittest.TestCase): - tmp_imgs = [] - tmp_containers = [] - tmp_folders = [] - - def setUp(self): - if six.PY2: - self.assertRegex = self.assertRegexpMatches - self.assertCountEqual = self.assertItemsEqual - self.client = docker.Client(base_url=DEFAULT_BASE_URL, timeout=5) - self.tmp_imgs = [] - self.tmp_containers = [] - self.tmp_folders = [] - - def tearDown(self): - for img in self.tmp_imgs: - try: - self.client.remove_image(img) - except docker.errors.APIError: - pass - for container in self.tmp_containers: - try: - self.client.stop(container, timeout=1) - self.client.remove_container(container) - except docker.errors.APIError: - pass - for folder in self.tmp_folders: - shutil.rmtree(folder) - self.client.close() - -######################### -# INFORMATION TESTS # -######################### - - -class TestVersion(BaseTestCase): - def runTest(self): - res = self.client.version() - self.assertIn('GoVersion', res) - self.assertIn('Version', res) - self.assertEqual(len(res['Version'].split('.')), 3) - - -class TestInfo(BaseTestCase): - def runTest(self): - res = self.client.info() - self.assertIn('Containers', res) - self.assertIn('Images', res) - self.assertIn('Debug', res) - - -class TestSearch(BaseTestCase): - def runTest(self): - self.client = docker.Client(base_url=DEFAULT_BASE_URL, timeout=10) - res = self.client.search('busybox') - self.assertTrue(len(res) >= 1) - base_img = [x for x in res if x['name'] == 'busybox'] - self.assertEqual(len(base_img), 1) - self.assertIn('description', base_img[0]) - -################### -# LISTING TESTS # -################### - - -class TestImages(BaseTestCase): - def runTest(self): - res1 = self.client.images(all=True) - self.assertIn('Id', res1[0]) - res10 = res1[0] - self.assertIn('Created', res10) - self.assertIn('RepoTags', res10) - distinct = [] - for img in res1: - if img['Id'] not in distinct: - distinct.append(img['Id']) - self.assertEqual(len(distinct), self.client.info()['Images']) - - -class TestImageIds(BaseTestCase): - def runTest(self): - res1 = self.client.images(quiet=True) - self.assertEqual(type(res1[0]), six.text_type) - - -class TestListContainers(BaseTestCase): - def runTest(self): - res0 = self.client.containers(all=True) - size = len(res0) - res1 = self.client.create_container('busybox:latest', 'true') - self.assertIn('Id', res1) - self.client.start(res1['Id']) - self.tmp_containers.append(res1['Id']) - res2 = self.client.containers(all=True) - self.assertEqual(size + 1, len(res2)) - retrieved = [x for x in res2 if x['Id'].startswith(res1['Id'])] - self.assertEqual(len(retrieved), 1) - retrieved = retrieved[0] - self.assertIn('Command', retrieved) - self.assertEqual(retrieved['Command'], six.text_type('true')) - self.assertIn('Image', retrieved) - self.assertRegex(retrieved['Image'], r'busybox:.*') - 
self.assertIn('Status', retrieved) - -##################### -# CONTAINER TESTS # -##################### - - -class TestCreateContainer(BaseTestCase): - def runTest(self): - res = self.client.create_container('busybox', 'true') - self.assertIn('Id', res) - self.tmp_containers.append(res['Id']) - - -class TestCreateContainerWithBinds(BaseTestCase): - def runTest(self): - mount_dest = '/mnt' - mount_origin = tempfile.mkdtemp() - self.tmp_folders.append(mount_origin) - - filename = 'shared.txt' - shared_file = os.path.join(mount_origin, filename) - binds = { - mount_origin: { - 'bind': mount_dest, - 'ro': False, - }, - } - - with open(shared_file, 'w'): - container = self.client.create_container( - 'busybox', - ['ls', mount_dest], volumes={mount_dest: {}}, - host_config=create_host_config(binds=binds) - ) - container_id = container['Id'] - self.client.start(container_id) - self.tmp_containers.append(container_id) - exitcode = self.client.wait(container_id) - self.assertEqual(exitcode, 0) - logs = self.client.logs(container_id) - - os.unlink(shared_file) - if six.PY3: - logs = logs.decode('utf-8') - self.assertIn(filename, logs) - inspect_data = self.client.inspect_container(container_id) - self.assertIn('Volumes', inspect_data) - self.assertIn(mount_dest, inspect_data['Volumes']) - self.assertEqual(mount_origin, inspect_data['Volumes'][mount_dest]) - self.assertIn(mount_dest, inspect_data['VolumesRW']) - self.assertTrue(inspect_data['VolumesRW'][mount_dest]) - - -class TestCreateContainerWithRoBinds(BaseTestCase): - def runTest(self): - mount_dest = '/mnt' - mount_origin = tempfile.mkdtemp() - self.tmp_folders.append(mount_origin) - - filename = 'shared.txt' - shared_file = os.path.join(mount_origin, filename) - binds = { - mount_origin: { - 'bind': mount_dest, - 'ro': True, - }, - } - - with open(shared_file, 'w'): - container = self.client.create_container( - 'busybox', - ['ls', mount_dest], volumes={mount_dest: {}}, - host_config=create_host_config(binds=binds) - ) - container_id = container['Id'] - self.client.start(container_id) - self.tmp_containers.append(container_id) - exitcode = self.client.wait(container_id) - self.assertEqual(exitcode, 0) - logs = self.client.logs(container_id) - - os.unlink(shared_file) - if six.PY3: - logs = logs.decode('utf-8') - self.assertIn(filename, logs) - inspect_data = self.client.inspect_container(container_id) - self.assertIn('Volumes', inspect_data) - self.assertIn(mount_dest, inspect_data['Volumes']) - self.assertEqual(mount_origin, inspect_data['Volumes'][mount_dest]) - self.assertIn(mount_dest, inspect_data['VolumesRW']) - self.assertFalse(inspect_data['VolumesRW'][mount_dest]) - - -class TestCreateContainerWithLogConfig(BaseTestCase): - def runTest(self): - config = docker.utils.LogConfig( - type=docker.utils.LogConfig.types.SYSLOG, - config={'key1': 'val1'} - ) - ctnr = self.client.create_container( - 'busybox', ['true'], - host_config=create_host_config(log_config=config) - ) - self.assertIn('Id', ctnr) - self.tmp_containers.append(ctnr['Id']) - self.client.start(ctnr) - info = self.client.inspect_container(ctnr) - self.assertIn('HostConfig', info) - host_config = info['HostConfig'] - self.assertIn('LogConfig', host_config) - log_config = host_config['LogConfig'] - self.assertIn('Type', log_config) - self.assertEqual(log_config['Type'], config.type) - self.assertIn('Config', log_config) - self.assertEqual(type(log_config['Config']), dict) - self.assertEqual(log_config['Config'], config.config) - - -@unittest.skipIf(not EXEC_DRIVER_IS_NATIVE, 
'Exec driver not native') -class TestCreateContainerReadOnlyFs(BaseTestCase): - def runTest(self): - ctnr = self.client.create_container( - 'busybox', ['mkdir', '/shrine'], - host_config=create_host_config(read_only=True) - ) - self.assertIn('Id', ctnr) - self.tmp_containers.append(ctnr['Id']) - self.client.start(ctnr) - res = self.client.wait(ctnr) - self.assertNotEqual(res, 0) - - -class TestCreateContainerWithName(BaseTestCase): - def runTest(self): - res = self.client.create_container('busybox', 'true', name='foobar') - self.assertIn('Id', res) - self.tmp_containers.append(res['Id']) - inspect = self.client.inspect_container(res['Id']) - self.assertIn('Name', inspect) - self.assertEqual('/foobar', inspect['Name']) - - -class TestRenameContainer(BaseTestCase): - def runTest(self): - version = self.client.version()['Version'] - name = 'hong_meiling' - res = self.client.create_container('busybox', 'true') - self.assertIn('Id', res) - self.tmp_containers.append(res['Id']) - self.client.rename(res, name) - inspect = self.client.inspect_container(res['Id']) - self.assertIn('Name', inspect) - if version == '1.5.0': - self.assertEqual(name, inspect['Name']) - else: - self.assertEqual('/{0}'.format(name), inspect['Name']) - - -class TestStartContainer(BaseTestCase): - def runTest(self): - res = self.client.create_container('busybox', 'true') - self.assertIn('Id', res) - self.tmp_containers.append(res['Id']) - self.client.start(res['Id']) - inspect = self.client.inspect_container(res['Id']) - self.assertIn('Config', inspect) - self.assertIn('Id', inspect) - self.assertTrue(inspect['Id'].startswith(res['Id'])) - self.assertIn('Image', inspect) - self.assertIn('State', inspect) - self.assertIn('Running', inspect['State']) - if not inspect['State']['Running']: - self.assertIn('ExitCode', inspect['State']) - self.assertEqual(inspect['State']['ExitCode'], 0) - - -class TestStartContainerWithDictInsteadOfId(BaseTestCase): - def runTest(self): - res = self.client.create_container('busybox', 'true') - self.assertIn('Id', res) - self.tmp_containers.append(res['Id']) - self.client.start(res) - inspect = self.client.inspect_container(res['Id']) - self.assertIn('Config', inspect) - self.assertIn('Id', inspect) - self.assertTrue(inspect['Id'].startswith(res['Id'])) - self.assertIn('Image', inspect) - self.assertIn('State', inspect) - self.assertIn('Running', inspect['State']) - if not inspect['State']['Running']: - self.assertIn('ExitCode', inspect['State']) - self.assertEqual(inspect['State']['ExitCode'], 0) - - -class TestCreateContainerPrivileged(BaseTestCase): - def runTest(self): - res = self.client.create_container( - 'busybox', 'true', host_config=create_host_config(privileged=True) - ) - self.assertIn('Id', res) - self.tmp_containers.append(res['Id']) - self.client.start(res['Id']) - inspect = self.client.inspect_container(res['Id']) - self.assertIn('Config', inspect) - self.assertIn('Id', inspect) - self.assertTrue(inspect['Id'].startswith(res['Id'])) - self.assertIn('Image', inspect) - self.assertIn('State', inspect) - self.assertIn('Running', inspect['State']) - if not inspect['State']['Running']: - self.assertIn('ExitCode', inspect['State']) - self.assertEqual(inspect['State']['ExitCode'], 0) - # Since Nov 2013, the Privileged flag is no longer part of the - # container's config exposed via the API (safety concerns?). 
- # - if 'Privileged' in inspect['Config']: - self.assertEqual(inspect['Config']['Privileged'], True) - - -class TestWait(BaseTestCase): - def runTest(self): - res = self.client.create_container('busybox', ['sleep', '3']) - id = res['Id'] - self.tmp_containers.append(id) - self.client.start(id) - exitcode = self.client.wait(id) - self.assertEqual(exitcode, 0) - inspect = self.client.inspect_container(id) - self.assertIn('Running', inspect['State']) - self.assertEqual(inspect['State']['Running'], False) - self.assertIn('ExitCode', inspect['State']) - self.assertEqual(inspect['State']['ExitCode'], exitcode) - - -class TestWaitWithDictInsteadOfId(BaseTestCase): - def runTest(self): - res = self.client.create_container('busybox', ['sleep', '3']) - id = res['Id'] - self.tmp_containers.append(id) - self.client.start(res) - exitcode = self.client.wait(res) - self.assertEqual(exitcode, 0) - inspect = self.client.inspect_container(res) - self.assertIn('Running', inspect['State']) - self.assertEqual(inspect['State']['Running'], False) - self.assertIn('ExitCode', inspect['State']) - self.assertEqual(inspect['State']['ExitCode'], exitcode) - - -class TestLogs(BaseTestCase): - def runTest(self): - snippet = 'Flowering Nights (Sakuya Iyazoi)' - container = self.client.create_container( - 'busybox', 'echo {0}'.format(snippet) - ) - id = container['Id'] - self.client.start(id) - self.tmp_containers.append(id) - exitcode = self.client.wait(id) - self.assertEqual(exitcode, 0) - logs = self.client.logs(id) - self.assertEqual(logs, (snippet + '\n').encode(encoding='ascii')) - - -class TestLogsWithTailOption(BaseTestCase): - def runTest(self): - snippet = '''Line1 -Line2''' - container = self.client.create_container( - 'busybox', 'echo "{0}"'.format(snippet) - ) - id = container['Id'] - self.client.start(id) - self.tmp_containers.append(id) - exitcode = self.client.wait(id) - self.assertEqual(exitcode, 0) - logs = self.client.logs(id, tail=1) - self.assertEqual(logs, ('Line2\n').encode(encoding='ascii')) - - -# class TestLogsStreaming(BaseTestCase): -# def runTest(self): -# snippet = 'Flowering Nights (Sakuya Iyazoi)' -# container = self.client.create_container( -# 'busybox', 'echo {0}'.format(snippet) -# ) -# id = container['Id'] -# self.client.start(id) -# self.tmp_containers.append(id) -# logs = bytes() if six.PY3 else str() -# for chunk in self.client.logs(id, stream=True): -# logs += chunk - -# exitcode = self.client.wait(id) -# self.assertEqual(exitcode, 0) - -# self.assertEqual(logs, (snippet + '\n').encode(encoding='ascii')) - - -class TestLogsWithDictInsteadOfId(BaseTestCase): - def runTest(self): - snippet = 'Flowering Nights (Sakuya Iyazoi)' - container = self.client.create_container( - 'busybox', 'echo {0}'.format(snippet) - ) - id = container['Id'] - self.client.start(id) - self.tmp_containers.append(id) - exitcode = self.client.wait(id) - self.assertEqual(exitcode, 0) - logs = self.client.logs(container) - self.assertEqual(logs, (snippet + '\n').encode(encoding='ascii')) - - -class TestDiff(BaseTestCase): - def runTest(self): - container = self.client.create_container('busybox', ['touch', '/test']) - id = container['Id'] - self.client.start(id) - self.tmp_containers.append(id) - exitcode = self.client.wait(id) - self.assertEqual(exitcode, 0) - diff = self.client.diff(id) - test_diff = [x for x in diff if x.get('Path', None) == '/test'] - self.assertEqual(len(test_diff), 1) - self.assertIn('Kind', test_diff[0]) - self.assertEqual(test_diff[0]['Kind'], 1) - - -class 
TestDiffWithDictInsteadOfId(BaseTestCase): - def runTest(self): - container = self.client.create_container('busybox', ['touch', '/test']) - id = container['Id'] - self.client.start(id) - self.tmp_containers.append(id) - exitcode = self.client.wait(id) - self.assertEqual(exitcode, 0) - diff = self.client.diff(container) - test_diff = [x for x in diff if x.get('Path', None) == '/test'] - self.assertEqual(len(test_diff), 1) - self.assertIn('Kind', test_diff[0]) - self.assertEqual(test_diff[0]['Kind'], 1) - - -class TestStop(BaseTestCase): - def runTest(self): - container = self.client.create_container('busybox', ['sleep', '9999']) - id = container['Id'] - self.client.start(id) - self.tmp_containers.append(id) - self.client.stop(id, timeout=2) - container_info = self.client.inspect_container(id) - self.assertIn('State', container_info) - state = container_info['State'] - self.assertIn('ExitCode', state) - if EXEC_DRIVER_IS_NATIVE: - self.assertNotEqual(state['ExitCode'], 0) - self.assertIn('Running', state) - self.assertEqual(state['Running'], False) - - -class TestStopWithDictInsteadOfId(BaseTestCase): - def runTest(self): - container = self.client.create_container('busybox', ['sleep', '9999']) - self.assertIn('Id', container) - id = container['Id'] - self.client.start(container) - self.tmp_containers.append(id) - self.client.stop(container, timeout=2) - container_info = self.client.inspect_container(id) - self.assertIn('State', container_info) - state = container_info['State'] - self.assertIn('ExitCode', state) - if EXEC_DRIVER_IS_NATIVE: - self.assertNotEqual(state['ExitCode'], 0) - self.assertIn('Running', state) - self.assertEqual(state['Running'], False) - - -class TestKill(BaseTestCase): - def runTest(self): - container = self.client.create_container('busybox', ['sleep', '9999']) - id = container['Id'] - self.client.start(id) - self.tmp_containers.append(id) - self.client.kill(id) - container_info = self.client.inspect_container(id) - self.assertIn('State', container_info) - state = container_info['State'] - self.assertIn('ExitCode', state) - if EXEC_DRIVER_IS_NATIVE: - self.assertNotEqual(state['ExitCode'], 0) - self.assertIn('Running', state) - self.assertEqual(state['Running'], False) - - -class TestKillWithDictInsteadOfId(BaseTestCase): - def runTest(self): - container = self.client.create_container('busybox', ['sleep', '9999']) - id = container['Id'] - self.client.start(id) - self.tmp_containers.append(id) - self.client.kill(container) - container_info = self.client.inspect_container(id) - self.assertIn('State', container_info) - state = container_info['State'] - self.assertIn('ExitCode', state) - if EXEC_DRIVER_IS_NATIVE: - self.assertNotEqual(state['ExitCode'], 0) - self.assertIn('Running', state) - self.assertEqual(state['Running'], False) - - -class TestKillWithSignal(BaseTestCase): - def runTest(self): - container = self.client.create_container('busybox', ['sleep', '60']) - id = container['Id'] - self.client.start(id) - self.tmp_containers.append(id) - self.client.kill(id, signal=signal.SIGKILL) - exitcode = self.client.wait(id) - self.assertNotEqual(exitcode, 0) - container_info = self.client.inspect_container(id) - self.assertIn('State', container_info) - state = container_info['State'] - self.assertIn('ExitCode', state) - self.assertNotEqual(state['ExitCode'], 0) - self.assertIn('Running', state) - self.assertEqual(state['Running'], False, state) - - -class TestPort(BaseTestCase): - def runTest(self): - - port_bindings = { - '1111': ('127.0.0.1', '4567'), - '2222': 
('127.0.0.1', '4568') - } - - container = self.client.create_container( - 'busybox', ['sleep', '60'], ports=list(port_bindings.keys()), - host_config=create_host_config(port_bindings=port_bindings) - ) - id = container['Id'] - - self.client.start(container) - - # Call the port function on each biding and compare expected vs actual - for port in port_bindings: - actual_bindings = self.client.port(container, port) - port_binding = actual_bindings.pop() - - ip, host_port = port_binding['HostIp'], port_binding['HostPort'] - - self.assertEqual(ip, port_bindings[port][0]) - self.assertEqual(host_port, port_bindings[port][1]) - - self.client.kill(id) - - -class TestMacAddress(BaseTestCase): - def runTest(self): - mac_address_expected = "02:42:ac:11:00:0a" - container = self.client.create_container( - 'busybox', ['sleep', '60'], mac_address=mac_address_expected) - - id = container['Id'] - - self.client.start(container) - res = self.client.inspect_container(container['Id']) - self.assertEqual(mac_address_expected, - res['NetworkSettings']['MacAddress']) - - self.client.kill(id) - - -class TestRestart(BaseTestCase): - def runTest(self): - container = self.client.create_container('busybox', ['sleep', '9999']) - id = container['Id'] - self.client.start(id) - self.tmp_containers.append(id) - info = self.client.inspect_container(id) - self.assertIn('State', info) - self.assertIn('StartedAt', info['State']) - start_time1 = info['State']['StartedAt'] - self.client.restart(id, timeout=2) - info2 = self.client.inspect_container(id) - self.assertIn('State', info2) - self.assertIn('StartedAt', info2['State']) - start_time2 = info2['State']['StartedAt'] - self.assertNotEqual(start_time1, start_time2) - self.assertIn('Running', info2['State']) - self.assertEqual(info2['State']['Running'], True) - self.client.kill(id) - - -class TestRestartWithDictInsteadOfId(BaseTestCase): - def runTest(self): - container = self.client.create_container('busybox', ['sleep', '9999']) - self.assertIn('Id', container) - id = container['Id'] - self.client.start(container) - self.tmp_containers.append(id) - info = self.client.inspect_container(id) - self.assertIn('State', info) - self.assertIn('StartedAt', info['State']) - start_time1 = info['State']['StartedAt'] - self.client.restart(container, timeout=2) - info2 = self.client.inspect_container(id) - self.assertIn('State', info2) - self.assertIn('StartedAt', info2['State']) - start_time2 = info2['State']['StartedAt'] - self.assertNotEqual(start_time1, start_time2) - self.assertIn('Running', info2['State']) - self.assertEqual(info2['State']['Running'], True) - self.client.kill(id) - - -class TestRemoveContainer(BaseTestCase): - def runTest(self): - container = self.client.create_container('busybox', ['true']) - id = container['Id'] - self.client.start(id) - self.client.wait(id) - self.client.remove_container(id) - containers = self.client.containers(all=True) - res = [x for x in containers if 'Id' in x and x['Id'].startswith(id)] - self.assertEqual(len(res), 0) - - -class TestRemoveContainerWithDictInsteadOfId(BaseTestCase): - def runTest(self): - container = self.client.create_container('busybox', ['true']) - id = container['Id'] - self.client.start(id) - self.client.wait(id) - self.client.remove_container(container) - containers = self.client.containers(all=True) - res = [x for x in containers if 'Id' in x and x['Id'].startswith(id)] - self.assertEqual(len(res), 0) - - -class TestCreateContainerWithVolumesFrom(BaseTestCase): - def runTest(self): - vol_names = ['foobar_vol0', 
'foobar_vol1'] - - res0 = self.client.create_container( - 'busybox', 'true', name=vol_names[0] - ) - container1_id = res0['Id'] - self.tmp_containers.append(container1_id) - self.client.start(container1_id) - - res1 = self.client.create_container( - 'busybox', 'true', name=vol_names[1] - ) - container2_id = res1['Id'] - self.tmp_containers.append(container2_id) - self.client.start(container2_id) - with self.assertRaises(docker.errors.DockerException): - self.client.create_container( - 'busybox', 'cat', detach=True, stdin_open=True, - volumes_from=vol_names - ) - res2 = self.client.create_container( - 'busybox', 'cat', detach=True, stdin_open=True, - host_config=create_host_config(volumes_from=vol_names) - ) - container3_id = res2['Id'] - self.tmp_containers.append(container3_id) - self.client.start(container3_id) - - info = self.client.inspect_container(res2['Id']) - self.assertCountEqual(info['HostConfig']['VolumesFrom'], vol_names) - - -class TestCreateContainerWithLinks(BaseTestCase): - def runTest(self): - res0 = self.client.create_container( - 'busybox', 'cat', - detach=True, stdin_open=True, - environment={'FOO': '1'}) - - container1_id = res0['Id'] - self.tmp_containers.append(container1_id) - - self.client.start(container1_id) - - res1 = self.client.create_container( - 'busybox', 'cat', - detach=True, stdin_open=True, - environment={'FOO': '1'}) - - container2_id = res1['Id'] - self.tmp_containers.append(container2_id) - - self.client.start(container2_id) - - # we don't want the first / - link_path1 = self.client.inspect_container(container1_id)['Name'][1:] - link_alias1 = 'mylink1' - link_env_prefix1 = link_alias1.upper() - - link_path2 = self.client.inspect_container(container2_id)['Name'][1:] - link_alias2 = 'mylink2' - link_env_prefix2 = link_alias2.upper() - - res2 = self.client.create_container( - 'busybox', 'env', host_config=create_host_config( - links={link_path1: link_alias1, link_path2: link_alias2} - ) - ) - container3_id = res2['Id'] - self.tmp_containers.append(container3_id) - self.client.start(container3_id) - self.assertEqual(self.client.wait(container3_id), 0) - - logs = self.client.logs(container3_id) - if six.PY3: - logs = logs.decode('utf-8') - self.assertIn('{0}_NAME='.format(link_env_prefix1), logs) - self.assertIn('{0}_ENV_FOO=1'.format(link_env_prefix1), logs) - self.assertIn('{0}_NAME='.format(link_env_prefix2), logs) - self.assertIn('{0}_ENV_FOO=1'.format(link_env_prefix2), logs) - - -class TestRestartingContainer(BaseTestCase): - def runTest(self): - container = self.client.create_container( - 'busybox', ['sleep', '2'], host_config=create_host_config( - restart_policy={"Name": "always", "MaximumRetryCount": 0} - ) - ) - id = container['Id'] - self.client.start(id) - self.client.wait(id) - with self.assertRaises(docker.errors.APIError) as exc: - self.client.remove_container(id) - err = exc.exception.response.text - self.assertIn( - 'You cannot remove a running container', err - ) - self.client.remove_container(id, force=True) - - -@unittest.skipIf(not EXEC_DRIVER_IS_NATIVE, 'Exec driver not native') -class TestExecuteCommand(BaseTestCase): - def runTest(self): - container = self.client.create_container('busybox', 'cat', - detach=True, stdin_open=True) - id = container['Id'] - self.client.start(id) - self.tmp_containers.append(id) - - res = self.client.exec_create(id, ['echo', 'hello']) - self.assertIn('Id', res) - - exec_log = self.client.exec_start(res) - expected = b'hello\n' if six.PY3 else 'hello\n' - self.assertEqual(exec_log, expected) - - 
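(Aside: the exec cases in this deleted suite drive the low-level `exec_create`/`exec_start` pair by hand. The same round trip can be expressed through today's high-level client; the following is a minimal sketch, assuming a recent SDK in which `exec_run` returns an `(exit_code, output)` pair, with an illustrative `alpine` image and commands, not code taken from this patch:

```
import docker

client = docker.from_env()
# Disposable container to exec into; removed unconditionally below,
# much as these legacy cases clean up via tmp_containers.
container = client.containers.run('alpine', 'sleep 60', detach=True)
try:
    # exec_create + exec_start collapsed into a single call.
    exit_code, output = container.exec_run(['echo', 'hello'])
    assert exit_code == 0
    assert output == b'hello\n'
finally:
    container.remove(force=True)
```

The try/finally mirrors the teardown the legacy base class performs for its temporary containers.)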
-@unittest.skipIf(not EXEC_DRIVER_IS_NATIVE, 'Exec driver not native') -class TestExecuteCommandString(BaseTestCase): - def runTest(self): - container = self.client.create_container('busybox', 'cat', - detach=True, stdin_open=True) - id = container['Id'] - self.client.start(id) - self.tmp_containers.append(id) - - res = self.client.exec_create(id, 'echo hello world') - self.assertIn('Id', res) - - exec_log = self.client.exec_start(res) - expected = b'hello world\n' if six.PY3 else 'hello world\n' - self.assertEqual(exec_log, expected) - - -@unittest.skipIf(not EXEC_DRIVER_IS_NATIVE, 'Exec driver not native') -class TestExecuteCommandStreaming(BaseTestCase): - def runTest(self): - container = self.client.create_container('busybox', 'cat', - detach=True, stdin_open=True) - id = container['Id'] - self.client.start(id) - self.tmp_containers.append(id) - - exec_id = self.client.exec_create(id, ['echo', 'hello\nworld']) - self.assertIn('Id', exec_id) - - res = b'' if six.PY3 else '' - for chunk in self.client.exec_start(exec_id, stream=True): - res += chunk - expected = b'hello\nworld\n' if six.PY3 else 'hello\nworld\n' - self.assertEqual(res, expected) - - -@unittest.skipIf(not EXEC_DRIVER_IS_NATIVE, 'Exec driver not native') -class TestExecInspect(BaseTestCase): - def runTest(self): - container = self.client.create_container('busybox', 'cat', - detach=True, stdin_open=True) - id = container['Id'] - self.client.start(id) - self.tmp_containers.append(id) - - exec_id = self.client.exec_create(id, ['mkdir', '/does/not/exist']) - self.assertIn('Id', exec_id) - self.client.exec_start(exec_id) - exec_info = self.client.exec_inspect(exec_id) - self.assertIn('ExitCode', exec_info) - self.assertNotEqual(exec_info['ExitCode'], 0) - - -class TestRunContainerStreaming(BaseTestCase): - def runTest(self): - container = self.client.create_container('busybox', '/bin/sh', - detach=True, stdin_open=True) - id = container['Id'] - self.client.start(id) - self.tmp_containers.append(id) - sock = self.client.attach_socket(container, ws=False) - self.assertTrue(sock.fileno() > -1) - - -class TestPauseUnpauseContainer(BaseTestCase): - def runTest(self): - container = self.client.create_container('busybox', ['sleep', '9999']) - id = container['Id'] - self.tmp_containers.append(id) - self.client.start(container) - self.client.pause(id) - container_info = self.client.inspect_container(id) - self.assertIn('State', container_info) - state = container_info['State'] - self.assertIn('ExitCode', state) - self.assertEqual(state['ExitCode'], 0) - self.assertIn('Running', state) - self.assertEqual(state['Running'], True) - self.assertIn('Paused', state) - self.assertEqual(state['Paused'], True) - - self.client.unpause(id) - container_info = self.client.inspect_container(id) - self.assertIn('State', container_info) - state = container_info['State'] - self.assertIn('ExitCode', state) - self.assertEqual(state['ExitCode'], 0) - self.assertIn('Running', state) - self.assertEqual(state['Running'], True) - self.assertIn('Paused', state) - self.assertEqual(state['Paused'], False) - - -class TestCreateContainerWithHostPidMode(BaseTestCase): - def runTest(self): - ctnr = self.client.create_container( - 'busybox', 'true', host_config=create_host_config( - pid_mode='host' - ) - ) - self.assertIn('Id', ctnr) - self.tmp_containers.append(ctnr['Id']) - self.client.start(ctnr) - inspect = self.client.inspect_container(ctnr) - self.assertIn('HostConfig', inspect) - host_config = inspect['HostConfig'] - self.assertIn('PidMode', host_config) - 
self.assertEqual(host_config['PidMode'], 'host') - - -################# -# LINKS TESTS # -################# - - -class TestRemoveLink(BaseTestCase): - def runTest(self): - # Create containers - container1 = self.client.create_container( - 'busybox', 'cat', detach=True, stdin_open=True - ) - container1_id = container1['Id'] - self.tmp_containers.append(container1_id) - self.client.start(container1_id) - - # Create Link - # we don't want the first / - link_path = self.client.inspect_container(container1_id)['Name'][1:] - link_alias = 'mylink' - - container2 = self.client.create_container( - 'busybox', 'cat', host_config=create_host_config( - links={link_path: link_alias} - ) - ) - container2_id = container2['Id'] - self.tmp_containers.append(container2_id) - self.client.start(container2_id) - - # Remove link - linked_name = self.client.inspect_container(container2_id)['Name'][1:] - link_name = '%s/%s' % (linked_name, link_alias) - self.client.remove_container(link_name, link=True) - - # Link is gone - containers = self.client.containers(all=True) - retrieved = [x for x in containers if link_name in x['Names']] - self.assertEqual(len(retrieved), 0) - - # Containers are still there - retrieved = [ - x for x in containers if x['Id'].startswith(container1_id) or - x['Id'].startswith(container2_id) - ] - self.assertEqual(len(retrieved), 2) - -################## -# IMAGES TESTS # -################## - - -class TestPull(BaseTestCase): - def runTest(self): - self.client.close() - self.client = docker.Client(base_url=DEFAULT_BASE_URL, timeout=10) - try: - self.client.remove_image('busybox') - except docker.errors.APIError: - pass - res = self.client.pull('busybox') - self.assertEqual(type(res), six.text_type) - self.assertGreaterEqual( - len(self.client.images('busybox')), 1 - ) - img_info = self.client.inspect_image('busybox') - self.assertIn('Id', img_info) - - -class TestPullStream(BaseTestCase): - def runTest(self): - self.client.close() - self.client = docker.Client(base_url=DEFAULT_BASE_URL, timeout=10) - try: - self.client.remove_image('busybox') - except docker.errors.APIError: - pass - stream = self.client.pull('busybox', stream=True) - for chunk in stream: - if six.PY3: - chunk = chunk.decode('utf-8') - json.loads(chunk) # ensure chunk is a single, valid JSON blob - self.assertGreaterEqual( - len(self.client.images('busybox')), 1 - ) - img_info = self.client.inspect_image('busybox') - self.assertIn('Id', img_info) - - -class TestCommit(BaseTestCase): - def runTest(self): - container = self.client.create_container('busybox', ['touch', '/test']) - id = container['Id'] - self.client.start(id) - self.tmp_containers.append(id) - res = self.client.commit(id) - self.assertIn('Id', res) - img_id = res['Id'] - self.tmp_imgs.append(img_id) - img = self.client.inspect_image(img_id) - self.assertIn('Container', img) - self.assertTrue(img['Container'].startswith(id)) - self.assertIn('ContainerConfig', img) - self.assertIn('Image', img['ContainerConfig']) - self.assertEqual('busybox', img['ContainerConfig']['Image']) - busybox_id = self.client.inspect_image('busybox')['Id'] - self.assertIn('Parent', img) - self.assertEqual(img['Parent'], busybox_id) - - -class TestRemoveImage(BaseTestCase): - def runTest(self): - container = self.client.create_container('busybox', ['touch', '/test']) - id = container['Id'] - self.client.start(id) - self.tmp_containers.append(id) - res = self.client.commit(id) - self.assertIn('Id', res) - img_id = res['Id'] - self.tmp_imgs.append(img_id) - self.client.remove_image(img_id, 
force=True) - images = self.client.images(all=True) - res = [x for x in images if x['Id'].startswith(img_id)] - self.assertEqual(len(res), 0) - - -################## -# IMPORT TESTS # -################## - - -class ImportTestCase(BaseTestCase): - '''Base class for `docker import` test cases.''' - - # Use a large file size to increase the chance of triggering any - # MemoryError exceptions we might hit. - TAR_SIZE = 512 * 1024 * 1024 - - def write_dummy_tar_content(self, n_bytes, tar_fd): - def extend_file(f, n_bytes): - f.seek(n_bytes - 1) - f.write(bytearray([65])) - f.seek(0) - - tar = tarfile.TarFile(fileobj=tar_fd, mode='w') - - with tempfile.NamedTemporaryFile() as f: - extend_file(f, n_bytes) - tarinfo = tar.gettarinfo(name=f.name, arcname='testdata') - tar.addfile(tarinfo, fileobj=f) - - tar.close() - - @contextlib.contextmanager - def dummy_tar_stream(self, n_bytes): - '''Yields a stream that is valid tar data of size n_bytes.''' - with tempfile.NamedTemporaryFile() as tar_file: - self.write_dummy_tar_content(n_bytes, tar_file) - tar_file.seek(0) - yield tar_file - - @contextlib.contextmanager - def dummy_tar_file(self, n_bytes): - '''Yields the name of a valid tar file of size n_bytes.''' - with tempfile.NamedTemporaryFile() as tar_file: - self.write_dummy_tar_content(n_bytes, tar_file) - tar_file.seek(0) - yield tar_file.name - - -class TestImportFromBytes(ImportTestCase): - '''Tests importing an image from in-memory byte data.''' - - def runTest(self): - with self.dummy_tar_stream(n_bytes=500) as f: - content = f.read() - - # The generic import_image() function cannot import in-memory bytes - # data that happens to be represented as a string type, because - # import_image() will try to use it as a filename and usually then - # trigger an exception. So we test the import_image_from_data() - # function instead. 
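-        # A compact illustration of the point made above, kept as a comment
-        # so it stays out of the test's control flow; it uses the current
-        # low-level client name, docker.APIClient (this legacy suite used
-        # docker.Client), and an illustrative repository name:
-        #
-        #     import docker
-        #     client = docker.APIClient()
-        #     raw = open('image.tar', 'rb').read()
-        #     # Wrong for in-memory data: a str/bytes src is treated as a
-        #     # filename or URL, not as content:
-        #     # client.import_image(src=raw, repository='test/import')
-        #     # Right: the bytes-aware helper:
-        #     client.import_image_from_data(raw, repository='test/import')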
- statuses = self.client.import_image_from_data( - content, repository='test/import-from-bytes') - - result_text = statuses.splitlines()[-1] - result = json.loads(result_text) - - self.assertNotIn('error', result) - - img_id = result['status'] - self.tmp_imgs.append(img_id) - - -class TestImportFromFile(ImportTestCase): - '''Tests importing an image from a tar file on disk.''' - - def runTest(self): - with self.dummy_tar_file(n_bytes=self.TAR_SIZE) as tar_filename: - # statuses = self.client.import_image( - # src=tar_filename, repository='test/import-from-file') - statuses = self.client.import_image_from_file( - tar_filename, repository='test/import-from-file') - - result_text = statuses.splitlines()[-1] - result = json.loads(result_text) - - self.assertNotIn('error', result) - - self.assertIn('status', result) - img_id = result['status'] - self.tmp_imgs.append(img_id) - - -class TestImportFromStream(ImportTestCase): - '''Tests importing an image from a stream containing tar data.''' - - def runTest(self): - with self.dummy_tar_stream(n_bytes=self.TAR_SIZE) as tar_stream: - statuses = self.client.import_image( - src=tar_stream, repository='test/import-from-stream') - # statuses = self.client.import_image_from_stream( - # tar_stream, repository='test/import-from-stream') - result_text = statuses.splitlines()[-1] - result = json.loads(result_text) - - self.assertNotIn('error', result) - - self.assertIn('status', result) - img_id = result['status'] - self.tmp_imgs.append(img_id) - - -@unittest.skipIf(NOT_ON_HOST, 'Tests running inside a container') -class TestImportFromURL(ImportTestCase): - '''Tests downloading an image over HTTP.''' - - @contextlib.contextmanager - def temporary_http_file_server(self, stream): - '''Serve data from an IO stream over HTTP.''' - - class Handler(BaseHTTPServer.BaseHTTPRequestHandler): - def do_GET(self): - self.send_response(200) - self.send_header('Content-Type', 'application/x-tar') - self.end_headers() - shutil.copyfileobj(stream, self.wfile) - - server = socketserver.TCPServer(('', 0), Handler) - thread = threading.Thread(target=server.serve_forever) - thread.setDaemon(True) - thread.start() - - yield 'http://%s:%s' % (socket.gethostname(), server.server_address[1]) - - server.shutdown() - - def runTest(self): - # The crappy test HTTP server doesn't handle large files well, so use - # a small file. 
- TAR_SIZE = 10240 - - with self.dummy_tar_stream(n_bytes=TAR_SIZE) as tar_data: - with self.temporary_http_file_server(tar_data) as url: - statuses = self.client.import_image( - src=url, repository='test/import-from-url') - - result_text = statuses.splitlines()[-1] - result = json.loads(result_text) - - self.assertNotIn('error', result) - - self.assertIn('status', result) - img_id = result['status'] - self.tmp_imgs.append(img_id) - - -################# -# BUILDER TESTS # -################# - - -class TestBuild(BaseTestCase): - def runTest(self): - if compare_version(self.client._version, '1.8') < 0: - return - script = io.BytesIO('\n'.join([ - 'FROM busybox', - 'MAINTAINER docker-py', - 'RUN mkdir -p /tmp/test', - 'EXPOSE 8080', - 'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz' - ' /tmp/silence.tar.gz' - ]).encode('ascii')) - img, logs = self.client.build(fileobj=script) - self.assertNotEqual(img, None) - self.assertNotEqual(img, '') - self.assertNotEqual(logs, '') - container1 = self.client.create_container(img, 'test -d /tmp/test') - id1 = container1['Id'] - self.client.start(id1) - self.tmp_containers.append(id1) - exitcode1 = self.client.wait(id1) - self.assertEqual(exitcode1, 0) - container2 = self.client.create_container(img, 'test -d /tmp/test') - id2 = container2['Id'] - self.client.start(id2) - self.tmp_containers.append(id2) - exitcode2 = self.client.wait(id2) - self.assertEqual(exitcode2, 0) - self.tmp_imgs.append(img) - - -class TestBuildStream(BaseTestCase): - def runTest(self): - script = io.BytesIO('\n'.join([ - 'FROM busybox', - 'MAINTAINER docker-py', - 'RUN mkdir -p /tmp/test', - 'EXPOSE 8080', - 'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz' - ' /tmp/silence.tar.gz' - ]).encode('ascii')) - stream = self.client.build(fileobj=script, stream=True) - logs = '' - for chunk in stream: - if six.PY3: - chunk = chunk.decode('utf-8') - json.loads(chunk) # ensure chunk is a single, valid JSON blob - logs += chunk - self.assertNotEqual(logs, '') - - -class TestBuildFromStringIO(BaseTestCase): - def runTest(self): - if six.PY3: - return - script = io.StringIO(six.text_type('\n').join([ - 'FROM busybox', - 'MAINTAINER docker-py', - 'RUN mkdir -p /tmp/test', - 'EXPOSE 8080', - 'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz' - ' /tmp/silence.tar.gz' - ])) - stream = self.client.build(fileobj=script, stream=True) - logs = '' - for chunk in stream: - if six.PY3: - chunk = chunk.decode('utf-8') - logs += chunk - self.assertNotEqual(logs, '') - - -class TestBuildWithDockerignore(Cleanup, BaseTestCase): - def runTest(self): - if compare_version(self.client._version, '1.8') >= 0: - return - - base_dir = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, base_dir) - - with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f: - f.write("\n".join([ - 'FROM busybox', - 'MAINTAINER docker-py', - 'ADD . 
/test', - 'RUN ls -A /test', - ])) - - with open(os.path.join(base_dir, '.dockerignore'), 'w') as f: - f.write("\n".join([ - 'node_modules', - 'Dockerfile', - '.dockerginore', - '', # empty line - ])) - - with open(os.path.join(base_dir, 'not-ignored'), 'w') as f: - f.write("this file should not be ignored") - - subdir = os.path.join(base_dir, 'node_modules', 'grunt-cli') - os.makedirs(subdir) - with open(os.path.join(subdir, 'grunt'), 'w') as f: - f.write("grunt") - - stream = self.client.build(path=base_dir, stream=True) - logs = '' - for chunk in stream: - if six.PY3: - chunk = chunk.decode('utf-8') - logs += chunk - self.assertFalse('node_modules' in logs) - self.assertFalse('Dockerfile' in logs) - self.assertFalse('.dockerginore' in logs) - self.assertTrue('not-ignored' in logs) - -####################### -# PY SPECIFIC TESTS # -####################### - - -class TestRunShlex(BaseTestCase): - def runTest(self): - commands = [ - 'true', - 'echo "The Young Descendant of Tepes & Septette for the ' - 'Dead Princess"', - 'echo -n "The Young Descendant of Tepes & Septette for the ' - 'Dead Princess"', - '/bin/sh -c "echo Hello World"', - '/bin/sh -c \'echo "Hello World"\'', - 'echo "\"Night of Nights\""', - 'true && echo "Night of Nights"' - ] - for cmd in commands: - container = self.client.create_container('busybox', cmd) - id = container['Id'] - self.client.start(id) - self.tmp_containers.append(id) - exitcode = self.client.wait(id) - self.assertEqual(exitcode, 0, msg=cmd) - - -class TestLoadConfig(BaseTestCase): - def runTest(self): - folder = tempfile.mkdtemp() - self.tmp_folders.append(folder) - cfg_path = os.path.join(folder, '.dockercfg') - f = open(cfg_path, 'w') - auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii') - f.write('auth = {0}\n'.format(auth_)) - f.write('email = sakuya@scarlet.net') - f.close() - cfg = docker.auth.load_config(cfg_path) - self.assertNotEqual(cfg[docker.auth.INDEX_URL], None) - cfg = cfg[docker.auth.INDEX_URL] - self.assertEqual(cfg['username'], 'sakuya') - self.assertEqual(cfg['password'], 'izayoi') - self.assertEqual(cfg['email'], 'sakuya@scarlet.net') - self.assertEqual(cfg.get('Auth'), None) - - -class TestLoadJSONConfig(BaseTestCase): - def runTest(self): - folder = tempfile.mkdtemp() - self.tmp_folders.append(folder) - cfg_path = os.path.join(folder, '.dockercfg') - f = open(os.path.join(folder, '.dockercfg'), 'w') - auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii') - email_ = 'sakuya@scarlet.net' - f.write('{{"{0}": {{"auth": "{1}", "email": "{2}"}}}}\n'.format( - docker.auth.INDEX_URL, auth_, email_)) - f.close() - cfg = docker.auth.load_config(cfg_path) - self.assertNotEqual(cfg[docker.auth.INDEX_URL], None) - cfg = cfg[docker.auth.INDEX_URL] - self.assertEqual(cfg['username'], 'sakuya') - self.assertEqual(cfg['password'], 'izayoi') - self.assertEqual(cfg['email'], 'sakuya@scarlet.net') - self.assertEqual(cfg.get('Auth'), None) - - -class TestAutoDetectVersion(unittest.TestCase): - def test_client_init(self): - client = docker.Client(version='auto') - client_version = client._version - api_version = client.version(api_version=False)['ApiVersion'] - self.assertEqual(client_version, api_version) - api_version_2 = client.version()['ApiVersion'] - self.assertEqual(client_version, api_version_2) - client.close() - - def test_auto_client(self): - client = docker.AutoVersionClient() - client_version = client._version - api_version = client.version(api_version=False)['ApiVersion'] - self.assertEqual(client_version, api_version) - 
api_version_2 = client.version()['ApiVersion'] - self.assertEqual(client_version, api_version_2) - client.close() - with self.assertRaises(docker.errors.DockerException): - docker.AutoVersionClient(version='1.11') - - -class TestConnectionTimeout(unittest.TestCase): - def setUp(self): - self.timeout = 0.5 - self.client = docker.client.Client(base_url='http://192.168.10.2:4243', - timeout=self.timeout) - - def runTest(self): - start = time.time() - res = None - # This call isn't supposed to complete, and it should fail fast. - try: - res = self.client.inspect_container('id') - except: - pass - end = time.time() - self.assertTrue(res is None) - self.assertTrue(end - start < 2 * self.timeout) - - -class UnixconnTestCase(unittest.TestCase): - """ - Test UNIX socket connection adapter. - """ - - def test_resource_warnings(self): - """ - Test no warnings are produced when using the client. - """ - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - - client = docker.Client(base_url=DEFAULT_BASE_URL) - client.images() - client.close() - del client - - assert len(w) == 0, \ - "No warnings produced: {0}".format(w[0].message) - - -#################### -# REGRESSION TESTS # -#################### - -class TestRegressions(BaseTestCase): - def test_443(self): - dfile = io.BytesIO() - with self.assertRaises(docker.errors.APIError) as exc: - for line in self.client.build(fileobj=dfile, tag="a/b/c"): - pass - self.assertEqual(exc.exception.response.status_code, 500) - dfile.close() - - def test_542(self): - self.client.start( - self.client.create_container('busybox', ['true']) - ) - result = self.client.containers(all=True, trunc=True) - self.assertEqual(len(result[0]['Id']), 12) - - def test_647(self): - with self.assertRaises(docker.errors.APIError): - self.client.inspect_image('gensokyo.jp//kirisame') - - def test_649(self): - self.client.timeout = None - ctnr = self.client.create_container('busybox', ['sleep', '2']) - self.client.start(ctnr) - self.client.stop(ctnr) - - -if __name__ == '__main__': - c = docker.Client(base_url=DEFAULT_BASE_URL) - c.pull('busybox') - exec_driver = c.info()['ExecutionDriver'] - EXEC_DRIVER_IS_NATIVE = exec_driver.startswith('native') - c.close() - unittest.main() diff --git a/tests/ssh/__init__.py b/tests/ssh/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/ssh/api_build_test.py b/tests/ssh/api_build_test.py new file mode 100644 index 0000000000..f17c75630f --- /dev/null +++ b/tests/ssh/api_build_test.py @@ -0,0 +1,588 @@ +import io +import os +import shutil +import tempfile + +import pytest + +from docker import errors +from docker.utils.proxy import ProxyConfig + +from ..helpers import random_name, requires_api_version, requires_experimental +from .base import TEST_IMG, BaseAPIIntegrationTest + + +class BuildTest(BaseAPIIntegrationTest): + def test_build_with_proxy(self): + self.client._proxy_configs = ProxyConfig( + ftp='a', http='b', https='c', no_proxy='d' + ) + + script = io.BytesIO('\n'.join([ + 'FROM busybox', + 'RUN env | grep "FTP_PROXY=a"', + 'RUN env | grep "ftp_proxy=a"', + 'RUN env | grep "HTTP_PROXY=b"', + 'RUN env | grep "http_proxy=b"', + 'RUN env | grep "HTTPS_PROXY=c"', + 'RUN env | grep "https_proxy=c"', + 'RUN env | grep "NO_PROXY=d"', + 'RUN env | grep "no_proxy=d"', + ]).encode('ascii')) + + self.client.build(fileobj=script, decode=True) + + def test_build_with_proxy_and_buildargs(self): + self.client._proxy_configs = ProxyConfig( + ftp='a', http='b', https='c', no_proxy='d' + ) + + 
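+        # Explicitly passed buildargs are expected to take precedence over
+        # the proxy values injected from the client's ProxyConfig, which is
+        # why FTP_PROXY/ftp_proxy assert the overridden XXX/xxx below rather
+        # than the injected 'a'.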
script = io.BytesIO('\n'.join([ + 'FROM busybox', + 'RUN env | grep "FTP_PROXY=XXX"', + 'RUN env | grep "ftp_proxy=xxx"', + 'RUN env | grep "HTTP_PROXY=b"', + 'RUN env | grep "http_proxy=b"', + 'RUN env | grep "HTTPS_PROXY=c"', + 'RUN env | grep "https_proxy=c"', + 'RUN env | grep "NO_PROXY=d"', + 'RUN env | grep "no_proxy=d"', + ]).encode('ascii')) + + self.client.build( + fileobj=script, + decode=True, + buildargs={'FTP_PROXY': 'XXX', 'ftp_proxy': 'xxx'} + ) + + def test_build_streaming(self): + script = io.BytesIO('\n'.join([ + 'FROM busybox', + 'RUN mkdir -p /tmp/test', + 'EXPOSE 8080', + 'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz' + ' /tmp/silence.tar.gz' + ]).encode('ascii')) + stream = self.client.build(fileobj=script, decode=True) + logs = [] + for chunk in stream: + logs.append(chunk) + assert len(logs) > 0 + + def test_build_from_stringio(self): + return + script = io.StringIO('\n'.join([ + 'FROM busybox', + 'RUN mkdir -p /tmp/test', + 'EXPOSE 8080', + 'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz' + ' /tmp/silence.tar.gz' + ])) + stream = self.client.build(fileobj=script) + logs = '' + for chunk in stream: + chunk = chunk.decode('utf-8') + logs += chunk + assert logs != '' + + def test_build_with_dockerignore(self): + base_dir = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, base_dir) + + with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f: + f.write("\n".join([ + 'FROM busybox', + 'ADD . /test', + ])) + + with open(os.path.join(base_dir, '.dockerignore'), 'w') as f: + f.write("\n".join([ + 'ignored', + 'Dockerfile', + '.dockerignore', + '!ignored/subdir/excepted-file', + '', # empty line, + '#*', # comment line + ])) + + with open(os.path.join(base_dir, 'not-ignored'), 'w') as f: + f.write("this file should not be ignored") + + with open(os.path.join(base_dir, '#file.txt'), 'w') as f: + f.write('this file should not be ignored') + + subdir = os.path.join(base_dir, 'ignored', 'subdir') + os.makedirs(subdir) + with open(os.path.join(subdir, 'file'), 'w') as f: + f.write("this file should be ignored") + + with open(os.path.join(subdir, 'excepted-file'), 'w') as f: + f.write("this file should not be ignored") + + tag = 'docker-py-test-build-with-dockerignore' + stream = self.client.build( + path=base_dir, + tag=tag, + ) + for _chunk in stream: + pass + + c = self.client.create_container(tag, ['find', '/test', '-type', 'f']) + self.client.start(c) + self.client.wait(c) + logs = self.client.logs(c) + + logs = logs.decode('utf-8') + + assert sorted(filter(None, logs.split('\n'))) == sorted([ + '/test/#file.txt', + '/test/ignored/subdir/excepted-file', + '/test/not-ignored' + ]) + + def test_build_with_buildargs(self): + script = io.BytesIO('\n'.join([ + 'FROM scratch', + 'ARG test', + 'USER $test' + ]).encode('ascii')) + + stream = self.client.build( + fileobj=script, tag='buildargs', buildargs={'test': 'OK'} + ) + self.tmp_imgs.append('buildargs') + for _chunk in stream: + pass + + info = self.client.inspect_image('buildargs') + assert info['Config']['User'] == 'OK' + + @requires_api_version('1.22') + def test_build_shmsize(self): + script = io.BytesIO('\n'.join([ + 'FROM scratch', + 'CMD sh -c "echo \'Hello, World!\'"', + ]).encode('ascii')) + + tag = 'shmsize' + shmsize = 134217728 + + stream = self.client.build( + fileobj=script, tag=tag, shmsize=shmsize + ) + self.tmp_imgs.append(tag) + for _chunk in stream: + pass + + # There is currently no way to get the shmsize + # that was used to build the image + + 
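The caveat in the comment just above is worth dwelling on: the finished image stores no record of the shmsize used to build it. If one ever needs to see the value take effect, the only observable spot is inside the build itself, for instance by printing `/dev/shm` from a `RUN` step. A minimal sketch of that idea against the low-level client; the `shmsize-probe` tag is illustrative and not part of this suite:

```
import io

import docker

client = docker.APIClient()

# busybox's df can report /dev/shm, whose size reflects the shmsize
# passed to build(); the output surfaces in the streamed build log.
script = io.BytesIO(b'\n'.join([
    b'FROM busybox',
    b'RUN df -k /dev/shm',
]))

for chunk in client.build(
    fileobj=script, tag='shmsize-probe', shmsize=134217728,
    decode=True, nocache=True,
):
    if 'stream' in chunk:
        print(chunk['stream'], end='')
```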
+    @requires_api_version('1.24')
+    def test_build_isolation(self):
+        script = io.BytesIO('\n'.join([
+            'FROM scratch',
+            'CMD sh -c "echo \'Deaf To All But The Song\'"'
+        ]).encode('ascii'))
+
+        stream = self.client.build(
+            fileobj=script, tag='isolation',
+            isolation='default'
+        )
+
+        for _chunk in stream:
+            pass
+
+    @requires_api_version('1.23')
+    def test_build_labels(self):
+        script = io.BytesIO('\n'.join([
+            'FROM scratch',
+        ]).encode('ascii'))
+
+        labels = {'test': 'OK'}
+
+        stream = self.client.build(
+            fileobj=script, tag='labels', labels=labels
+        )
+        self.tmp_imgs.append('labels')
+        for _chunk in stream:
+            pass
+
+        info = self.client.inspect_image('labels')
+        assert info['Config']['Labels'] == labels
+
+    @requires_api_version('1.25')
+    def test_build_with_cache_from(self):
+        script = io.BytesIO('\n'.join([
+            'FROM busybox',
+            'ENV FOO=bar',
+            'RUN touch baz',
+            'RUN touch bax',
+        ]).encode('ascii'))
+
+        stream = self.client.build(fileobj=script, tag='build1')
+        self.tmp_imgs.append('build1')
+        for _chunk in stream:
+            pass
+
+        stream = self.client.build(
+            fileobj=script, tag='build2', cache_from=['build1'],
+            decode=True
+        )
+        self.tmp_imgs.append('build2')
+        counter = 0
+        for chunk in stream:
+            if 'Using cache' in chunk.get('stream', ''):
+                counter += 1
+        assert counter == 3
+        self.client.remove_image('build2')
+
+        counter = 0
+        stream = self.client.build(
+            fileobj=script, tag='build2', cache_from=['nosuchtag'],
+            decode=True
+        )
+        for chunk in stream:
+            if 'Using cache' in chunk.get('stream', ''):
+                counter += 1
+        assert counter == 0
+
+    @requires_api_version('1.29')
+    def test_build_container_with_target(self):
+        script = io.BytesIO('\n'.join([
+            'FROM busybox as first',
+            'RUN mkdir -p /tmp/test',
+            'RUN touch /tmp/silence.tar.gz',
+            'FROM alpine:latest',
+            'WORKDIR /root/',
+            'COPY --from=first /tmp/silence.tar.gz .',
+            'ONBUILD RUN echo "This should not be in the final image"'
+        ]).encode('ascii'))
+
+        stream = self.client.build(
+            fileobj=script, target='first', tag='build1'
+        )
+        self.tmp_imgs.append('build1')
+        for _chunk in stream:
+            pass
+
+        info = self.client.inspect_image('build1')
+        assert 'OnBuild' not in info['Config'] or not info['Config']['OnBuild']
+
+    @requires_api_version('1.25')
+    def test_build_with_network_mode(self):
+        # Set up pingable endpoint on custom network
+        network = self.client.create_network(random_name())['Id']
+        self.tmp_networks.append(network)
+        container = self.client.create_container(TEST_IMG, 'top')
+        self.tmp_containers.append(container)
+        self.client.start(container)
+        self.client.connect_container_to_network(
+            container, network, aliases=['pingtarget.docker']
+        )
+
+        script = io.BytesIO('\n'.join([
+            'FROM busybox',
+            'RUN ping -c1 pingtarget.docker'
+        ]).encode('ascii'))
+
+        stream = self.client.build(
+            fileobj=script, network_mode=network,
+            tag='dockerpytest_customnetbuild'
+        )
+
+        self.tmp_imgs.append('dockerpytest_customnetbuild')
+        for _chunk in stream:
+            pass
+
+        assert self.client.inspect_image('dockerpytest_customnetbuild')
+
+        script.seek(0)
+        stream = self.client.build(
+            fileobj=script, network_mode='none',
+            tag='dockerpytest_nonebuild', nocache=True, decode=True
+        )
+
+        self.tmp_imgs.append('dockerpytest_nonebuild')
+        logs = list(stream)
+        assert 'errorDetail' in logs[-1]
+        assert logs[-1]['errorDetail']['code'] == 1
+
+        with pytest.raises(errors.NotFound):
+            self.client.inspect_image('dockerpytest_nonebuild')
+
+    @requires_api_version('1.27')
+    def test_build_with_extra_hosts(self):
+        img_name = 'dockerpytest_extrahost_build'
+        self.tmp_imgs.append(img_name)
+
+        script = io.BytesIO('\n'.join([
+            'FROM busybox',
+            'RUN ping -c1 hello.world.test',
+            'RUN ping -c1 extrahost.local.test',
+            'RUN cp /etc/hosts /hosts-file'
+        ]).encode('ascii'))
+
+        stream = self.client.build(
+            fileobj=script, tag=img_name,
+            extra_hosts={
+                'extrahost.local.test': '127.0.0.1',
+                'hello.world.test': '127.0.0.1',
+            }, decode=True
+        )
+        for chunk in stream:
+            if 'errorDetail' in chunk:
+                pytest.fail(chunk)
+
+        assert self.client.inspect_image(img_name)
+        ctnr = self.run_container(img_name, 'cat /hosts-file')
+        logs = self.client.logs(ctnr)
+        logs = logs.decode('utf-8')
+        assert '127.0.0.1\textrahost.local.test' in logs
+        assert '127.0.0.1\thello.world.test' in logs
+
+    @requires_experimental(until=None)
+    @requires_api_version('1.25')
+    def test_build_squash(self):
+        script = io.BytesIO('\n'.join([
+            'FROM busybox',
+            'RUN echo blah > /file_1',
+            'RUN echo blahblah > /file_2',
+            'RUN echo blahblahblah > /file_3'
+        ]).encode('ascii'))
+
+        def build_squashed(squash):
+            tag = 'squash' if squash else 'nosquash'
+            stream = self.client.build(
+                fileobj=script, tag=tag, squash=squash
+            )
+            self.tmp_imgs.append(tag)
+            for _chunk in stream:
+                pass
+
+            return self.client.inspect_image(tag)
+
+        non_squashed = build_squashed(False)
+        squashed = build_squashed(True)
+        assert len(non_squashed['RootFS']['Layers']) == 4
+        assert len(squashed['RootFS']['Layers']) == 2
+
+    def test_build_stderr_data(self):
+        control_chars = ['\x1b[91m', '\x1b[0m']
+        snippet = 'Ancient Temple (Mystic Oriental Dream ~ Ancient Temple)'
+        script = io.BytesIO(b'\n'.join([
+            b'FROM busybox',
+            f'RUN sh -c ">&2 echo \'{snippet}\'"'.encode('utf-8')
+        ]))
+
+        stream = self.client.build(
+            fileobj=script, decode=True, nocache=True
+        )
+        lines = []
+        for chunk in stream:
+            lines.append(chunk.get('stream'))
+        expected = f'{control_chars[0]}{snippet}\n{control_chars[1]}'
+        assert any(line == expected for line in lines)
+
+    def test_build_gzip_encoding(self):
+        base_dir = tempfile.mkdtemp()
+        self.addCleanup(shutil.rmtree, base_dir)
+
+        with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+            f.write("\n".join([
+                'FROM busybox',
+                'ADD . /test',
+            ]))
+
+        stream = self.client.build(
+            path=base_dir, decode=True, nocache=True,
+            gzip=True
+        )
+
+        lines = []
+        for chunk in stream:
+            lines.append(chunk)
+
+        assert 'Successfully built' in lines[-1]['stream']
+
+    def test_build_with_dockerfile_empty_lines(self):
+        base_dir = tempfile.mkdtemp()
+        self.addCleanup(shutil.rmtree, base_dir)
+        with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+            f.write('FROM busybox\n')
+        with open(os.path.join(base_dir, '.dockerignore'), 'w') as f:
+            f.write('\n'.join([
+                ' ',
+                '',
+                '\t\t',
+                '\t ',
+            ]))
+
+        stream = self.client.build(
+            path=base_dir, decode=True, nocache=True
+        )
+
+        lines = []
+        for chunk in stream:
+            lines.append(chunk)
+        assert 'Successfully built' in lines[-1]['stream']
+
+    def test_build_gzip_custom_encoding(self):
+        with pytest.raises(errors.DockerException):
+            self.client.build(path='.', gzip=True, encoding='text/html')
+
+    @requires_api_version('1.32')
+    @requires_experimental(until=None)
+    def test_build_invalid_platform(self):
+        script = io.BytesIO(b'FROM busybox\n')
+
+        with pytest.raises(errors.APIError) as excinfo:
+            stream = self.client.build(fileobj=script, platform='foobar')
+            for _ in stream:
+                pass
+
+        # Some API versions incorrectly return 500 status; assert 4xx or 5xx
+        assert excinfo.value.is_error()
+        assert 'unknown operating system' in excinfo.exconly() \
+            or 'invalid platform' in excinfo.exconly()
+
+    def test_build_out_of_context_dockerfile(self):
+        base_dir = tempfile.mkdtemp()
+        self.addCleanup(shutil.rmtree, base_dir)
+        with open(os.path.join(base_dir, 'file.txt'), 'w') as f:
+            f.write('hello world')
+        with open(os.path.join(base_dir, '.dockerignore'), 'w') as f:
+            f.write('.dockerignore\n')
+        df_dir = tempfile.mkdtemp()
+        self.addCleanup(shutil.rmtree, df_dir)
+        df_name = os.path.join(df_dir, 'Dockerfile')
+        with open(df_name, 'wb') as df:
+            df.write(('\n'.join([
+                'FROM busybox',
+                'COPY . /src',
+                'WORKDIR /src',
+            ])).encode('utf-8'))
+            df.flush()
+        img_name = random_name()
+        self.tmp_imgs.append(img_name)
+        stream = self.client.build(
+            path=base_dir, dockerfile=df_name, tag=img_name,
+            decode=True
+        )
+        lines = []
+        for chunk in stream:
+            lines.append(chunk)
+        assert 'Successfully tagged' in lines[-1]['stream']
+
+        ctnr = self.client.create_container(img_name, 'ls -a')
+        self.tmp_containers.append(ctnr)
+        self.client.start(ctnr)
+        lsdata = self.client.logs(ctnr).strip().split(b'\n')
+        assert len(lsdata) == 3
+        assert sorted([b'.', b'..', b'file.txt']) == sorted(lsdata)
+
+    def test_build_in_context_dockerfile(self):
+        base_dir = tempfile.mkdtemp()
+        self.addCleanup(shutil.rmtree, base_dir)
+        with open(os.path.join(base_dir, 'file.txt'), 'w') as f:
+            f.write('hello world')
+        with open(os.path.join(base_dir, 'custom.dockerfile'), 'w') as df:
+            df.write('\n'.join([
+                'FROM busybox',
+                'COPY . /src',
+                'WORKDIR /src',
+            ]))
+        img_name = random_name()
+        self.tmp_imgs.append(img_name)
+        stream = self.client.build(
+            path=base_dir, dockerfile='custom.dockerfile', tag=img_name,
+            decode=True
+        )
+        lines = []
+        for chunk in stream:
+            lines.append(chunk)
+        assert 'Successfully tagged' in lines[-1]['stream']
+
+        ctnr = self.client.create_container(img_name, 'ls -a')
+        self.tmp_containers.append(ctnr)
+        self.client.start(ctnr)
+        lsdata = self.client.logs(ctnr).strip().split(b'\n')
+        assert len(lsdata) == 4
+        assert sorted(
+            [b'.', b'..', b'file.txt', b'custom.dockerfile']
+        ) == sorted(lsdata)
+
+    def test_build_in_context_nested_dockerfile(self):
+        base_dir = tempfile.mkdtemp()
+        self.addCleanup(shutil.rmtree, base_dir)
+        with open(os.path.join(base_dir, 'file.txt'), 'w') as f:
+            f.write('hello world')
+        subdir = os.path.join(base_dir, 'hello', 'world')
+        os.makedirs(subdir)
+        with open(os.path.join(subdir, 'custom.dockerfile'), 'w') as df:
+            df.write('\n'.join([
+                'FROM busybox',
+                'COPY . /src',
+                'WORKDIR /src',
+            ]))
+        img_name = random_name()
+        self.tmp_imgs.append(img_name)
+        stream = self.client.build(
+            path=base_dir, dockerfile='hello/world/custom.dockerfile',
+            tag=img_name, decode=True
+        )
+        lines = []
+        for chunk in stream:
+            lines.append(chunk)
+        assert 'Successfully tagged' in lines[-1]['stream']
+
+        ctnr = self.client.create_container(img_name, 'ls -a')
+        self.tmp_containers.append(ctnr)
+        self.client.start(ctnr)
+        lsdata = self.client.logs(ctnr).strip().split(b'\n')
+        assert len(lsdata) == 4
+        assert sorted(
+            [b'.', b'..', b'file.txt', b'hello']
+        ) == sorted(lsdata)
+
+    def test_build_in_context_abs_dockerfile(self):
+        base_dir = tempfile.mkdtemp()
+        self.addCleanup(shutil.rmtree, base_dir)
+        abs_dockerfile_path = os.path.join(base_dir, 'custom.dockerfile')
+        with open(os.path.join(base_dir, 'file.txt'), 'w') as f:
+            f.write('hello world')
+        with open(abs_dockerfile_path, 'w') as df:
+            df.write('\n'.join([
+                'FROM busybox',
+                'COPY . /src',
+                'WORKDIR /src',
+            ]))
+        img_name = random_name()
+        self.tmp_imgs.append(img_name)
+        stream = self.client.build(
+            path=base_dir, dockerfile=abs_dockerfile_path, tag=img_name,
+            decode=True
+        )
+        lines = []
+        for chunk in stream:
+            lines.append(chunk)
+        assert 'Successfully tagged' in lines[-1]['stream']
+
+        ctnr = self.client.create_container(img_name, 'ls -a')
+        self.tmp_containers.append(ctnr)
+        self.client.start(ctnr)
+        lsdata = self.client.logs(ctnr).strip().split(b'\n')
+        assert len(lsdata) == 4
+        assert sorted(
+            [b'.', b'..', b'file.txt', b'custom.dockerfile']
+        ) == sorted(lsdata)
+
+    @requires_api_version('1.31')
+    @pytest.mark.xfail(
+        True,
+        reason='Currently fails on 18.09: '
+               'https://github.com/moby/moby/issues/37920'
+    )
+    def test_prune_builds(self):
+        prune_result = self.client.prune_builds()
+        assert 'SpaceReclaimed' in prune_result
+        assert isinstance(prune_result['SpaceReclaimed'], int)
diff --git a/tests/ssh/base.py b/tests/ssh/base.py
new file mode 100644
index 0000000000..bf3c11d7a7
--- /dev/null
+++ b/tests/ssh/base.py
@@ -0,0 +1,134 @@
+import os
+import shutil
+import unittest
+
+import pytest
+
+import docker
+from docker.utils import kwargs_from_env
+
+from .. import helpers
+
+TEST_IMG = 'alpine:3.10'
+TEST_API_VERSION = os.environ.get('DOCKER_TEST_API_VERSION')
+
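+# This suite only runs against a daemon reached over SSH; a sketch of a
+# matching environment (address, port and user are illustrative):
+#
+#   DOCKER_HOST=ssh://root@127.0.0.1:2222 py.test tests/ssh
+#
+# BaseAPIIntegrationTest below skips itself for any other DOCKER_HOST.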
+ """ + + def setUp(self): + self.tmp_imgs = [] + self.tmp_containers = [] + self.tmp_folders = [] + self.tmp_volumes = [] + self.tmp_networks = [] + self.tmp_plugins = [] + self.tmp_secrets = [] + self.tmp_configs = [] + + def tearDown(self): + client = docker.from_env(version=TEST_API_VERSION, use_ssh_client=True) + try: + for img in self.tmp_imgs: + try: + client.api.remove_image(img) + except docker.errors.APIError: + pass + for container in self.tmp_containers: + try: + client.api.remove_container(container, force=True, v=True) + except docker.errors.APIError: + pass + for network in self.tmp_networks: + try: + client.api.remove_network(network) + except docker.errors.APIError: + pass + for volume in self.tmp_volumes: + try: + client.api.remove_volume(volume) + except docker.errors.APIError: + pass + + for secret in self.tmp_secrets: + try: + client.api.remove_secret(secret) + except docker.errors.APIError: + pass + + for config in self.tmp_configs: + try: + client.api.remove_config(config) + except docker.errors.APIError: + pass + + for folder in self.tmp_folders: + shutil.rmtree(folder) + finally: + client.close() + + +@pytest.mark.skipif(not os.environ.get('DOCKER_HOST', '').startswith('ssh://'), + reason='DOCKER_HOST is not an SSH target') +class BaseAPIIntegrationTest(BaseIntegrationTest): + """ + A test case for `APIClient` integration tests. It sets up an `APIClient` + as `self.client`. + """ + @classmethod + def setUpClass(cls): + cls.client = cls.get_client_instance() + cls.client.pull(TEST_IMG) + + def tearDown(self): + super().tearDown() + self.client.close() + + @staticmethod + def get_client_instance(): + return docker.APIClient( + version=TEST_API_VERSION, + timeout=60, + use_ssh_client=True, + **kwargs_from_env() + ) + + @staticmethod + def _init_swarm(client, **kwargs): + return client.init_swarm( + '127.0.0.1', listen_addr=helpers.swarm_listen_addr(), **kwargs + ) + + def run_container(self, *args, **kwargs): + container = self.client.create_container(*args, **kwargs) + self.tmp_containers.append(container) + self.client.start(container) + exitcode = self.client.wait(container)['StatusCode'] + + if exitcode != 0: + output = self.client.logs(container) + raise Exception( + f"Container exited with code {exitcode}:\n{output}") + + return container + + def create_and_start(self, image=TEST_IMG, command='top', **kwargs): + container = self.client.create_container( + image=image, command=command, **kwargs) + self.tmp_containers.append(container) + self.client.start(container) + return container + + def execute(self, container, cmd, exit_code=0, **kwargs): + exc = self.client.exec_create(container, cmd, **kwargs) + output = self.client.exec_start(exc) + actual_exit_code = self.client.exec_inspect(exc)['ExitCode'] + msg = "Expected `{}` to exit with code {} but returned {}:\n{}".format( + " ".join(cmd), exit_code, actual_exit_code, output) + assert actual_exit_code == exit_code, msg + + def init_swarm(self, **kwargs): + return self._init_swarm(self.client, **kwargs) diff --git a/tests/ssh/config/client/id_rsa b/tests/ssh/config/client/id_rsa new file mode 100644 index 0000000000..0ec063e2e4 --- /dev/null +++ b/tests/ssh/config/client/id_rsa @@ -0,0 +1,38 @@ +-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn +NhAAAAAwEAAQAAAYEAvwYl5Gy/aBGxNzyb9UtqddlyuQR1t6kE+UX/gmBtAE2MjDyFTOvi +F1cn90DcaZ7z172zwUCQrNKh3rj8GcthrG7d+UJ5pYK3MxT4l16LAg9jfsK3DkD2Rri40M +lFD9siUVUky6afM5NhfMN5WhiAdyZNYVHDFBMXpisUGJPy+NG+a1ypGqy5OWsAbonI0UrT 
+K3IT0R2dp+9eUxvs0r3/LQf1B0VymD6movyXuXoh98hlMwmOM5/rhKKgBW+FfJaSI/EcNx +F5gmFcBtL4PuOECENoCZyIU5XJscJMp72Z/e57oODS5RiUPrAwpyLzGqcnB3xpDZQc93xb +bvzkbMT6WW0zYP/Z6Gt2X/DqSMLxPxRzT6g3LRpbcMRIEMY+XxN+MdH2JxdPLXowFCSQmR +N2LBoDWm7EuKQ/pEYSPN3hWb4I90NQHkytFfW0TO47o3HPUc/lfRm+c2BBzf5fD8RFZY9D +pVEX/WZZJzUCvMUYefe4w1031UCgjDv50Wlh9m6tAAAFeM2kMyHNpDMhAAAAB3NzaC1yc2 +EAAAGBAL8GJeRsv2gRsTc8m/VLanXZcrkEdbepBPlF/4JgbQBNjIw8hUzr4hdXJ/dA3Gme +89e9s8FAkKzSod64/BnLYaxu3flCeaWCtzMU+JdeiwIPY37Ctw5A9ka4uNDJRQ/bIlFVJM +umnzOTYXzDeVoYgHcmTWFRwxQTF6YrFBiT8vjRvmtcqRqsuTlrAG6JyNFK0ytyE9Ednafv +XlMb7NK9/y0H9QdFcpg+pqL8l7l6IffIZTMJjjOf64SioAVvhXyWkiPxHDcReYJhXAbS+D +7jhAhDaAmciFOVybHCTKe9mf3ue6Dg0uUYlD6wMKci8xqnJwd8aQ2UHPd8W2785GzE+llt +M2D/2ehrdl/w6kjC8T8Uc0+oNy0aW3DESBDGPl8TfjHR9icXTy16MBQkkJkTdiwaA1puxL +ikP6RGEjzd4Vm+CPdDUB5MrRX1tEzuO6Nxz1HP5X0ZvnNgQc3+Xw/ERWWPQ6VRF/1mWSc1 +ArzFGHn3uMNdN9VAoIw7+dFpYfZurQAAAAMBAAEAAAGBAKtnotyiz+Vb6r57vh2OvEpfAd +gOrmpMWVArhSfBykz5SOIU9C+fgVIcPJpaMuz7WiX97Ku9eZP5tJGbP2sN2ejV2ovtICZp +cmV9rcp1ZRpGIKr/oS5DEDlJS1zdHQErSlHcqpWqPzQSTOmcpOk5Dxza25g1u2vp7dCG2x +NqvhySZ+ECViK/Vby1zL9jFzTlhTJ4vFtpzauA2AyPBCPdpHkNqMoLgNYncXLSYHpnos8p +m9T+AAFGwBhVrGz0Mr0mhRDnV/PgbKplKT7l+CGceb8LuWmj/vzuP5Wv6dglw3hJnT2V5p +nTBp3dJ6R006+yvr5T/Xb+ObGqFfgfenjLfHjqbJ/gZdGWt4Le84g8tmSkjJBJ2Yj3kynQ +sdfv9k7JJ4t5euoje0XW0YVN1ih5DdyO4hHDRD1lSTFYT5Gl2sCTt28qsMC12rWzFkezJo +Fhewq2Ddtg4AK6SxqH4rFQCmgOR/ci7jv9TXS9xEQxYliyN5aNymRTyXmwqBIzjNKR6QAA +AMEAxpme2upng9LS6Epa83d1gnWUilYPbpb1C8+1FgpnBv9zkjFE1vY0Vu4i9LcLGlCQ0x +PB1Z16TQlEluqiSuSA0eyaWSQBF9NyGsOCOZ63lpJs/2FRBfcbUvHhv8/g1fv/xvI+FnE+ +DoAhz8V3byU8HUZer7pQY3hSxisdYdsaromxC8DSSPFQoxpxwh7WuP4c3veWkdL13h4fSN +khGr3G1XGfsZOu6V6F1i7yMU6OcwBAxzPsHqZv66sT8lE6n4xjAAAAwQDzAaVaJqZ2ROoF +loltJZUtE7o+zpoDzjOJyGYaCYTU4dHPN1aeYBjw8QfmJhdmZfJp9AeJDB/W0wzoHi2ONI +chnQ1EdbCLk9pvA7rhfVdZaxPeHwniDp2iA/wZKTRG3hav9nEzS72uXuZprCsbBvGXeR0z +iuIx5odVXG8qyuI9lDY6B/IoLg7zd+V6iw9mqWYlLLsgHiAvg32LAT4j0KoTufOqpnxqTQ +P2EguTmxDWkfQmbEHdJvbD2tLQ90zMlwMAAADBAMk88wOA1i/TibH5gm/lAtKPcNKbrHfk +7O9gdSZd2HL0fLjptpOplS89Y7muTElsRDRGiKq+7KV/sxQRNcITkxdTKu8CKnftFWHrLk +9WHWVHXbu9h8ttsKeUr9i27ojxpe5I82of8k7fJTg1LxMnGzuDZfq1BGsQnOWrY7r1Yjcd +8EtSrwOB+J/S4U+rR6kwUEFYeBkhE599P1EtHTCm8kWh368di9Q+Y/VIOa3qRx4hxuiCLI +qj4ZpdVMk2cCNcjwAAAAAB +-----END OPENSSH PRIVATE KEY----- diff --git a/tests/ssh/config/client/id_rsa.pub b/tests/ssh/config/client/id_rsa.pub new file mode 100644 index 0000000000..33252fe503 --- /dev/null +++ b/tests/ssh/config/client/id_rsa.pub @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC/BiXkbL9oEbE3PJv1S2p12XK5BHW3qQT5Rf+CYG0ATYyMPIVM6+IXVyf3QNxpnvPXvbPBQJCs0qHeuPwZy2Gsbt35QnmlgrczFPiXXosCD2N+wrcOQPZGuLjQyUUP2yJRVSTLpp8zk2F8w3laGIB3Jk1hUcMUExemKxQYk/L40b5rXKkarLk5awBuicjRStMrchPRHZ2n715TG+zSvf8tB/UHRXKYPqai/Je5eiH3yGUzCY4zn+uEoqAFb4V8lpIj8Rw3EXmCYVwG0vg+44QIQ2gJnIhTlcmxwkynvZn97nug4NLlGJQ+sDCnIvMapycHfGkNlBz3fFtu/ORsxPpZbTNg/9noa3Zf8OpIwvE/FHNPqDctGltwxEgQxj5fE34x0fYnF08tejAUJJCZE3YsGgNabsS4pD+kRhI83eFZvgj3Q1AeTK0V9bRM7jujcc9Rz+V9Gb5zYEHN/l8PxEVlj0OlURf9ZlknNQK8xRh597jDXTfVQKCMO/nRaWH2bq0= diff --git a/tests/ssh/config/server/known_ed25519 b/tests/ssh/config/server/known_ed25519 new file mode 100644 index 0000000000..b79f217b88 --- /dev/null +++ b/tests/ssh/config/server/known_ed25519 @@ -0,0 +1,7 @@ +-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW +QyNTUxOQAAACCGsfNXVP18N7XC6IQGuuxXQRbTxlPGLj+5/CByj9eg4QAAAJgIMffcCDH3 +3AAAAAtzc2gtZWQyNTUxOQAAACCGsfNXVP18N7XC6IQGuuxXQRbTxlPGLj+5/CByj9eg4Q 
+AAAEDeXnt5AuNk4oTHjMU1vUsEwh64fuEPu4hXsG6wCVt/6Iax81dU/Xw3tcLohAa67FdB +FtPGU8YuP7n8IHKP16DhAAAAEXJvb3RAMGRkZmQyMWRkYjM3AQIDBA== +-----END OPENSSH PRIVATE KEY----- diff --git a/tests/ssh/config/server/known_ed25519.pub b/tests/ssh/config/server/known_ed25519.pub new file mode 100644 index 0000000000..ec0296e9d4 --- /dev/null +++ b/tests/ssh/config/server/known_ed25519.pub @@ -0,0 +1 @@ +ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIax81dU/Xw3tcLohAa67FdBFtPGU8YuP7n8IHKP16Dh docker-py integration tests known diff --git a/tests/ssh/config/server/sshd_config b/tests/ssh/config/server/sshd_config new file mode 100644 index 0000000000..970dca337c --- /dev/null +++ b/tests/ssh/config/server/sshd_config @@ -0,0 +1,3 @@ +IgnoreUserKnownHosts yes +PubkeyAuthentication yes +PermitRootLogin yes diff --git a/tests/ssh/config/server/unknown_ed25519 b/tests/ssh/config/server/unknown_ed25519 new file mode 100644 index 0000000000..b79f217b88 --- /dev/null +++ b/tests/ssh/config/server/unknown_ed25519 @@ -0,0 +1,7 @@ +-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW +QyNTUxOQAAACCGsfNXVP18N7XC6IQGuuxXQRbTxlPGLj+5/CByj9eg4QAAAJgIMffcCDH3 +3AAAAAtzc2gtZWQyNTUxOQAAACCGsfNXVP18N7XC6IQGuuxXQRbTxlPGLj+5/CByj9eg4Q +AAAEDeXnt5AuNk4oTHjMU1vUsEwh64fuEPu4hXsG6wCVt/6Iax81dU/Xw3tcLohAa67FdB +FtPGU8YuP7n8IHKP16DhAAAAEXJvb3RAMGRkZmQyMWRkYjM3AQIDBA== +-----END OPENSSH PRIVATE KEY----- diff --git a/tests/ssh/config/server/unknown_ed25519.pub b/tests/ssh/config/server/unknown_ed25519.pub new file mode 100644 index 0000000000..a24403ed9b --- /dev/null +++ b/tests/ssh/config/server/unknown_ed25519.pub @@ -0,0 +1 @@ +ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIax81dU/Xw3tcLohAa67FdBFtPGU8YuP7n8IHKP16Dh docker-py integration tests unknown diff --git a/tests/ssh/connect_test.py b/tests/ssh/connect_test.py new file mode 100644 index 0000000000..8780e3b8bd --- /dev/null +++ b/tests/ssh/connect_test.py @@ -0,0 +1,24 @@ +import os +import unittest + +import paramiko.ssh_exception +import pytest + +import docker + +from .base import TEST_API_VERSION + + +class SSHConnectionTest(unittest.TestCase): + @pytest.mark.skipif('UNKNOWN_DOCKER_SSH_HOST' not in os.environ, + reason='Unknown Docker SSH host not configured') + def test_ssh_unknown_host(self): + with self.assertRaises(paramiko.ssh_exception.SSHException) as cm: + docker.APIClient( + version=TEST_API_VERSION, + timeout=60, + # test only valid with Paramiko + use_ssh_client=False, + base_url=os.environ['UNKNOWN_DOCKER_SSH_HOST'], + ) + self.assertIn('not found in known_hosts', str(cm.exception)) diff --git a/tests/test.py b/tests/test.py deleted file mode 100644 index 2e3b652b0b..0000000000 --- a/tests/test.py +++ /dev/null @@ -1,2593 +0,0 @@ -# Copyright 2013 dotCloud inc. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import base64 -import datetime -import gzip -import io -import json -import os -import re -import shutil -import signal -import socket -import sys -import tarfile -import tempfile -import threading -import time -import unittest -import warnings -import random - -import docker -import requests -import six - -import base -import fake_api - -try: - from unittest import mock -except ImportError: - import mock - - -DEFAULT_TIMEOUT_SECONDS = docker.client.constants.DEFAULT_TIMEOUT_SECONDS - -warnings.simplefilter('error') -warnings.filterwarnings('error') -create_host_config = docker.utils.create_host_config - - -def response(status_code=200, content='', headers=None, reason=None, elapsed=0, - request=None): - res = requests.Response() - res.status_code = status_code - if not isinstance(content, six.binary_type): - content = json.dumps(content).encode('ascii') - res._content = content - res.headers = requests.structures.CaseInsensitiveDict(headers or {}) - res.reason = reason - res.elapsed = datetime.timedelta(elapsed) - res.request = request - return res - - -def fake_resolve_authconfig(authconfig, registry=None): - return None - - -def fake_resp(url, data=None, **kwargs): - status_code, content = fake_api.fake_responses[url]() - return response(status_code=status_code, content=content) - - -fake_request = mock.Mock(side_effect=fake_resp) -url_prefix = 'http+docker://localunixsocket/v{0}/'.format( - docker.client.constants.DEFAULT_DOCKER_API_VERSION) - - -class Cleanup(object): - if sys.version_info < (2, 7): - # Provide a basic implementation of addCleanup for Python < 2.7 - def __init__(self, *args, **kwargs): - super(Cleanup, self).__init__(*args, **kwargs) - self._cleanups = [] - - def tearDown(self): - super(Cleanup, self).tearDown() - ok = True - while self._cleanups: - fn, args, kwargs = self._cleanups.pop(-1) - try: - fn(*args, **kwargs) - except KeyboardInterrupt: - raise - except: - ok = False - if not ok: - raise - - def addCleanup(self, function, *args, **kwargs): - self._cleanups.append((function, args, kwargs)) - - -@mock.patch.multiple('docker.Client', get=fake_request, post=fake_request, - put=fake_request, delete=fake_request) -class DockerClientTest(Cleanup, base.BaseTestCase): - def setUp(self): - self.client = docker.Client() - # Force-clear authconfig to avoid tampering with the tests - self.client._cfg = {'Configs': {}} - - def tearDown(self): - self.client.close() - - def assertIn(self, object, collection): - if six.PY2 and sys.version_info[1] <= 6: - return self.assertTrue(object in collection) - return super(DockerClientTest, self).assertIn(object, collection) - - def base_create_payload(self, img='busybox', cmd=None): - if not cmd: - cmd = ['true'] - return {"Tty": False, "Image": img, "Cmd": cmd, - "AttachStdin": False, - "AttachStderr": True, "AttachStdout": True, - "StdinOnce": False, - "OpenStdin": False, "NetworkDisabled": False, - } - - def test_ctor(self): - try: - docker.Client(version=1.12) - except Exception as e: - self.assertTrue(isinstance(e, docker.errors.DockerException)) - if not six.PY3: - self.assertEqual( - str(e), - 'Version parameter must be a string or None. 
Found float' - ) - - ######################### - # INFORMATION TESTS # - ######################### - def test_version(self): - try: - self.client.version() - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'version', - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_retrieve_server_version(self): - client = docker.Client(version="auto") - self.assertTrue(isinstance(client._version, six.string_types)) - self.assertFalse(client._version == "auto") - client.close() - - def test_auto_retrieve_server_version(self): - try: - version = self.client._retrieve_server_version() - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - else: - self.assertTrue(isinstance(version, six.string_types)) - - def test_info(self): - try: - self.client.info() - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'info', - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_search(self): - try: - self.client.search('busybox') - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'images/search', - params={'term': 'busybox'}, - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_image_viz(self): - try: - self.client.images('busybox', viz=True) - self.fail('Viz output should not be supported!') - except Exception: - pass - - def test_events(self): - try: - self.client.events() - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'events', - params={'since': None, 'until': None, 'filters': None}, - stream=True - ) - - def test_events_with_since_until(self): - ts = 1356048000 - now = datetime.datetime.fromtimestamp(ts) - since = now - datetime.timedelta(seconds=10) - until = now + datetime.timedelta(seconds=10) - try: - self.client.events(since=since, until=until) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'events', - params={ - 'since': ts - 10, - 'until': ts + 10, - 'filters': None - }, - stream=True - ) - - def test_events_with_filters(self): - filters = {'event': ['die', 'stop'], - 'container': fake_api.FAKE_CONTAINER_ID} - try: - self.client.events(filters=filters) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - expected_filters = docker.utils.convert_filters(filters) - fake_request.assert_called_with( - url_prefix + 'events', - params={ - 'since': None, - 'until': None, - 'filters': expected_filters - }, - stream=True - ) - - ################### - # LISTING TESTS # - ################### - - def test_images(self): - try: - self.client.images(all=True) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - fake_request.assert_called_with( - url_prefix + 'images/json', - params={'filter': None, 'only_ids': 0, 'all': 1}, - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_images_quiet(self): - try: - self.client.images(all=True, quiet=True) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - fake_request.assert_called_with( - url_prefix + 'images/json', - params={'filter': None, 'only_ids': 1, 'all': 1}, - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_image_ids(self): - try: - self.client.images(quiet=True) - except 
Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'images/json', - params={'filter': None, 'only_ids': 1, 'all': 0}, - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_images_filters(self): - try: - self.client.images(filters={'dangling': True}) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'images/json', - params={'filter': None, 'only_ids': 0, 'all': 0, - 'filters': '{"dangling": ["true"]}'}, - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_list_containers(self): - try: - self.client.containers(all=True) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'containers/json', - params={ - 'all': 1, - 'since': None, - 'size': 0, - 'limit': -1, - 'trunc_cmd': 0, - 'before': None - }, - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - ##################### - # CONTAINER TESTS # - ##################### - - def test_create_container(self): - try: - self.client.create_container('busybox', 'true') - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - args = fake_request.call_args - self.assertEqual(args[0][0], - url_prefix + 'containers/create') - self.assertEqual(json.loads(args[1]['data']), - json.loads(''' - {"Tty": false, "Image": "busybox", "Cmd": ["true"], - "AttachStdin": false, - "AttachStderr": true, "AttachStdout": true, - "StdinOnce": false, - "OpenStdin": false, "NetworkDisabled": false}''')) - self.assertEqual(args[1]['headers'], - {'Content-Type': 'application/json'}) - - def test_create_container_with_binds(self): - mount_dest = '/mnt' - - try: - self.client.create_container('busybox', ['ls', mount_dest], - volumes=[mount_dest]) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - args = fake_request.call_args - self.assertEqual(args[0][0], - url_prefix + 'containers/create') - self.assertEqual(json.loads(args[1]['data']), - json.loads(''' - {"Tty": false, "Image": "busybox", - "Cmd": ["ls", "/mnt"], "AttachStdin": false, - "Volumes": {"/mnt": {}}, - "AttachStderr": true, - "AttachStdout": true, "OpenStdin": false, - "StdinOnce": false, - "NetworkDisabled": false}''')) - self.assertEqual(args[1]['headers'], - {'Content-Type': 'application/json'}) - - def test_create_container_with_volume_string(self): - mount_dest = '/mnt' - - try: - self.client.create_container('busybox', ['ls', mount_dest], - volumes=mount_dest) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - args = fake_request.call_args - self.assertEqual(args[0][0], - url_prefix + 'containers/create') - self.assertEqual(json.loads(args[1]['data']), - json.loads(''' - {"Tty": false, "Image": "busybox", - "Cmd": ["ls", "/mnt"], "AttachStdin": false, - "Volumes": {"/mnt": {}}, - "AttachStderr": true, - "AttachStdout": true, "OpenStdin": false, - "StdinOnce": false, - "NetworkDisabled": false}''')) - self.assertEqual(args[1]['headers'], - {'Content-Type': 'application/json'}) - - def test_create_container_with_ports(self): - try: - self.client.create_container('busybox', 'ls', - ports=[1111, (2222, 'udp'), (3333,)]) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - args = fake_request.call_args - self.assertEqual(args[0][0], - url_prefix + 'containers/create') - 
self.assertEqual(json.loads(args[1]['data']), - json.loads(''' - {"Tty": false, "Image": "busybox", - "Cmd": ["ls"], "AttachStdin": false, - "ExposedPorts": { - "1111/tcp": {}, - "2222/udp": {}, - "3333/tcp": {} - }, - "AttachStderr": true, - "AttachStdout": true, "OpenStdin": false, - "StdinOnce": false, - "NetworkDisabled": false}''')) - self.assertEqual(args[1]['headers'], - {'Content-Type': 'application/json'}) - - def test_create_container_with_entrypoint(self): - try: - self.client.create_container('busybox', 'hello', - entrypoint='cowsay') - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - args = fake_request.call_args - self.assertEqual(args[0][0], - url_prefix + 'containers/create') - self.assertEqual(json.loads(args[1]['data']), - json.loads(''' - {"Tty": false, "Image": "busybox", - "Cmd": ["hello"], "AttachStdin": false, - "AttachStderr": true, - "AttachStdout": true, "OpenStdin": false, - "StdinOnce": false, - "NetworkDisabled": false, - "Entrypoint": "cowsay"}''')) - self.assertEqual(args[1]['headers'], - {'Content-Type': 'application/json'}) - - def test_create_container_with_cpu_shares(self): - try: - self.client.create_container('busybox', 'ls', - cpu_shares=5) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - args = fake_request.call_args - self.assertEqual(args[0][0], - url_prefix + 'containers/create') - self.assertEqual(json.loads(args[1]['data']), - json.loads(''' - {"Tty": false, "Image": "busybox", - "Cmd": ["ls"], "AttachStdin": false, - "AttachStderr": true, - "AttachStdout": true, "OpenStdin": false, - "StdinOnce": false, - "NetworkDisabled": false, - "CpuShares": 5}''')) - self.assertEqual(args[1]['headers'], - {'Content-Type': 'application/json'}) - - def test_create_container_with_cpuset(self): - try: - self.client.create_container('busybox', 'ls', - cpuset='0,1') - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - args = fake_request.call_args - self.assertEqual(args[0][0], - url_prefix + 'containers/create') - self.assertEqual(json.loads(args[1]['data']), - json.loads(''' - {"Tty": false, "Image": "busybox", - "Cmd": ["ls"], "AttachStdin": false, - "AttachStderr": true, - "AttachStdout": true, "OpenStdin": false, - "StdinOnce": false, - "NetworkDisabled": false, - "Cpuset": "0,1", - "CpusetCpus": "0,1"}''')) - self.assertEqual(args[1]['headers'], - {'Content-Type': 'application/json'}) - - def test_create_container_with_working_dir(self): - try: - self.client.create_container('busybox', 'ls', - working_dir='/root') - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - args = fake_request.call_args - self.assertEqual(args[0][0], - url_prefix + 'containers/create') - self.assertEqual(json.loads(args[1]['data']), - json.loads(''' - {"Tty": false, "Image": "busybox", - "Cmd": ["ls"], "AttachStdin": false, - "AttachStderr": true, - "AttachStdout": true, "OpenStdin": false, - "StdinOnce": false, - "NetworkDisabled": false, - "WorkingDir": "/root"}''')) - self.assertEqual(args[1]['headers'], - {'Content-Type': 'application/json'}) - - def test_create_container_with_stdin_open(self): - try: - self.client.create_container('busybox', 'true', stdin_open=True) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - args = fake_request.call_args - self.assertEqual(args[0][0], - url_prefix + 'containers/create') - self.assertEqual(json.loads(args[1]['data']), - 
json.loads(''' - {"Tty": false, "Image": "busybox", "Cmd": ["true"], - "AttachStdin": true, - "AttachStderr": true, "AttachStdout": true, - "StdinOnce": true, - "OpenStdin": true, "NetworkDisabled": false}''')) - self.assertEqual(args[1]['headers'], - {'Content-Type': 'application/json'}) - - def test_create_container_with_volumes_from(self): - vol_names = ['foo', 'bar'] - try: - self.client.create_container('busybox', 'true', - volumes_from=vol_names) - except docker.errors.DockerException as e: - self.assertTrue( - docker.utils.compare_version('1.10', self.client._version) >= 0 - ) - return - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - args = fake_request.call_args - self.assertEqual(args[0][0], url_prefix + 'containers/create') - self.assertEqual(json.loads(args[1]['data'])['VolumesFrom'], - ','.join(vol_names)) - self.assertEqual(args[1]['headers'], - {'Content-Type': 'application/json'}) - - def test_create_container_empty_volumes_from(self): - try: - self.client.create_container('busybox', 'true', volumes_from=[]) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - args = fake_request.call_args - data = json.loads(args[1]['data']) - self.assertTrue('VolumesFrom' not in data) - - def test_create_named_container(self): - try: - self.client.create_container('busybox', 'true', - name='marisa-kirisame') - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - args = fake_request.call_args - self.assertEqual(args[0][0], - url_prefix + 'containers/create') - self.assertEqual(json.loads(args[1]['data']), - json.loads(''' - {"Tty": false, "Image": "busybox", "Cmd": ["true"], - "AttachStdin": false, - "AttachStderr": true, "AttachStdout": true, - "StdinOnce": false, - "OpenStdin": false, "NetworkDisabled": false}''')) - self.assertEqual(args[1]['headers'], - {'Content-Type': 'application/json'}) - self.assertEqual(args[1]['params'], {'name': 'marisa-kirisame'}) - - def test_create_container_with_mem_limit_as_int(self): - try: - self.client.create_container( - 'busybox', 'true', host_config=create_host_config( - mem_limit=128.0 - ) - ) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - args = fake_request.call_args - data = json.loads(args[1]['data']) - self.assertEqual(data['HostConfig']['Memory'], 128.0) - - def test_create_container_with_mem_limit_as_string(self): - try: - self.client.create_container( - 'busybox', 'true', host_config=create_host_config( - mem_limit='128' - ) - ) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - args = fake_request.call_args - data = json.loads(args[1]['data']) - self.assertEqual(data['HostConfig']['Memory'], 128.0) - - def test_create_container_with_mem_limit_as_string_with_k_unit(self): - try: - self.client.create_container( - 'busybox', 'true', host_config=create_host_config( - mem_limit='128k' - ) - ) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - args = fake_request.call_args - data = json.loads(args[1]['data']) - self.assertEqual(data['HostConfig']['Memory'], 128.0 * 1024) - - def test_create_container_with_mem_limit_as_string_with_m_unit(self): - try: - self.client.create_container( - 'busybox', 'true', host_config=create_host_config( - mem_limit='128m' - ) - ) - - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - args = fake_request.call_args - data = 
json.loads(args[1]['data']) - self.assertEqual(data['HostConfig']['Memory'], 128.0 * 1024 * 1024) - - def test_create_container_with_mem_limit_as_string_with_g_unit(self): - try: - self.client.create_container( - 'busybox', 'true', host_config=create_host_config( - mem_limit='128g' - ) - ) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - args = fake_request.call_args - data = json.loads(args[1]['data']) - self.assertEqual( - data['HostConfig']['Memory'], 128.0 * 1024 * 1024 * 1024 - ) - - def test_create_container_with_mem_limit_as_string_with_wrong_value(self): - self.assertRaises( - docker.errors.DockerException, create_host_config, mem_limit='128p' - ) - - self.assertRaises( - docker.errors.DockerException, create_host_config, mem_limit='1f28' - ) - - def test_start_container(self): - try: - self.client.start(fake_api.FAKE_CONTAINER_ID) - except Exception as e: - raise e - self.fail('Command should not raise exception: {0}'.format(e)) - args = fake_request.call_args - self.assertEqual( - args[0][0], - url_prefix + 'containers/3cc2351ab11b/start' - ) - self.assertEqual(json.loads(args[1]['data']), {}) - self.assertEqual( - args[1]['headers'], {'Content-Type': 'application/json'} - ) - self.assertEqual( - args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS - ) - - def test_start_container_none(self): - try: - self.client.start(container=None) - except ValueError as e: - self.assertEqual(str(e), 'image or container param is undefined') - else: - self.fail('Command should raise ValueError') - - try: - self.client.start(None) - except ValueError as e: - self.assertEqual(str(e), 'image or container param is undefined') - else: - self.fail('Command should raise ValueError') - - def test_start_container_regression_573(self): - try: - self.client.start(**{'container': fake_api.FAKE_CONTAINER_ID}) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - def test_create_container_with_lxc_conf(self): - try: - self.client.create_container( - 'busybox', 'true', host_config=create_host_config( - lxc_conf={'lxc.conf.k': 'lxc.conf.value'} - ) - ) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - args = fake_request.call_args - self.assertEqual( - args[0][0], - url_prefix + 'containers/create' - ) - expected_payload = self.base_create_payload() - expected_payload['HostConfig'] = create_host_config() - expected_payload['HostConfig']['LxcConf'] = [ - {"Value": "lxc.conf.value", "Key": "lxc.conf.k"} - ] - - self.assertEqual(json.loads(args[1]['data']), expected_payload) - self.assertEqual( - args[1]['headers'], - {'Content-Type': 'application/json'} - ) - self.assertEqual( - args[1]['timeout'], - DEFAULT_TIMEOUT_SECONDS - ) - - def test_create_container_with_lxc_conf_compat(self): - try: - self.client.create_container( - 'busybox', 'true', host_config=create_host_config( - lxc_conf=[{'Key': 'lxc.conf.k', 'Value': 'lxc.conf.value'}] - ) - ) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - args = fake_request.call_args - self.assertEqual(args[0][0], url_prefix + 'containers/create') - expected_payload = self.base_create_payload() - expected_payload['HostConfig'] = create_host_config() - expected_payload['HostConfig']['LxcConf'] = [ - {"Value": "lxc.conf.value", "Key": "lxc.conf.k"} - ] - self.assertEqual( - json.loads(args[1]['data']), expected_payload) - self.assertEqual(args[1]['headers'], - {'Content-Type': 'application/json'}) - self.assertEqual( 
- args[1]['timeout'], - DEFAULT_TIMEOUT_SECONDS - ) - - def test_create_container_with_binds_ro(self): - try: - mount_dest = '/mnt' - mount_origin = '/tmp' - self.client.create_container( - 'busybox', 'true', host_config=create_host_config( - binds={mount_origin: { - "bind": mount_dest, - "ro": True - }} - ) - ) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - args = fake_request.call_args - self.assertEqual(args[0][0], url_prefix + - 'containers/create') - expected_payload = self.base_create_payload() - expected_payload['HostConfig'] = create_host_config() - expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:ro"] - self.assertEqual(json.loads(args[1]['data']), expected_payload) - self.assertEqual(args[1]['headers'], - {'Content-Type': 'application/json'}) - self.assertEqual( - args[1]['timeout'], - DEFAULT_TIMEOUT_SECONDS - ) - - def test_create_container_with_binds_rw(self): - try: - mount_dest = '/mnt' - mount_origin = '/tmp' - self.client.create_container( - 'busybox', 'true', host_config=create_host_config( - binds={mount_origin: { - "bind": mount_dest, - "ro": False - }} - ) - ) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - args = fake_request.call_args - self.assertEqual(args[0][0], url_prefix + - 'containers/create') - expected_payload = self.base_create_payload() - expected_payload['HostConfig'] = create_host_config() - expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:rw"] - self.assertEqual(json.loads(args[1]['data']), expected_payload) - self.assertEqual(args[1]['headers'], - {'Content-Type': 'application/json'}) - self.assertEqual( - args[1]['timeout'], - DEFAULT_TIMEOUT_SECONDS - ) - - def test_create_container_with_binds_mode(self): - try: - mount_dest = '/mnt' - mount_origin = '/tmp' - self.client.create_container( - 'busybox', 'true', host_config=create_host_config( - binds={mount_origin: { - "bind": mount_dest, - "mode": "z", - }} - ) - ) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - args = fake_request.call_args - self.assertEqual(args[0][0], url_prefix + - 'containers/create') - expected_payload = self.base_create_payload() - expected_payload['HostConfig'] = create_host_config() - expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:z"] - self.assertEqual(json.loads(args[1]['data']), expected_payload) - self.assertEqual(args[1]['headers'], - {'Content-Type': 'application/json'}) - self.assertEqual( - args[1]['timeout'], - DEFAULT_TIMEOUT_SECONDS - ) - - def test_create_container_with_binds_mode_and_ro_error(self): - try: - mount_dest = '/mnt' - mount_origin = '/tmp' - self.client.create_container( - 'busybox', 'true', host_config=create_host_config( - binds={mount_origin: { - "bind": mount_dest, - "mode": "z", - "ro": True, - }} - ) - ) - except ValueError: - return - - self.fail('Command should raise ValueError') - - def test_create_container_with_binds_list(self): - try: - self.client.create_container( - 'busybox', 'true', host_config=create_host_config( - binds=[ - "/tmp:/mnt/1:ro", - "/tmp:/mnt/2", - ], - ) - ) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - args = fake_request.call_args - self.assertEqual(args[0][0], url_prefix + - 'containers/create') - expected_payload = self.base_create_payload() - expected_payload['HostConfig'] = create_host_config() - expected_payload['HostConfig']['Binds'] = [ - "/tmp:/mnt/1:ro", - "/tmp:/mnt/2", - ] - 
self.assertEqual(json.loads(args[1]['data']), expected_payload) - self.assertEqual(args[1]['headers'], - {'Content-Type': 'application/json'}) - self.assertEqual( - args[1]['timeout'], - DEFAULT_TIMEOUT_SECONDS - ) - - def test_create_container_with_port_binds(self): - self.maxDiff = None - try: - self.client.create_container( - 'busybox', 'true', host_config=create_host_config( - port_bindings={ - 1111: None, - 2222: 2222, - '3333/udp': (3333,), - 4444: ('127.0.0.1',), - 5555: ('127.0.0.1', 5555), - 6666: [('127.0.0.1',), ('192.168.0.1',)] - } - ) - ) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - args = fake_request.call_args - self.assertEqual(args[0][0], url_prefix + 'containers/create') - data = json.loads(args[1]['data']) - port_bindings = data['HostConfig']['PortBindings'] - self.assertTrue('1111/tcp' in port_bindings) - self.assertTrue('2222/tcp' in port_bindings) - self.assertTrue('3333/udp' in port_bindings) - self.assertTrue('4444/tcp' in port_bindings) - self.assertTrue('5555/tcp' in port_bindings) - self.assertTrue('6666/tcp' in port_bindings) - self.assertEqual( - [{"HostPort": "", "HostIp": ""}], - port_bindings['1111/tcp'] - ) - self.assertEqual( - [{"HostPort": "2222", "HostIp": ""}], - port_bindings['2222/tcp'] - ) - self.assertEqual( - [{"HostPort": "3333", "HostIp": ""}], - port_bindings['3333/udp'] - ) - self.assertEqual( - [{"HostPort": "", "HostIp": "127.0.0.1"}], - port_bindings['4444/tcp'] - ) - self.assertEqual( - [{"HostPort": "5555", "HostIp": "127.0.0.1"}], - port_bindings['5555/tcp'] - ) - self.assertEqual(len(port_bindings['6666/tcp']), 2) - self.assertEqual(args[1]['headers'], - {'Content-Type': 'application/json'}) - self.assertEqual( - args[1]['timeout'], - DEFAULT_TIMEOUT_SECONDS - ) - - def test_create_container_with_mac_address(self): - try: - mac_address_expected = "02:42:ac:11:00:0a" - container = self.client.create_container( - 'busybox', ['sleep', '60'], mac_address=mac_address_expected) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - res = self.client.inspect_container(container['Id']) - self.assertEqual(mac_address_expected, - res['NetworkSettings']['MacAddress']) - - def test_create_container_with_links(self): - try: - link_path = 'path' - alias = 'alias' - self.client.create_container( - 'busybox', 'true', host_config=create_host_config( - links={link_path: alias} - ) - ) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - args = fake_request.call_args - self.assertEqual( - args[0][0], url_prefix + 'containers/create' - ) - expected_payload = self.base_create_payload() - expected_payload['HostConfig'] = create_host_config() - expected_payload['HostConfig']['Links'] = ['path:alias'] - - self.assertEqual(json.loads(args[1]['data']), expected_payload) - self.assertEqual( - args[1]['headers'], {'Content-Type': 'application/json'} - ) - - def test_create_container_with_multiple_links(self): - try: - link_path = 'path' - alias = 'alias' - self.client.create_container( - 'busybox', 'true', host_config=create_host_config( - links={ - link_path + '1': alias + '1', - link_path + '2': alias + '2' - } - ) - ) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - args = fake_request.call_args - self.assertEqual(args[0][0], url_prefix + 'containers/create') - expected_payload = self.base_create_payload() - expected_payload['HostConfig'] = create_host_config() - 
expected_payload['HostConfig']['Links'] = [ - 'path1:alias1', 'path2:alias2' - ] - self.assertEqual(json.loads(args[1]['data']), expected_payload) - self.assertEqual( - args[1]['headers'], {'Content-Type': 'application/json'} - ) - - def test_create_container_with_links_as_list_of_tuples(self): - try: - link_path = 'path' - alias = 'alias' - self.client.create_container( - 'busybox', 'true', host_config=create_host_config( - links=[(link_path, alias)] - ) - ) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - args = fake_request.call_args - self.assertEqual(args[0][0], url_prefix + 'containers/create') - expected_payload = self.base_create_payload() - expected_payload['HostConfig'] = create_host_config() - expected_payload['HostConfig']['Links'] = ['path:alias'] - - self.assertEqual(json.loads(args[1]['data']), expected_payload) - self.assertEqual( - args[1]['headers'], {'Content-Type': 'application/json'} - ) - - def test_create_container_privileged(self): - try: - self.client.create_container( - 'busybox', 'true', - host_config=create_host_config(privileged=True) - ) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - expected_payload = self.base_create_payload() - expected_payload['HostConfig'] = create_host_config() - expected_payload['HostConfig']['Privileged'] = True - args = fake_request.call_args - self.assertEqual(args[0][0], url_prefix + 'containers/create') - self.assertEqual(json.loads(args[1]['data']), expected_payload) - self.assertEqual(args[1]['headers'], - {'Content-Type': 'application/json'}) - self.assertEqual( - args[1]['timeout'], - DEFAULT_TIMEOUT_SECONDS - ) - - def test_start_container_with_lxc_conf(self): - if six.PY2: - try: - self.client.start( - fake_api.FAKE_CONTAINER_ID, - lxc_conf={'lxc.conf.k': 'lxc.conf.value'} - ) - except DeprecationWarning as e: - return - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - else: - self.fail('Expected a DeprecationWarning') - else: - with self.assertWarns(DeprecationWarning): - self.client.start( - fake_api.FAKE_CONTAINER_ID, - lxc_conf={'lxc.conf.k': 'lxc.conf.value'} - ) - - def test_start_container_with_lxc_conf_compat(self): - if six.PY2: - try: - self.client.start( - fake_api.FAKE_CONTAINER_ID, - lxc_conf=[{'Key': 'lxc.conf.k', 'Value': 'lxc.conf.value'}] - ) - except DeprecationWarning as e: - return - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - else: - self.fail('Expected a DeprecationWarning') - else: - with self.assertWarns(DeprecationWarning): - self.client.start( - fake_api.FAKE_CONTAINER_ID, - lxc_conf=[{'Key': 'lxc.conf.k', 'Value': 'lxc.conf.value'}] - ) - - def test_start_container_with_binds_ro(self): - mount_dest = '/mnt' - mount_origin = '/tmp' - - if six.PY2: - try: - self.client.start( - fake_api.FAKE_CONTAINER_ID, binds={ - mount_origin: { - "bind": mount_dest, - "ro": True - } - } - ) - except DeprecationWarning as e: - return - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - else: - self.fail('Expected a DeprecationWarning') - else: - with self.assertWarns(DeprecationWarning): - self.client.start( - fake_api.FAKE_CONTAINER_ID, binds={ - mount_origin: { - "bind": mount_dest, - "ro": True - } - } - ) - - def test_start_container_with_binds_rw(self): - mount_dest = '/mnt' - mount_origin = '/tmp' - if six.PY2: - try: - self.client.start( - fake_api.FAKE_CONTAINER_ID, binds={ - mount_origin: 
{"bind": mount_dest, "ro": False} - } - ) - except DeprecationWarning as e: - return - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - else: - self.fail('Expected a DeprecationWarning') - else: - with self.assertWarns(DeprecationWarning): - self.client.start( - fake_api.FAKE_CONTAINER_ID, binds={ - mount_origin: {"bind": mount_dest, "ro": False} - } - ) - - def test_start_container_with_port_binds(self): - self.maxDiff = None - if six.PY2: - try: - self.client.start(fake_api.FAKE_CONTAINER_ID, port_bindings={ - 1111: None, - 2222: 2222, - '3333/udp': (3333,), - 4444: ('127.0.0.1',), - 5555: ('127.0.0.1', 5555), - 6666: [('127.0.0.1',), ('192.168.0.1',)] - }) - except DeprecationWarning as e: - return - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - else: - self.fail('Expected a DeprecationWarning') - else: - with self.assertWarns(DeprecationWarning): - self.client.start(fake_api.FAKE_CONTAINER_ID, port_bindings={ - 1111: None, - 2222: 2222, - '3333/udp': (3333,), - 4444: ('127.0.0.1',), - 5555: ('127.0.0.1', 5555), - 6666: [('127.0.0.1',), ('192.168.0.1',)] - }) - - def test_start_container_with_links(self): - # one link - link_path = 'path' - alias = 'alias' - - if six.PY2: - try: - self.client.start(fake_api.FAKE_CONTAINER_ID, - links={link_path: alias}) - except DeprecationWarning as e: - return - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - else: - self.fail('Expected a DeprecationWarning') - else: - with self.assertWarns(DeprecationWarning): - self.client.start( - fake_api.FAKE_CONTAINER_ID, links={link_path: alias} - ) - - def test_start_container_with_multiple_links(self): - link_path = 'path' - alias = 'alias' - if six.PY2: - try: - self.client.start( - fake_api.FAKE_CONTAINER_ID, - links={ - link_path + '1': alias + '1', - link_path + '2': alias + '2' - } - ) - except DeprecationWarning as e: - return - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - else: - self.fail('Expected a DeprecationWarning') - else: - with self.assertWarns(DeprecationWarning): - self.client.start( - fake_api.FAKE_CONTAINER_ID, - links={ - link_path + '1': alias + '1', - link_path + '2': alias + '2' - } - ) - - def test_start_container_with_links_as_list_of_tuples(self): - # one link - link_path = 'path' - alias = 'alias' - if six.PY2: - try: - self.client.start(fake_api.FAKE_CONTAINER_ID, - links=[(link_path, alias)]) - except DeprecationWarning as e: - return - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - else: - self.fail('Expected a DeprecationWarning') - else: - with self.assertWarns(DeprecationWarning): - self.client.start(fake_api.FAKE_CONTAINER_ID, - links=[(link_path, alias)]) - - def test_start_container_privileged(self): - if six.PY2: - try: - self.client.start(fake_api.FAKE_CONTAINER_ID, privileged=True) - except DeprecationWarning as e: - return - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - else: - self.fail('Expected a DeprecationWarning') - else: - with self.assertWarns(DeprecationWarning): - self.client.start(fake_api.FAKE_CONTAINER_ID, privileged=True) - - def test_start_container_with_dict_instead_of_id(self): - try: - self.client.start({'Id': fake_api.FAKE_CONTAINER_ID}) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - args = fake_request.call_args - self.assertEqual( - args[0][0], - 
url_prefix + 'containers/3cc2351ab11b/start' - ) - self.assertEqual(json.loads(args[1]['data']), {}) - self.assertEqual( - args[1]['headers'], {'Content-Type': 'application/json'} - ) - self.assertEqual( - args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS - ) - - def test_create_container_with_restart_policy(self): - try: - self.client.create_container( - 'busybox', 'true', host_config=create_host_config( - restart_policy={ - "Name": "always", - "MaximumRetryCount": 0 - } - ) - ) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - args = fake_request.call_args - self.assertEqual(args[0][0], url_prefix + 'containers/create') - - expected_payload = self.base_create_payload() - expected_payload['HostConfig'] = create_host_config() - expected_payload['HostConfig']['RestartPolicy'] = { - "MaximumRetryCount": 0, "Name": "always" - } - self.assertEqual(json.loads(args[1]['data']), expected_payload) - - self.assertEqual( - args[1]['headers'], {'Content-Type': 'application/json'} - ) - self.assertEqual( - args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS - ) - - def test_create_container_with_added_capabilities(self): - try: - self.client.create_container( - 'busybox', 'true', - host_config=create_host_config(cap_add=['MKNOD']) - ) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - args = fake_request.call_args - self.assertEqual(args[0][0], url_prefix + 'containers/create') - expected_payload = self.base_create_payload() - expected_payload['HostConfig'] = create_host_config() - expected_payload['HostConfig']['CapAdd'] = ['MKNOD'] - self.assertEqual(json.loads(args[1]['data']), expected_payload) - self.assertEqual( - args[1]['headers'], {'Content-Type': 'application/json'} - ) - self.assertEqual( - args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS - ) - - def test_create_container_with_dropped_capabilities(self): - try: - self.client.create_container( - 'busybox', 'true', - host_config=create_host_config(cap_drop=['MKNOD']) - ) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - args = fake_request.call_args - self.assertEqual(args[0][0], url_prefix + 'containers/create') - expected_payload = self.base_create_payload() - expected_payload['HostConfig'] = create_host_config() - expected_payload['HostConfig']['CapDrop'] = ['MKNOD'] - self.assertEqual(json.loads(args[1]['data']), expected_payload) - self.assertEqual( - args[1]['headers'], {'Content-Type': 'application/json'} - ) - self.assertEqual( - args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS - ) - - def test_create_container_with_devices(self): - try: - self.client.create_container( - 'busybox', 'true', host_config=create_host_config( - devices=['/dev/sda:/dev/xvda:rwm', - '/dev/sdb:/dev/xvdb', - '/dev/sdc'] - ) - ) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - args = fake_request.call_args - self.assertEqual(args[0][0], url_prefix + 'containers/create') - expected_payload = self.base_create_payload() - expected_payload['HostConfig'] = create_host_config() - expected_payload['HostConfig']['Devices'] = [ - {'CgroupPermissions': 'rwm', - 'PathInContainer': '/dev/xvda', - 'PathOnHost': '/dev/sda'}, - {'CgroupPermissions': 'rwm', - 'PathInContainer': '/dev/xvdb', - 'PathOnHost': '/dev/sdb'}, - {'CgroupPermissions': 'rwm', - 'PathInContainer': '/dev/sdc', - 'PathOnHost': '/dev/sdc'} - ] - self.assertEqual(json.loads(args[1]['data']), expected_payload) - self.assertEqual( - args[1]['headers'], {'Content-Type': 
'application/json'} - ) - self.assertEqual( - args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS - ) - - def test_create_container_with_labels_dict(self): - labels_dict = { - six.text_type('foo'): six.text_type('1'), - six.text_type('bar'): six.text_type('2'), - } - try: - self.client.create_container( - 'busybox', 'true', - labels=labels_dict, - ) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - args = fake_request.call_args - self.assertEqual(args[0][0], url_prefix + 'containers/create') - self.assertEqual(json.loads(args[1]['data'])['Labels'], labels_dict) - self.assertEqual( - args[1]['headers'], {'Content-Type': 'application/json'} - ) - self.assertEqual( - args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS - ) - - def test_create_container_with_labels_list(self): - labels_list = [ - six.text_type('foo'), - six.text_type('bar'), - ] - labels_dict = { - six.text_type('foo'): six.text_type(), - six.text_type('bar'): six.text_type(), - } - try: - self.client.create_container( - 'busybox', 'true', - labels=labels_list, - ) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - args = fake_request.call_args - self.assertEqual(args[0][0], url_prefix + 'containers/create') - self.assertEqual(json.loads(args[1]['data'])['Labels'], labels_dict) - self.assertEqual( - args[1]['headers'], {'Content-Type': 'application/json'} - ) - self.assertEqual( - args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS - ) - - def test_create_container_with_named_volume(self): - try: - mount_dest = '/mnt' - volume_name = 'name' - self.client.create_container( - 'busybox', 'true', - host_config=create_host_config( - binds={volume_name: { - "bind": mount_dest, - "ro": False - }}), - volume_driver='foodriver', - ) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - args = fake_request.call_args - self.assertEqual(args[0][0], url_prefix + - 'containers/create') - expected_payload = self.base_create_payload() - expected_payload['VolumeDriver'] = 'foodriver' - expected_payload['HostConfig'] = create_host_config() - expected_payload['HostConfig']['Binds'] = ["name:/mnt:rw"] - self.assertEqual(json.loads(args[1]['data']), expected_payload) - self.assertEqual(args[1]['headers'], - {'Content-Type': 'application/json'}) - self.assertEqual( - args[1]['timeout'], - DEFAULT_TIMEOUT_SECONDS - ) - - def test_resize_container(self): - try: - self.client.resize( - {'Id': fake_api.FAKE_CONTAINER_ID}, - height=15, - width=120 - ) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'containers/3cc2351ab11b/resize', - params={'h': 15, 'w': 120}, - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_rename_container(self): - try: - self.client.rename( - {'Id': fake_api.FAKE_CONTAINER_ID}, - name='foobar' - ) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'containers/3cc2351ab11b/rename', - params={'name': 'foobar'}, - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_wait(self): - try: - self.client.wait(fake_api.FAKE_CONTAINER_ID) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'containers/3cc2351ab11b/wait', - timeout=None - ) - - def test_wait_with_dict_instead_of_id(self): - try: - self.client.wait({'Id': fake_api.FAKE_CONTAINER_ID}) - except Exception as e: - raise 
e - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'containers/3cc2351ab11b/wait', - timeout=None - ) - - def _socket_path_for_client_session(self, client): - socket_adapter = client.get_adapter('http+docker://') - return socket_adapter.socket_path - - def test_url_compatibility_unix(self): - c = docker.Client(base_url="unix://socket") - - assert self._socket_path_for_client_session(c) == '/socket' - - def test_url_compatibility_unix_triple_slash(self): - c = docker.Client(base_url="unix:///socket") - - assert self._socket_path_for_client_session(c) == '/socket' - - def test_url_compatibility_http_unix_triple_slash(self): - c = docker.Client(base_url="http+unix:///socket") - - assert self._socket_path_for_client_session(c) == '/socket' - - def test_url_compatibility_http(self): - c = docker.Client(base_url="http://hostname:1234") - - assert c.base_url == "http://hostname:1234" - - def test_url_compatibility_tcp(self): - c = docker.Client(base_url="tcp://hostname:1234") - - assert c.base_url == "http://hostname:1234" - - def test_logs(self): - try: - logs = self.client.logs(fake_api.FAKE_CONTAINER_ID) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'containers/3cc2351ab11b/logs', - params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1, - 'tail': 'all'}, - timeout=DEFAULT_TIMEOUT_SECONDS, - stream=False - ) - - self.assertEqual( - logs, - 'Flowering Nights\n(Sakuya Iyazoi)\n'.encode('ascii') - ) - - def test_logs_with_dict_instead_of_id(self): - try: - logs = self.client.logs({'Id': fake_api.FAKE_CONTAINER_ID}) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'containers/3cc2351ab11b/logs', - params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1, - 'tail': 'all'}, - timeout=DEFAULT_TIMEOUT_SECONDS, - stream=False - ) - - self.assertEqual( - logs, - 'Flowering Nights\n(Sakuya Iyazoi)\n'.encode('ascii') - ) - - def test_log_streaming(self): - try: - self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=True) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'containers/3cc2351ab11b/logs', - params={'timestamps': 0, 'follow': 1, 'stderr': 1, 'stdout': 1, - 'tail': 'all'}, - timeout=DEFAULT_TIMEOUT_SECONDS, - stream=True - ) - - def test_log_tail(self): - try: - self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False, tail=10) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'containers/3cc2351ab11b/logs', - params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1, - 'tail': 10}, - timeout=DEFAULT_TIMEOUT_SECONDS, - stream=False - ) - - def test_diff(self): - try: - self.client.diff(fake_api.FAKE_CONTAINER_ID) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'containers/3cc2351ab11b/changes', - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_diff_with_dict_instead_of_id(self): - try: - self.client.diff({'Id': fake_api.FAKE_CONTAINER_ID}) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'containers/3cc2351ab11b/changes', - timeout=DEFAULT_TIMEOUT_SECONDS - ) 
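
The deletion hunks above and below retire the old monolithic test suite, in which every call under test is wrapped in try/except with a manual self.fail() and Python 2/3 behaviour is forked on six.PY2. The pattern is verbose and error-prone; note the stray raise e in test_wait_with_dict_instead_of_id above, which leaves its self.fail() unreachable. The replacement modules added later in this diff call the client directly, since pytest already reports an unexpected exception as a test failure, and assert expected errors explicitly with pytest.raises. A minimal sketch of the two styles, assuming the mocked self.client that BaseAPIClientTest in tests/unit/api_test.py provides to the new modules:

    import pytest

    import docker
    from . import fake_api
    from .api_test import BaseAPIClientTest


    class StyleComparisonTest(BaseAPIClientTest):
        def test_wait_old_style(self):
            # Removed pattern: the wrapper only restates what the test
            # runner would report anyway.
            try:
                self.client.wait(fake_api.FAKE_CONTAINER_ID)
            except Exception as e:
                self.fail('Command should not raise exception: {0}'.format(e))

        def test_wait_new_style(self):
            # pytest fails the test on any unexpected exception, so the
            # bare call is assertion enough.
            self.client.wait(fake_api.FAKE_CONTAINER_ID)

        def test_start_privileged_new_style(self):
            # Expected errors are asserted explicitly instead of branching
            # on six.PY2 and assertWarns.
            with pytest.raises(docker.errors.DeprecatedMethod):
                self.client.start(fake_api.FAKE_CONTAINER_ID, privileged=True)
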
- - def test_port(self): - try: - self.client.port({'Id': fake_api.FAKE_CONTAINER_ID}, 1111) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'containers/3cc2351ab11b/json', - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_stop_container(self): - timeout = 2 - try: - self.client.stop(fake_api.FAKE_CONTAINER_ID, timeout=timeout) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'containers/3cc2351ab11b/stop', - params={'t': timeout}, - timeout=(DEFAULT_TIMEOUT_SECONDS + timeout) - ) - - def test_stop_container_with_dict_instead_of_id(self): - timeout = 2 - try: - self.client.stop({'Id': fake_api.FAKE_CONTAINER_ID}, - timeout=timeout) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'containers/3cc2351ab11b/stop', - params={'t': timeout}, - timeout=(DEFAULT_TIMEOUT_SECONDS + timeout) - ) - - def test_exec_create(self): - try: - self.client.exec_create(fake_api.FAKE_CONTAINER_ID, ['ls', '-1']) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - args = fake_request.call_args - self.assertEqual( - args[0][0], url_prefix + 'containers/{0}/exec'.format( - fake_api.FAKE_CONTAINER_ID - ) - ) - - self.assertEqual( - json.loads(args[1]['data']), { - 'Tty': False, - 'AttachStdout': True, - 'Container': fake_api.FAKE_CONTAINER_ID, - 'Cmd': ['ls', '-1'], - 'Privileged': False, - 'AttachStdin': False, - 'AttachStderr': True, - 'User': '' - } - ) - - self.assertEqual(args[1]['headers'], - {'Content-Type': 'application/json'}) - - def test_exec_start(self): - try: - self.client.exec_start(fake_api.FAKE_EXEC_ID) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - args = fake_request.call_args - self.assertEqual( - args[0][0], url_prefix + 'exec/{0}/start'.format( - fake_api.FAKE_EXEC_ID - ) - ) - - self.assertEqual( - json.loads(args[1]['data']), { - 'Tty': False, - 'Detach': False, - } - ) - - self.assertEqual(args[1]['headers'], - {'Content-Type': 'application/json'}) - - def test_exec_inspect(self): - try: - self.client.exec_inspect(fake_api.FAKE_EXEC_ID) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - args = fake_request.call_args - self.assertEqual( - args[0][0], url_prefix + 'exec/{0}/json'.format( - fake_api.FAKE_EXEC_ID - ) - ) - - def test_exec_resize(self): - try: - self.client.exec_resize(fake_api.FAKE_EXEC_ID, height=20, width=60) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'exec/{0}/resize'.format(fake_api.FAKE_EXEC_ID), - params={'h': 20, 'w': 60}, - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_pause_container(self): - try: - self.client.pause(fake_api.FAKE_CONTAINER_ID) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - fake_request.assert_called_with( - url_prefix + 'containers/3cc2351ab11b/pause', - timeout=(DEFAULT_TIMEOUT_SECONDS) - ) - - def test_unpause_container(self): - try: - self.client.unpause(fake_api.FAKE_CONTAINER_ID) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - fake_request.assert_called_with( - url_prefix + 'containers/3cc2351ab11b/unpause', - timeout=(DEFAULT_TIMEOUT_SECONDS) - ) - - def 
test_kill_container(self): - try: - self.client.kill(fake_api.FAKE_CONTAINER_ID) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'containers/3cc2351ab11b/kill', - params={}, - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_kill_container_with_dict_instead_of_id(self): - try: - self.client.kill({'Id': fake_api.FAKE_CONTAINER_ID}) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'containers/3cc2351ab11b/kill', - params={}, - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_kill_container_with_signal(self): - try: - self.client.kill(fake_api.FAKE_CONTAINER_ID, signal=signal.SIGTERM) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'containers/3cc2351ab11b/kill', - params={'signal': signal.SIGTERM}, - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_restart_container(self): - try: - self.client.restart(fake_api.FAKE_CONTAINER_ID, timeout=2) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'containers/3cc2351ab11b/restart', - params={'t': 2}, - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_restart_container_with_dict_instead_of_id(self): - try: - self.client.restart({'Id': fake_api.FAKE_CONTAINER_ID}, timeout=2) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'containers/3cc2351ab11b/restart', - params={'t': 2}, - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_remove_container(self): - try: - self.client.remove_container(fake_api.FAKE_CONTAINER_ID) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'containers/3cc2351ab11b', - params={'v': False, 'link': False, 'force': False}, - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_remove_container_with_dict_instead_of_id(self): - try: - self.client.remove_container({'Id': fake_api.FAKE_CONTAINER_ID}) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'containers/3cc2351ab11b', - params={'v': False, 'link': False, 'force': False}, - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_remove_link(self): - try: - self.client.remove_container(fake_api.FAKE_CONTAINER_ID, link=True) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'containers/3cc2351ab11b', - params={'v': False, 'link': True, 'force': False}, - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_export(self): - try: - self.client.export(fake_api.FAKE_CONTAINER_ID) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'containers/3cc2351ab11b/export', - stream=True, - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_export_with_dict_instead_of_id(self): - try: - self.client.export({'Id': fake_api.FAKE_CONTAINER_ID}) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'containers/3cc2351ab11b/export', - stream=True, - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def 
test_inspect_container(self): - try: - self.client.inspect_container(fake_api.FAKE_CONTAINER_ID) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - fake_request.assert_called_with( - url_prefix + 'containers/3cc2351ab11b/json', - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_inspect_container_undefined_id(self): - for arg in None, '', {True: True}: - try: - self.client.inspect_container(arg) - except docker.errors.NullResource as e: - self.assertEqual( - e.args[0], 'image or container param is undefined' - ) - else: - self.fail('Command expected NullResource exception') - - def test_container_stats(self): - try: - self.client.stats(fake_api.FAKE_CONTAINER_ID) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'containers/3cc2351ab11b/stats', - timeout=60, - stream=True - ) - - ################## - # IMAGES TESTS # - ################## - - def test_pull(self): - try: - self.client.pull('joffrey/test001') - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - args = fake_request.call_args - self.assertEqual( - args[0][0], - url_prefix + 'images/create' - ) - self.assertEqual( - args[1]['params'], - {'tag': None, 'fromImage': 'joffrey/test001'} - ) - self.assertFalse(args[1]['stream']) - - def test_pull_stream(self): - try: - self.client.pull('joffrey/test001', stream=True) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - args = fake_request.call_args - self.assertEqual( - args[0][0], - url_prefix + 'images/create' - ) - self.assertEqual( - args[1]['params'], - {'tag': None, 'fromImage': 'joffrey/test001'} - ) - self.assertTrue(args[1]['stream']) - - def test_commit(self): - try: - self.client.commit(fake_api.FAKE_CONTAINER_ID) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'commit', - data='{}', - headers={'Content-Type': 'application/json'}, - params={ - 'repo': None, - 'comment': None, - 'tag': None, - 'container': '3cc2351ab11b', - 'author': None - }, - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_remove_image(self): - try: - self.client.remove_image(fake_api.FAKE_IMAGE_ID) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'images/e9aa60c60128', - params={'force': False, 'noprune': False}, - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_image_history(self): - try: - self.client.history(fake_api.FAKE_IMAGE_NAME) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'images/test_image/history', - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_import_image(self): - try: - self.client.import_image( - fake_api.FAKE_TARBALL_PATH, - repository=fake_api.FAKE_REPO_NAME, - tag=fake_api.FAKE_TAG_NAME - ) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'images/create', - params={ - 'repo': fake_api.FAKE_REPO_NAME, - 'tag': fake_api.FAKE_TAG_NAME, - 'fromSrc': fake_api.FAKE_TARBALL_PATH - }, - data=None, - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_import_image_from_bytes(self): - stream = (i for i in range(0, 100)) - try: - self.client.import_image( - stream, - repository=fake_api.FAKE_REPO_NAME, - 
tag=fake_api.FAKE_TAG_NAME - ) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'images/create', - params={ - 'repo': fake_api.FAKE_REPO_NAME, - 'tag': fake_api.FAKE_TAG_NAME, - 'fromSrc': '-', - }, - headers={ - 'Content-Type': 'application/tar', - }, - data=stream, - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_import_image_from_image(self): - try: - self.client.import_image( - image=fake_api.FAKE_IMAGE_NAME, - repository=fake_api.FAKE_REPO_NAME, - tag=fake_api.FAKE_TAG_NAME - ) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'images/create', - params={ - 'repo': fake_api.FAKE_REPO_NAME, - 'tag': fake_api.FAKE_TAG_NAME, - 'fromImage': fake_api.FAKE_IMAGE_NAME - }, - data=None, - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_inspect_image(self): - try: - self.client.inspect_image(fake_api.FAKE_IMAGE_NAME) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'images/test_image/json', - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_inspect_image_undefined_id(self): - for arg in None, '', {True: True}: - try: - self.client.inspect_image(arg) - except docker.errors.NullResource as e: - self.assertEqual( - e.args[0], 'image or container param is undefined' - ) - else: - self.fail('Command expected NullResource exception') - - def test_insert_image(self): - try: - self.client.insert(fake_api.FAKE_IMAGE_NAME, - fake_api.FAKE_URL, fake_api.FAKE_PATH) - except docker.errors.DeprecatedMethod as e: - self.assertTrue( - docker.utils.compare_version('1.12', self.client._version) >= 0 - ) - return - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'images/test_image/insert', - params={ - 'url': fake_api.FAKE_URL, - 'path': fake_api.FAKE_PATH - }, - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_push_image(self): - try: - with mock.patch('docker.auth.auth.resolve_authconfig', - fake_resolve_authconfig): - self.client.push(fake_api.FAKE_IMAGE_NAME) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'images/test_image/push', - params={ - 'tag': None - }, - data='{}', - headers={'Content-Type': 'application/json'}, - stream=False, - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_push_image_with_tag(self): - try: - with mock.patch('docker.auth.auth.resolve_authconfig', - fake_resolve_authconfig): - self.client.push( - fake_api.FAKE_IMAGE_NAME, tag=fake_api.FAKE_TAG_NAME - ) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'images/test_image/push', - params={ - 'tag': fake_api.FAKE_TAG_NAME, - }, - data='{}', - headers={'Content-Type': 'application/json'}, - stream=False, - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_push_image_stream(self): - try: - with mock.patch('docker.auth.auth.resolve_authconfig', - fake_resolve_authconfig): - self.client.push(fake_api.FAKE_IMAGE_NAME, stream=True) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'images/test_image/push', - params={ - 'tag': None - }, - data='{}', - headers={'Content-Type': 'application/json'}, - 
stream=True, - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_tag_image(self): - try: - self.client.tag(fake_api.FAKE_IMAGE_ID, fake_api.FAKE_REPO_NAME) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'images/e9aa60c60128/tag', - params={ - 'tag': None, - 'repo': 'repo', - 'force': 0 - }, - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_tag_image_tag(self): - try: - self.client.tag( - fake_api.FAKE_IMAGE_ID, - fake_api.FAKE_REPO_NAME, - tag=fake_api.FAKE_TAG_NAME - ) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'images/e9aa60c60128/tag', - params={ - 'tag': 'tag', - 'repo': 'repo', - 'force': 0 - }, - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_tag_image_force(self): - try: - self.client.tag( - fake_api.FAKE_IMAGE_ID, fake_api.FAKE_REPO_NAME, force=True) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'images/e9aa60c60128/tag', - params={ - 'tag': None, - 'repo': 'repo', - 'force': 1 - }, - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_get_image(self): - try: - self.client.get_image(fake_api.FAKE_IMAGE_ID) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'images/e9aa60c60128/get', - stream=True, - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - def test_load_image(self): - try: - self.client.load_image('Byte Stream....') - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - fake_request.assert_called_with( - url_prefix + 'images/load', - data='Byte Stream....', - timeout=DEFAULT_TIMEOUT_SECONDS - ) - - ################# - # BUILDER TESTS # - ################# - - def test_build_container(self): - script = io.BytesIO('\n'.join([ - 'FROM busybox', - 'MAINTAINER docker-py', - 'RUN mkdir -p /tmp/test', - 'EXPOSE 8080', - 'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz' - ' /tmp/silence.tar.gz' - ]).encode('ascii')) - try: - self.client.build(fileobj=script) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - def test_build_container_pull(self): - script = io.BytesIO('\n'.join([ - 'FROM busybox', - 'MAINTAINER docker-py', - 'RUN mkdir -p /tmp/test', - 'EXPOSE 8080', - 'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz' - ' /tmp/silence.tar.gz' - ]).encode('ascii')) - try: - self.client.build(fileobj=script, pull=True) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - def test_build_container_stream(self): - script = io.BytesIO('\n'.join([ - 'FROM busybox', - 'MAINTAINER docker-py', - 'RUN mkdir -p /tmp/test', - 'EXPOSE 8080', - 'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz' - ' /tmp/silence.tar.gz' - ]).encode('ascii')) - try: - self.client.build(fileobj=script, stream=True) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - def test_build_container_custom_context(self): - script = io.BytesIO('\n'.join([ - 'FROM busybox', - 'MAINTAINER docker-py', - 'RUN mkdir -p /tmp/test', - 'EXPOSE 8080', - 'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz' - ' /tmp/silence.tar.gz' - ]).encode('ascii')) - context = docker.utils.mkbuildcontext(script) - try: - self.client.build(fileobj=context, 
custom_context=True) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - def test_build_container_custom_context_gzip(self): - script = io.BytesIO('\n'.join([ - 'FROM busybox', - 'MAINTAINER docker-py', - 'RUN mkdir -p /tmp/test', - 'EXPOSE 8080', - 'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz' - ' /tmp/silence.tar.gz' - ]).encode('ascii')) - context = docker.utils.mkbuildcontext(script) - gz_context = gzip.GzipFile(fileobj=context) - try: - self.client.build( - fileobj=gz_context, - custom_context=True, - encoding="gzip" - ) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - def test_build_remote_with_registry_auth(self): - try: - self.client._auth_configs = { - 'https://example.com': { - 'user': 'example', - 'password': 'example', - 'email': 'example@example.com' - } - } - - self.client.build(path='https://github.com/docker-library/mongo') - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - def test_build_container_with_named_dockerfile(self): - try: - self.client.build('.', dockerfile='nameddockerfile') - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - def test_build_container_with_container_limits(self): - try: - self.client.build('.', container_limits={ - 'memory': 1024 * 1024, - 'cpusetcpus': 1, - 'cpushares': 1000, - 'memswap': 1024 * 1024 * 8 - }) - except Exception as e: - self.fail('Command should not raise exception: {0}'.format(e)) - - def test_build_container_invalid_container_limits(self): - self.assertRaises( - docker.errors.DockerException, - lambda: self.client.build('.', container_limits={ - 'foo': 'bar' - }) - ) - - ####################### - # PY SPECIFIC TESTS # - ####################### - - def test_load_config_no_file(self): - folder = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, folder) - cfg = docker.auth.load_config(folder) - self.assertTrue(cfg is not None) - - def test_load_config(self): - folder = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, folder) - dockercfg_path = os.path.join(folder, '.dockercfg') - with open(dockercfg_path, 'w') as f: - auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii') - f.write('auth = {0}\n'.format(auth_)) - f.write('email = sakuya@scarlet.net') - cfg = docker.auth.load_config(dockercfg_path) - self.assertTrue(docker.auth.INDEX_URL in cfg) - self.assertNotEqual(cfg[docker.auth.INDEX_URL], None) - cfg = cfg[docker.auth.INDEX_URL] - self.assertEqual(cfg['username'], 'sakuya') - self.assertEqual(cfg['password'], 'izayoi') - self.assertEqual(cfg['email'], 'sakuya@scarlet.net') - self.assertEqual(cfg.get('auth'), None) - - def test_load_config_with_random_name(self): - folder = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, folder) - - dockercfg_path = os.path.join(folder, - '.{0}.dockercfg'.format( - random.randrange(100000))) - registry = 'https://your.private.registry.io' - auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii') - config = { - registry: { - 'auth': '{0}'.format(auth_), - 'email': 'sakuya@scarlet.net' - } - } - - with open(dockercfg_path, 'w') as f: - f.write(json.dumps(config)) - - cfg = docker.auth.load_config(dockercfg_path) - self.assertTrue(registry in cfg) - self.assertNotEqual(cfg[registry], None) - cfg = cfg[registry] - self.assertEqual(cfg['username'], 'sakuya') - self.assertEqual(cfg['password'], 'izayoi') - self.assertEqual(cfg['email'], 'sakuya@scarlet.net') - 
self.assertEqual(cfg.get('auth'), None) - - def test_tar_with_excludes(self): - base = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, base) - for d in ['test/foo', 'bar']: - os.makedirs(os.path.join(base, d)) - for f in ['a.txt', 'b.py', 'other.png']: - with open(os.path.join(base, d, f), 'w') as f: - f.write("content") - - for exclude, names in ( - (['*.py'], ['bar', 'bar/a.txt', 'bar/other.png', - 'test', 'test/foo', 'test/foo/a.txt', - 'test/foo/other.png']), - (['*.png', 'bar'], ['test', 'test/foo', 'test/foo/a.txt', - 'test/foo/b.py']), - (['test/foo', 'a.txt'], ['bar', 'bar/a.txt', 'bar/b.py', - 'bar/other.png', 'test']), - ): - with docker.utils.tar(base, exclude=exclude) as archive: - tar = tarfile.open(fileobj=archive) - self.assertEqual(sorted(tar.getnames()), names) - - def test_tar_with_empty_directory(self): - base = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, base) - for d in ['foo', 'bar']: - os.makedirs(os.path.join(base, d)) - with docker.utils.tar(base) as archive: - tar = tarfile.open(fileobj=archive) - self.assertEqual(sorted(tar.getnames()), ['bar', 'foo']) - - def test_tar_with_file_symlinks(self): - base = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, base) - with open(os.path.join(base, 'foo'), 'w') as f: - f.write("content") - os.makedirs(os.path.join(base, 'bar')) - os.symlink('../foo', os.path.join(base, 'bar/foo')) - with docker.utils.tar(base) as archive: - tar = tarfile.open(fileobj=archive) - self.assertEqual(sorted(tar.getnames()), ['bar', 'bar/foo', 'foo']) - - def test_tar_with_directory_symlinks(self): - base = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, base) - for d in ['foo', 'bar']: - os.makedirs(os.path.join(base, d)) - os.symlink('../foo', os.path.join(base, 'bar/foo')) - with docker.utils.tar(base) as archive: - tar = tarfile.open(fileobj=archive) - self.assertEqual(sorted(tar.getnames()), ['bar', 'bar/foo', 'foo']) - - ####################### - # HOST CONFIG TESTS # - ####################### - - def test_create_host_config_secopt(self): - security_opt = ['apparmor:test_profile'] - result = create_host_config(security_opt=security_opt) - self.assertIn('SecurityOpt', result) - self.assertEqual(result['SecurityOpt'], security_opt) - - self.assertRaises( - docker.errors.DockerException, create_host_config, - security_opt='wrong' - ) - - -class StreamTest(Cleanup, base.BaseTestCase): - - def setUp(self): - socket_dir = tempfile.mkdtemp() - self.build_context = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, socket_dir) - self.addCleanup(shutil.rmtree, self.build_context) - self.socket_file = os.path.join(socket_dir, 'test_sock.sock') - self.server_socket = self._setup_socket() - self.stop_server = False - server_thread = threading.Thread(target=self.run_server) - server_thread.setDaemon(True) - server_thread.start() - self.response = None - self.request_handler = None - self.addCleanup(server_thread.join) - self.addCleanup(self.stop) - - def stop(self): - self.stop_server = True - - def _setup_socket(self): - server_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - server_sock.bind(self.socket_file) - # Non-blocking mode so that we can shut the test down easily - server_sock.setblocking(0) - server_sock.listen(5) - return server_sock - - def run_server(self): - try: - while not self.stop_server: - try: - connection, client_address = self.server_socket.accept() - except socket.error: - # Probably no connection to accept yet - time.sleep(0.01) - continue - - connection.setblocking(1) - try: - 
self.request_handler(connection) - finally: - connection.close() - finally: - self.server_socket.close() - - def early_response_sending_handler(self, connection): - data = b'' - headers = None - - connection.sendall(self.response) - while not headers: - data += connection.recv(2048) - parts = data.split(b'\r\n\r\n', 1) - if len(parts) == 2: - headers, data = parts - - mo = re.search(r'Content-Length: ([0-9]+)', headers.decode()) - assert mo - content_length = int(mo.group(1)) - - while True: - if len(data) >= content_length: - break - - data += connection.recv(2048) - - def test_early_stream_response(self): - self.request_handler = self.early_response_sending_handler - lines = [] - for i in range(0, 50): - line = str(i).encode() - lines += [('%x' % len(line)).encode(), line] - lines.append(b'0') - lines.append(b'') - - self.response = ( - b'HTTP/1.1 200 OK\r\n' - b'Transfer-Encoding: chunked\r\n' - b'\r\n' - ) + b'\r\n'.join(lines) - - with docker.Client(base_url="http+unix://" + self.socket_file) \ - as client: - for i in range(5): - try: - stream = client.build( - path=self.build_context, - stream=True - ) - break - except requests.ConnectionError as e: - if i == 4: - raise e - - self.assertEqual(list(stream), [ - str(i).encode() for i in range(50)]) - -if __name__ == '__main__': - unittest.main() diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/unit/api_build_test.py b/tests/unit/api_build_test.py new file mode 100644 index 0000000000..01958c3e1f --- /dev/null +++ b/tests/unit/api_build_test.py @@ -0,0 +1,252 @@ +import gzip +import io +import shutil + +import pytest + +import docker +from docker import auth, errors +from docker.api.build import process_dockerfile + +from ..helpers import make_tree +from .api_test import BaseAPIClientTest, fake_request, url_prefix + + +class BuildTest(BaseAPIClientTest): + def test_build_container(self): + script = io.BytesIO( + "\n".join( + [ + "FROM busybox", + "RUN mkdir -p /tmp/test", + "EXPOSE 8080", + "ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz" + " /tmp/silence.tar.gz", + ] + ).encode("ascii") + ) + + self.client.build(fileobj=script) + + def test_build_container_pull(self): + script = io.BytesIO( + "\n".join( + [ + "FROM busybox", + "RUN mkdir -p /tmp/test", + "EXPOSE 8080", + "ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz" + " /tmp/silence.tar.gz", + ] + ).encode("ascii") + ) + + self.client.build(fileobj=script, pull=True) + + def test_build_container_custom_context(self): + script = io.BytesIO( + "\n".join( + [ + "FROM busybox", + "RUN mkdir -p /tmp/test", + "EXPOSE 8080", + "ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz" + " /tmp/silence.tar.gz", + ] + ).encode("ascii") + ) + context = docker.utils.mkbuildcontext(script) + + self.client.build(fileobj=context, custom_context=True) + + def test_build_container_custom_context_gzip(self): + script = io.BytesIO( + "\n".join( + [ + "FROM busybox", + "RUN mkdir -p /tmp/test", + "EXPOSE 8080", + "ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz" + " /tmp/silence.tar.gz", + ] + ).encode("ascii") + ) + context = docker.utils.mkbuildcontext(script) + gz_context = gzip.GzipFile(fileobj=context) + + self.client.build(fileobj=gz_context, custom_context=True, encoding="gzip") + + def test_build_remote_with_registry_auth(self): + self.client._auth_configs = auth.AuthConfig( + { + "auths": { + "https://example.com": { + "user": "example", + "password": 
"example", + "email": "example@example.com", + } + } + } + ) + + expected_params = { + "t": None, + "q": False, + "dockerfile": None, + "rm": False, + "nocache": False, + "pull": False, + "forcerm": False, + "remote": "https://github.com/docker-library/mongo", + } + expected_headers = { + "X-Registry-Config": auth.encode_header(self.client._auth_configs.auths) + } + + self.client.build(path="https://github.com/docker-library/mongo") + + fake_request.assert_called_with( + "POST", + f"{url_prefix}build", + stream=True, + data=None, + headers=expected_headers, + params=expected_params, + timeout=None, + ) + + def test_build_container_with_named_dockerfile(self): + self.client.build(".", dockerfile="nameddockerfile") + + def test_build_with_invalid_tag(self): + with pytest.raises(errors.DockerException): + self.client.build(".", tag="https://example.com") + + def test_build_container_with_container_limits(self): + self.client.build( + ".", + container_limits={ + "memory": 1024 * 1024, + "cpusetcpus": 1, + "cpushares": 1000, + "memswap": 1024 * 1024 * 8, + }, + ) + + def test_build_container_invalid_container_limits(self): + with pytest.raises(docker.errors.DockerException): + self.client.build(".", container_limits={"foo": "bar"}) + + def test_set_auth_headers_with_empty_dict_and_auth_configs(self): + self.client._auth_configs = auth.AuthConfig( + { + "auths": { + "https://example.com": { + "user": "example", + "password": "example", + "email": "example@example.com", + } + } + } + ) + + headers = {} + expected_headers = { + "X-Registry-Config": auth.encode_header(self.client._auth_configs.auths) + } + + self.client._set_auth_headers(headers) + assert headers == expected_headers + + def test_set_auth_headers_with_dict_and_auth_configs(self): + self.client._auth_configs = auth.AuthConfig( + { + "auths": { + "https://example.com": { + "user": "example", + "password": "example", + "email": "example@example.com", + } + } + } + ) + + headers = {"foo": "bar"} + expected_headers = { + "X-Registry-Config": auth.encode_header(self.client._auth_configs.auths), + "foo": "bar", + } + + self.client._set_auth_headers(headers) + assert headers == expected_headers + + def test_set_auth_headers_with_dict_and_no_auth_configs(self): + headers = {"foo": "bar"} + expected_headers = {"foo": "bar"} + + self.client._set_auth_headers(headers) + assert headers == expected_headers + + @pytest.mark.skipif( + not docker.constants.IS_WINDOWS_PLATFORM, reason="Windows-specific syntax" + ) + def test_process_dockerfile_win_longpath_prefix(self): + dirs = [ + "foo", + "foo/bar", + "baz", + ] + + files = [ + "Dockerfile", + "foo/Dockerfile.foo", + "foo/bar/Dockerfile.bar", + "baz/Dockerfile.baz", + ] + + base = make_tree(dirs, files) + self.addCleanup(shutil.rmtree, base) + + def pre(path): + return docker.constants.WINDOWS_LONGPATH_PREFIX + path + + assert process_dockerfile(None, pre(base)) == (None, None) + assert process_dockerfile("Dockerfile", pre(base)) == ("Dockerfile", None) + assert process_dockerfile("foo/Dockerfile.foo", pre(base)) == ( + "foo/Dockerfile.foo", + None, + ) + assert process_dockerfile("../Dockerfile", pre(f"{base}\\foo"))[1] is not None + assert process_dockerfile("../baz/Dockerfile.baz", pre(f"{base}/baz")) == ( + "../baz/Dockerfile.baz", + None, + ) + + def test_process_dockerfile(self): + dirs = [ + "foo", + "foo/bar", + "baz", + ] + + files = [ + "Dockerfile", + "foo/Dockerfile.foo", + "foo/bar/Dockerfile.bar", + "baz/Dockerfile.baz", + ] + + base = make_tree(dirs, 
files) + self.addCleanup(shutil.rmtree, base) + + assert process_dockerfile(None, base) == (None, None) + assert process_dockerfile("Dockerfile", base) == ("Dockerfile", None) + assert process_dockerfile("foo/Dockerfile.foo", base) == ( + "foo/Dockerfile.foo", + None, + ) + assert process_dockerfile("../Dockerfile", f"{base}/foo")[1] is not None + assert process_dockerfile("../baz/Dockerfile.baz", f"{base}/baz") == ( + "../baz/Dockerfile.baz", + None, + ) diff --git a/tests/unit/api_container_test.py b/tests/unit/api_container_test.py new file mode 100644 index 0000000000..b2e5237a2a --- /dev/null +++ b/tests/unit/api_container_test.py @@ -0,0 +1,1594 @@ +import datetime +import json +import signal +from unittest import mock + +import pytest + +import docker +from docker.api import APIClient + +from ..helpers import requires_api_version +from . import fake_api +from .api_test import ( + DEFAULT_TIMEOUT_SECONDS, + BaseAPIClientTest, + fake_inspect_container, + fake_request, + url_base, + url_prefix, +) + + +def fake_inspect_container_tty(self, container): + return fake_inspect_container(self, container, tty=True) + + +class StartContainerTest(BaseAPIClientTest): + def test_start_container(self): + self.client.start(fake_api.FAKE_CONTAINER_ID) + + args = fake_request.call_args + assert args[0][1] == (url_prefix + 'containers/' + + fake_api.FAKE_CONTAINER_ID + '/start') + assert 'data' not in args[1] + assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS + + def test_start_container_none(self): + with pytest.raises(ValueError) as excinfo: + self.client.start(container=None) + + assert str(excinfo.value) == 'Resource ID was not provided' + + with pytest.raises(ValueError) as excinfo: + self.client.start(None) + + assert str(excinfo.value) == 'Resource ID was not provided' + + def test_start_container_regression_573(self): + self.client.start(**{'container': fake_api.FAKE_CONTAINER_ID}) + + def test_start_container_with_lxc_conf(self): + with pytest.raises(docker.errors.DeprecatedMethod): + self.client.start( + fake_api.FAKE_CONTAINER_ID, + lxc_conf={'lxc.conf.k': 'lxc.conf.value'} + ) + + def test_start_container_with_lxc_conf_compat(self): + with pytest.raises(docker.errors.DeprecatedMethod): + self.client.start( + fake_api.FAKE_CONTAINER_ID, + lxc_conf=[{'Key': 'lxc.conf.k', 'Value': 'lxc.conf.value'}] + ) + + def test_start_container_with_binds_ro(self): + with pytest.raises(docker.errors.DeprecatedMethod): + self.client.start( + fake_api.FAKE_CONTAINER_ID, binds={ + '/tmp': { + "bind": '/mnt', + "ro": True + } + } + ) + + def test_start_container_with_binds_rw(self): + with pytest.raises(docker.errors.DeprecatedMethod): + self.client.start( + fake_api.FAKE_CONTAINER_ID, binds={ + '/tmp': {"bind": '/mnt', "ro": False} + } + ) + + def test_start_container_with_port_binds(self): + self.maxDiff = None + + with pytest.raises(docker.errors.DeprecatedMethod): + self.client.start(fake_api.FAKE_CONTAINER_ID, port_bindings={ + 1111: None, + 2222: 2222, + '3333/udp': (3333,), + 4444: ('127.0.0.1',), + 5555: ('127.0.0.1', 5555), + 6666: [('127.0.0.1',), ('192.168.0.1',)] + }) + + def test_start_container_with_links(self): + with pytest.raises(docker.errors.DeprecatedMethod): + self.client.start( + fake_api.FAKE_CONTAINER_ID, links={'path': 'alias'} + ) + + def test_start_container_with_multiple_links(self): + with pytest.raises(docker.errors.DeprecatedMethod): + self.client.start( + fake_api.FAKE_CONTAINER_ID, + links={ + 'path1': 'alias1', + 'path2': 'alias2' + } + ) + + def 
test_start_container_with_links_as_list_of_tuples(self): + with pytest.raises(docker.errors.DeprecatedMethod): + self.client.start(fake_api.FAKE_CONTAINER_ID, + links=[('path', 'alias')]) + + def test_start_container_privileged(self): + with pytest.raises(docker.errors.DeprecatedMethod): + self.client.start(fake_api.FAKE_CONTAINER_ID, privileged=True) + + def test_start_container_with_dict_instead_of_id(self): + self.client.start({'Id': fake_api.FAKE_CONTAINER_ID}) + + args = fake_request.call_args + assert args[0][1] == (url_prefix + 'containers/' + + fake_api.FAKE_CONTAINER_ID + '/start') + assert 'data' not in args[1] + assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS + + +class CreateContainerTest(BaseAPIClientTest): + def test_create_container(self): + self.client.create_container('busybox', 'true') + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + assert json.loads(args[1]['data']) == json.loads(''' + {"Tty": false, "Image": "busybox", "Cmd": ["true"], + "AttachStdin": false, + "AttachStderr": true, "AttachStdout": true, + "StdinOnce": false, + "OpenStdin": false, "NetworkDisabled": false} + ''') + assert args[1]['headers'] == {'Content-Type': 'application/json'} + + def test_create_container_with_binds(self): + mount_dest = '/mnt' + + self.client.create_container('busybox', ['ls', mount_dest], + volumes=[mount_dest]) + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + assert json.loads(args[1]['data']) == json.loads(''' + {"Tty": false, "Image": "busybox", + "Cmd": ["ls", "/mnt"], "AttachStdin": false, + "Volumes": {"/mnt": {}}, + "AttachStderr": true, + "AttachStdout": true, "OpenStdin": false, + "StdinOnce": false, + "NetworkDisabled": false} + ''') + assert args[1]['headers'] == {'Content-Type': 'application/json'} + + def test_create_container_with_volume_string(self): + mount_dest = '/mnt' + + self.client.create_container('busybox', ['ls', mount_dest], + volumes=mount_dest) + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + assert json.loads(args[1]['data']) == json.loads(''' + {"Tty": false, "Image": "busybox", + "Cmd": ["ls", "/mnt"], "AttachStdin": false, + "Volumes": {"/mnt": {}}, + "AttachStderr": true, + "AttachStdout": true, "OpenStdin": false, + "StdinOnce": false, + "NetworkDisabled": false} + ''') + assert args[1]['headers'] == {'Content-Type': 'application/json'} + + def test_create_container_with_ports(self): + self.client.create_container('busybox', 'ls', + ports=[1111, (2222, 'udp'), (3333,)]) + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + assert json.loads(args[1]['data']) == json.loads(''' + {"Tty": false, "Image": "busybox", + "Cmd": ["ls"], "AttachStdin": false, + "ExposedPorts": { + "1111/tcp": {}, + "2222/udp": {}, + "3333/tcp": {} + }, + "AttachStderr": true, + "AttachStdout": true, "OpenStdin": false, + "StdinOnce": false, + "NetworkDisabled": false} + ''') + assert args[1]['headers'] == {'Content-Type': 'application/json'} + + def test_create_container_with_entrypoint(self): + self.client.create_container('busybox', 'hello', + entrypoint='cowsay entry') + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + assert json.loads(args[1]['data']) == json.loads(''' + {"Tty": false, "Image": "busybox", + "Cmd": ["hello"], "AttachStdin": false, + "AttachStderr": true, + "AttachStdout": true, "OpenStdin": false, + "StdinOnce": false, + "NetworkDisabled": 
false, + "Entrypoint": ["cowsay", "entry"]} + ''') + assert args[1]['headers'] == {'Content-Type': 'application/json'} + + def test_create_container_with_host_config_cpu_shares(self): + self.client.create_container( + 'busybox', 'ls', host_config=self.client.create_host_config( + cpu_shares=512 + ) + ) + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + + assert json.loads(args[1]['data']) == json.loads(''' + {"Tty": false, "Image": "busybox", + "Cmd": ["ls"], "AttachStdin": false, + "AttachStderr": true, + "AttachStdout": true, "OpenStdin": false, + "StdinOnce": false, + "NetworkDisabled": false, + "HostConfig": { + "CpuShares": 512, + "NetworkMode": "default" + }} + ''') + assert args[1]['headers'] == {'Content-Type': 'application/json'} + + def test_create_container_with_host_config_cpuset(self): + self.client.create_container( + 'busybox', 'ls', host_config=self.client.create_host_config( + cpuset_cpus='0,1' + ) + ) + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + + assert json.loads(args[1]['data']) == json.loads(''' + {"Tty": false, "Image": "busybox", + "Cmd": ["ls"], "AttachStdin": false, + "AttachStderr": true, + "AttachStdout": true, "OpenStdin": false, + "StdinOnce": false, + "NetworkDisabled": false, + "HostConfig": { + "CpusetCpus": "0,1", + "NetworkMode": "default" + }} + ''') + assert args[1]['headers'] == {'Content-Type': 'application/json'} + + def test_create_container_with_host_config_cpuset_mems(self): + self.client.create_container( + 'busybox', 'ls', host_config=self.client.create_host_config( + cpuset_mems='0' + ) + ) + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + + assert json.loads(args[1]['data']) == json.loads(''' + {"Tty": false, "Image": "busybox", + "Cmd": ["ls"], "AttachStdin": false, + "AttachStderr": true, + "AttachStdout": true, "OpenStdin": false, + "StdinOnce": false, + "NetworkDisabled": false, + "HostConfig": { + "CpusetMems": "0", + "NetworkMode": "default" + }} + ''') + assert args[1]['headers'] == {'Content-Type': 'application/json'} + + def test_create_container_with_cgroup_parent(self): + self.client.create_container( + 'busybox', 'ls', host_config=self.client.create_host_config( + cgroup_parent='test' + ) + ) + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + data = json.loads(args[1]['data']) + assert 'HostConfig' in data + assert 'CgroupParent' in data['HostConfig'] + assert data['HostConfig']['CgroupParent'] == 'test' + + def test_create_container_with_working_dir(self): + self.client.create_container('busybox', 'ls', + working_dir='/root') + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + assert json.loads(args[1]['data']) == json.loads(''' + {"Tty": false, "Image": "busybox", + "Cmd": ["ls"], "AttachStdin": false, + "AttachStderr": true, + "AttachStdout": true, "OpenStdin": false, + "StdinOnce": false, + "NetworkDisabled": false, + "WorkingDir": "/root"} + ''') + assert args[1]['headers'] == {'Content-Type': 'application/json'} + + def test_create_container_with_stdin_open(self): + self.client.create_container('busybox', 'true', stdin_open=True) + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + assert json.loads(args[1]['data']) == json.loads(''' + {"Tty": false, "Image": "busybox", "Cmd": ["true"], + "AttachStdin": true, + "AttachStderr": true, "AttachStdout": true, + "StdinOnce": true, + 
"OpenStdin": true, "NetworkDisabled": false} + ''') + assert args[1]['headers'] == {'Content-Type': 'application/json'} + + def test_create_named_container(self): + self.client.create_container('busybox', 'true', + name='marisa-kirisame') + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + assert json.loads(args[1]['data']) == json.loads(''' + {"Tty": false, "Image": "busybox", "Cmd": ["true"], + "AttachStdin": false, + "AttachStderr": true, "AttachStdout": true, + "StdinOnce": false, + "OpenStdin": false, "NetworkDisabled": false} + ''') + assert args[1]['headers'] == {'Content-Type': 'application/json'} + assert args[1]['params'] == {'name': 'marisa-kirisame'} + + def test_create_container_with_platform(self): + self.client.create_container('busybox', 'true', + platform='linux') + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + assert json.loads(args[1]['data']) == json.loads(''' + {"Tty": false, "Image": "busybox", "Cmd": ["true"], + "AttachStdin": false, + "AttachStderr": true, "AttachStdout": true, + "StdinOnce": false, + "OpenStdin": false, "NetworkDisabled": false} + ''') + assert args[1]['headers'] == {'Content-Type': 'application/json'} + assert args[1]['params'] == {'name': None, 'platform': 'linux'} + + def test_create_container_with_mem_limit_as_int(self): + self.client.create_container( + 'busybox', 'true', host_config=self.client.create_host_config( + mem_limit=128.0 + ) + ) + + args = fake_request.call_args + data = json.loads(args[1]['data']) + assert data['HostConfig']['Memory'] == 128.0 + + def test_create_container_with_mem_limit_as_string(self): + self.client.create_container( + 'busybox', 'true', host_config=self.client.create_host_config( + mem_limit='128' + ) + ) + + args = fake_request.call_args + data = json.loads(args[1]['data']) + assert data['HostConfig']['Memory'] == 128.0 + + def test_create_container_with_mem_limit_as_string_with_k_unit(self): + self.client.create_container( + 'busybox', 'true', host_config=self.client.create_host_config( + mem_limit='128k' + ) + ) + + args = fake_request.call_args + data = json.loads(args[1]['data']) + assert data['HostConfig']['Memory'] == 128.0 * 1024 + + def test_create_container_with_mem_limit_as_string_with_m_unit(self): + self.client.create_container( + 'busybox', 'true', host_config=self.client.create_host_config( + mem_limit='128m' + ) + ) + + args = fake_request.call_args + data = json.loads(args[1]['data']) + assert data['HostConfig']['Memory'] == 128.0 * 1024 * 1024 + + def test_create_container_with_mem_limit_as_string_with_g_unit(self): + self.client.create_container( + 'busybox', 'true', host_config=self.client.create_host_config( + mem_limit='128g' + ) + ) + + args = fake_request.call_args + data = json.loads(args[1]['data']) + assert data['HostConfig']['Memory'] == 128.0 * 1024 * 1024 * 1024 + + def test_create_container_with_mem_limit_as_string_with_wrong_value(self): + with pytest.raises(docker.errors.DockerException): + self.client.create_host_config(mem_limit='128p') + + with pytest.raises(docker.errors.DockerException): + self.client.create_host_config(mem_limit='1f28') + + def test_create_container_with_lxc_conf(self): + self.client.create_container( + 'busybox', 'true', host_config=self.client.create_host_config( + lxc_conf={'lxc.conf.k': 'lxc.conf.value'} + ) + ) + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + expected_payload = self.base_create_payload() + 
expected_payload['HostConfig'] = self.client.create_host_config() + expected_payload['HostConfig']['LxcConf'] = [ + {"Value": "lxc.conf.value", "Key": "lxc.conf.k"} + ] + + assert json.loads(args[1]['data']) == expected_payload + assert args[1]['headers'] == {'Content-Type': 'application/json'} + assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS + + def test_create_container_with_lxc_conf_compat(self): + self.client.create_container( + 'busybox', 'true', host_config=self.client.create_host_config( + lxc_conf=[{'Key': 'lxc.conf.k', 'Value': 'lxc.conf.value'}] + ) + ) + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + expected_payload = self.base_create_payload() + expected_payload['HostConfig'] = self.client.create_host_config() + expected_payload['HostConfig']['LxcConf'] = [ + {"Value": "lxc.conf.value", "Key": "lxc.conf.k"} + ] + assert json.loads(args[1]['data']) == expected_payload + assert args[1]['headers'] == {'Content-Type': 'application/json'} + assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS + + def test_create_container_with_binds_ro(self): + mount_dest = '/mnt' + mount_origin = '/tmp' + + self.client.create_container( + 'busybox', 'true', host_config=self.client.create_host_config( + binds={mount_origin: { + "bind": mount_dest, + "ro": True + }} + ) + ) + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + expected_payload = self.base_create_payload() + expected_payload['HostConfig'] = self.client.create_host_config() + expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:ro"] + assert json.loads(args[1]['data']) == expected_payload + assert args[1]['headers'] == {'Content-Type': 'application/json'} + assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS + + def test_create_container_with_binds_rw(self): + mount_dest = '/mnt' + mount_origin = '/tmp' + + self.client.create_container( + 'busybox', 'true', host_config=self.client.create_host_config( + binds={mount_origin: { + "bind": mount_dest, + "ro": False + }} + ) + ) + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + expected_payload = self.base_create_payload() + expected_payload['HostConfig'] = self.client.create_host_config() + expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:rw"] + assert json.loads(args[1]['data']) == expected_payload + assert args[1]['headers'] == {'Content-Type': 'application/json'} + assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS + + def test_create_container_with_binds_mode(self): + mount_dest = '/mnt' + mount_origin = '/tmp' + + self.client.create_container( + 'busybox', 'true', host_config=self.client.create_host_config( + binds={mount_origin: { + "bind": mount_dest, + "mode": "z", + }} + ) + ) + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + expected_payload = self.base_create_payload() + expected_payload['HostConfig'] = self.client.create_host_config() + expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:z"] + assert json.loads(args[1]['data']) == expected_payload + assert args[1]['headers'] == {'Content-Type': 'application/json'} + assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS + + def test_create_container_with_binds_mode_and_ro_error(self): + with pytest.raises(ValueError): + mount_dest = '/mnt' + mount_origin = '/tmp' + self.client.create_container( + 'busybox', 'true', host_config=self.client.create_host_config( + binds={mount_origin: { + "bind": mount_dest, + "mode": "z", + "ro": True, + }} + ) + ) + + 
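
Taken together, the binds tests above pin down the host-volume serialization contract: each {origin: {'bind': dest, ...}} entry collapses into a single 'origin:dest:mode' string, 'ro' selects between ro and rw (defaulting to rw), an explicit 'mode' such as SELinux's z passes through verbatim, combining 'ro' with 'mode' raises ValueError, and list-form binds, as the next test shows, are forwarded untouched. A standalone sketch of that rule, illustrative only and not the library's actual helper:

    def to_bind_strings(binds):
        # Fold a binds mapping into Docker's 'origin:dest:mode' strings,
        # mirroring the HostConfig payloads asserted in these tests.
        if isinstance(binds, list):
            return binds  # already strings; passed through as-is
        result = []
        for origin, spec in binds.items():
            if 'ro' in spec and 'mode' in spec:
                raise ValueError("Binds cannot specify both 'ro' and 'mode'")
            if 'ro' in spec:
                mode = 'ro' if spec['ro'] else 'rw'
            else:
                mode = spec.get('mode', 'rw')
            result.append(f"{origin}:{spec['bind']}:{mode}")
        return result


    assert to_bind_strings({'/tmp': {'bind': '/mnt', 'ro': True}}) == ['/tmp:/mnt:ro']
    assert to_bind_strings({'/tmp': {'bind': '/mnt', 'mode': 'z'}}) == ['/tmp:/mnt:z']
    assert to_bind_strings(['/tmp:/mnt/1:ro', '/tmp:/mnt/2']) == ['/tmp:/mnt/1:ro', '/tmp:/mnt/2']
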
def test_create_container_with_binds_list(self): + self.client.create_container( + 'busybox', 'true', host_config=self.client.create_host_config( + binds=[ + "/tmp:/mnt/1:ro", + "/tmp:/mnt/2", + ], + ) + ) + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + expected_payload = self.base_create_payload() + expected_payload['HostConfig'] = self.client.create_host_config() + expected_payload['HostConfig']['Binds'] = [ + "/tmp:/mnt/1:ro", + "/tmp:/mnt/2", + ] + assert json.loads(args[1]['data']) == expected_payload + assert args[1]['headers'] == {'Content-Type': 'application/json'} + assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS + + def test_create_container_with_port_binds(self): + self.maxDiff = None + + self.client.create_container( + 'busybox', 'true', host_config=self.client.create_host_config( + port_bindings={ + 1111: None, + 2222: 2222, + '3333/udp': (3333,), + 4444: ('127.0.0.1',), + 5555: ('127.0.0.1', 5555), + 6666: [('127.0.0.1',), ('192.168.0.1',)] + } + ) + ) + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + data = json.loads(args[1]['data']) + port_bindings = data['HostConfig']['PortBindings'] + assert '1111/tcp' in port_bindings + assert '2222/tcp' in port_bindings + assert '3333/udp' in port_bindings + assert '4444/tcp' in port_bindings + assert '5555/tcp' in port_bindings + assert '6666/tcp' in port_bindings + assert [{"HostPort": "", "HostIp": ""}] == port_bindings['1111/tcp'] + assert [ + {"HostPort": "2222", "HostIp": ""} + ] == port_bindings['2222/tcp'] + assert [ + {"HostPort": "3333", "HostIp": ""} + ] == port_bindings['3333/udp'] + assert [ + {"HostPort": "", "HostIp": "127.0.0.1"} + ] == port_bindings['4444/tcp'] + assert [ + {"HostPort": "5555", "HostIp": "127.0.0.1"} + ] == port_bindings['5555/tcp'] + assert len(port_bindings['6666/tcp']) == 2 + assert args[1]['headers'] == {'Content-Type': 'application/json'} + assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS + + def test_create_container_with_mac_address(self): + expected = "02:42:ac:11:00:0a" + + self.client.create_container( + 'busybox', + ['sleep', '60'], + mac_address=expected + ) + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + data = json.loads(args[1]['data']) + assert data['MacAddress'] == expected + + def test_create_container_with_links(self): + link_path = 'path' + alias = 'alias' + + self.client.create_container( + 'busybox', 'true', host_config=self.client.create_host_config( + links={link_path: alias} + ) + ) + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + expected_payload = self.base_create_payload() + expected_payload['HostConfig'] = self.client.create_host_config() + expected_payload['HostConfig']['Links'] = ['path:alias'] + + assert json.loads(args[1]['data']) == expected_payload + assert args[1]['headers'] == {'Content-Type': 'application/json'} + + def test_create_container_with_multiple_links(self): + link_path = 'path' + alias = 'alias' + + self.client.create_container( + 'busybox', 'true', host_config=self.client.create_host_config( + links={ + link_path + '1': alias + '1', + link_path + '2': alias + '2' + } + ) + ) + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + expected_payload = self.base_create_payload() + expected_payload['HostConfig'] = self.client.create_host_config() + expected_payload['HostConfig']['Links'] = [ + 'path1:alias1', 'path2:alias2' + ] + assert 
json.loads(args[1]['data']) == expected_payload + assert args[1]['headers'] == {'Content-Type': 'application/json'} + + def test_create_container_with_links_as_list_of_tuples(self): + link_path = 'path' + alias = 'alias' + + self.client.create_container( + 'busybox', 'true', host_config=self.client.create_host_config( + links=[(link_path, alias)] + ) + ) + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + expected_payload = self.base_create_payload() + expected_payload['HostConfig'] = self.client.create_host_config() + expected_payload['HostConfig']['Links'] = ['path:alias'] + + assert json.loads(args[1]['data']) == expected_payload + assert args[1]['headers'] == {'Content-Type': 'application/json'} + + def test_create_container_privileged(self): + self.client.create_container( + 'busybox', 'true', + host_config=self.client.create_host_config(privileged=True) + ) + + expected_payload = self.base_create_payload() + expected_payload['HostConfig'] = self.client.create_host_config() + expected_payload['HostConfig']['Privileged'] = True + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + assert json.loads(args[1]['data']) == expected_payload + assert args[1]['headers'] == {'Content-Type': 'application/json'} + assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS + + def test_create_container_with_restart_policy(self): + self.client.create_container( + 'busybox', 'true', host_config=self.client.create_host_config( + restart_policy={ + "Name": "always", + "MaximumRetryCount": 0 + } + ) + ) + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + + expected_payload = self.base_create_payload() + expected_payload['HostConfig'] = self.client.create_host_config() + expected_payload['HostConfig']['RestartPolicy'] = { + "MaximumRetryCount": 0, "Name": "always" + } + assert json.loads(args[1]['data']) == expected_payload + + assert args[1]['headers'] == {'Content-Type': 'application/json'} + assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS + + def test_create_container_with_added_capabilities(self): + self.client.create_container( + 'busybox', 'true', + host_config=self.client.create_host_config(cap_add=['MKNOD']) + ) + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + expected_payload = self.base_create_payload() + expected_payload['HostConfig'] = self.client.create_host_config() + expected_payload['HostConfig']['CapAdd'] = ['MKNOD'] + assert json.loads(args[1]['data']) == expected_payload + assert args[1]['headers'] == {'Content-Type': 'application/json'} + assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS + + def test_create_container_with_dropped_capabilities(self): + self.client.create_container( + 'busybox', 'true', + host_config=self.client.create_host_config(cap_drop=['MKNOD']) + ) + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + expected_payload = self.base_create_payload() + expected_payload['HostConfig'] = self.client.create_host_config() + expected_payload['HostConfig']['CapDrop'] = ['MKNOD'] + assert json.loads(args[1]['data']) == expected_payload + assert args[1]['headers'] == {'Content-Type': 'application/json'} + assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS + + def test_create_container_with_devices(self): + self.client.create_container( + 'busybox', 'true', host_config=self.client.create_host_config( + devices=['/dev/sda:/dev/xvda:rwm', + '/dev/sdb:/dev/xvdb', + '/dev/sdc'] + ) + 
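# Illustrative reviewer note (not part of the original patch): shorthand device + # strings are expected to be expanded by docker.utils.parse_devices, defaulting + # the in-container path to the host path and the permissions to 'rwm', as the + # expected Devices payload below shows. +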
) + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + expected_payload = self.base_create_payload() + expected_payload['HostConfig'] = self.client.create_host_config() + expected_payload['HostConfig']['Devices'] = [ + {'CgroupPermissions': 'rwm', + 'PathInContainer': '/dev/xvda', + 'PathOnHost': '/dev/sda'}, + {'CgroupPermissions': 'rwm', + 'PathInContainer': '/dev/xvdb', + 'PathOnHost': '/dev/sdb'}, + {'CgroupPermissions': 'rwm', + 'PathInContainer': '/dev/sdc', + 'PathOnHost': '/dev/sdc'} + ] + assert json.loads(args[1]['data']) == expected_payload + assert args[1]['headers'] == {'Content-Type': 'application/json'} + assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS + + def test_create_container_with_device_requests(self): + client = APIClient(version='1.40') + fake_api.fake_responses.setdefault( + f'{fake_api.prefix}/v1.40/containers/create', + fake_api.post_fake_create_container, + ) + client.create_container( + 'busybox', 'true', host_config=client.create_host_config( + device_requests=[ + { + 'device_ids': [ + '0', + 'GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a' + ] + }, + { + 'driver': 'nvidia', + 'Count': -1, + 'capabilities': [ + ['gpu', 'utility'] + ], + 'options': { + 'key': 'value' + } + } + ] + ) + ) + + args = fake_request.call_args + assert args[0][1] == url_base + 'v1.40/' + 'containers/create' + expected_payload = self.base_create_payload() + expected_payload['HostConfig'] = client.create_host_config() + expected_payload['HostConfig']['DeviceRequests'] = [ + { + 'Driver': '', + 'Count': 0, + 'DeviceIDs': [ + '0', + 'GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a' + ], + 'Capabilities': [], + 'Options': {} + }, + { + 'Driver': 'nvidia', + 'Count': -1, + 'DeviceIDs': [], + 'Capabilities': [ + ['gpu', 'utility'] + ], + 'Options': { + 'key': 'value' + } + } + ] + assert json.loads(args[1]['data']) == expected_payload + assert args[1]['headers']['Content-Type'] == 'application/json' + assert set(args[1]['headers']) <= {'Content-Type', 'User-Agent'} + assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS + + def test_create_container_with_labels_dict(self): + labels_dict = { + 'foo': '1', + 'bar': '2', + } + + self.client.create_container( + 'busybox', 'true', + labels=labels_dict, + ) + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + assert json.loads(args[1]['data'])['Labels'] == labels_dict + assert args[1]['headers'] == {'Content-Type': 'application/json'} + assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS + + def test_create_container_with_labels_list(self): + labels_list = [ + 'foo', + 'bar', + ] + labels_dict = { + 'foo': '', + 'bar': '', + } + + self.client.create_container( + 'busybox', 'true', + labels=labels_list, + ) + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + assert json.loads(args[1]['data'])['Labels'] == labels_dict + assert args[1]['headers'] == {'Content-Type': 'application/json'} + assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS + + def test_create_container_with_named_volume(self): + mount_dest = '/mnt' + volume_name = 'name' + + self.client.create_container( + 'busybox', 'true', + host_config=self.client.create_host_config( + volume_driver='foodriver', + binds={volume_name: { + "bind": mount_dest, + "ro": False + }}), + ) + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + expected_payload = self.base_create_payload() + expected_payload['HostConfig'] = self.client.create_host_config() + 
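# Illustrative reviewer note (not part of the original patch): a named volume + # reuses the Binds syntax with the volume name in place of a host path, so the + # bind above is expected to serialize to "name:/mnt:rw". +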
expected_payload['HostConfig']['VolumeDriver'] = 'foodriver' + expected_payload['HostConfig']['Binds'] = ["name:/mnt:rw"] + assert json.loads(args[1]['data']) == expected_payload + assert args[1]['headers'] == {'Content-Type': 'application/json'} + assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS + + def test_create_container_with_stop_signal(self): + self.client.create_container('busybox', 'ls', + stop_signal='SIGINT') + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + assert json.loads(args[1]['data']) == json.loads(''' + {"Tty": false, "Image": "busybox", + "Cmd": ["ls"], "AttachStdin": false, + "AttachStderr": true, + "AttachStdout": true, "OpenStdin": false, + "StdinOnce": false, + "NetworkDisabled": false, + "StopSignal": "SIGINT"} + ''') + assert args[1]['headers'] == {'Content-Type': 'application/json'} + + @requires_api_version('1.22') + def test_create_container_with_aliases(self): + self.client.create_container( + 'busybox', 'ls', + host_config=self.client.create_host_config( + network_mode='some-network', + ), + networking_config=self.client.create_networking_config({ + 'some-network': self.client.create_endpoint_config( + aliases=['foo', 'bar'], + ), + }), + ) + + args = fake_request.call_args + assert json.loads(args[1]['data']) == json.loads(''' + {"Tty": false, "Image": "busybox", + "Cmd": ["ls"], "AttachStdin": false, + "AttachStderr": true, + "AttachStdout": true, "OpenStdin": false, + "StdinOnce": false, + "NetworkDisabled": false, + "HostConfig": { + "NetworkMode": "some-network" + }, + "NetworkingConfig": { + "EndpointsConfig": { + "some-network": {"Aliases": ["foo", "bar"]} + } + }} + ''') + + @requires_api_version('1.22') + def test_create_container_with_tmpfs_list(self): + + self.client.create_container( + 'busybox', 'true', host_config=self.client.create_host_config( + tmpfs=[ + "/tmp", + "/mnt:size=3G,uid=100" + ] + ) + ) + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + expected_payload = self.base_create_payload() + expected_payload['HostConfig'] = self.client.create_host_config() + expected_payload['HostConfig']['Tmpfs'] = { + "/tmp": "", + "/mnt": "size=3G,uid=100" + } + assert json.loads(args[1]['data']) == expected_payload + assert args[1]['headers'] == {'Content-Type': 'application/json'} + assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS + + @requires_api_version('1.22') + def test_create_container_with_tmpfs_dict(self): + + self.client.create_container( + 'busybox', 'true', host_config=self.client.create_host_config( + tmpfs={ + "/tmp": "", + "/mnt": "size=3G,uid=100" + } + ) + ) + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + expected_payload = self.base_create_payload() + expected_payload['HostConfig'] = self.client.create_host_config() + expected_payload['HostConfig']['Tmpfs'] = { + "/tmp": "", + "/mnt": "size=3G,uid=100" + } + assert json.loads(args[1]['data']) == expected_payload + assert args[1]['headers'] == {'Content-Type': 'application/json'} + assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS + + @requires_api_version('1.24') + def test_create_container_with_sysctl(self): + self.client.create_container( + 'busybox', 'true', + host_config=self.client.create_host_config( + sysctls={ + 'net.core.somaxconn': 1024, + 'net.ipv4.tcp_syncookies': '0', + } + ) + ) + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + expected_payload = self.base_create_payload() + 
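# Illustrative reviewer note (not part of the original patch): integer sysctl + # values are expected to be coerced to strings (1024 -> '1024'), since the + # Engine API requires Sysctls entries to be string-valued. +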
expected_payload['HostConfig'] = self.client.create_host_config() + expected_payload['HostConfig']['Sysctls'] = { + 'net.core.somaxconn': '1024', 'net.ipv4.tcp_syncookies': '0', + } + assert json.loads(args[1]['data']) == expected_payload + assert args[1]['headers'] == {'Content-Type': 'application/json'} + assert args[1]['timeout'] == DEFAULT_TIMEOUT_SECONDS + + def test_create_container_with_unicode_envvars(self): + envvars_dict = { + 'foo': '☃', + } + + expected = [ + 'foo=☃' + ] + + self.client.create_container( + 'busybox', 'true', + environment=envvars_dict, + ) + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + assert json.loads(args[1]['data'])['Env'] == expected + + @requires_api_version('1.25') + def test_create_container_with_host_config_cpus(self): + self.client.create_container( + 'busybox', 'ls', host_config=self.client.create_host_config( + cpu_count=1, + cpu_percent=20, + nano_cpus=1000 + ) + ) + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + + assert json.loads(args[1]['data']) == json.loads(''' + {"Tty": false, "Image": "busybox", + "Cmd": ["ls"], "AttachStdin": false, + "AttachStderr": true, + "AttachStdout": true, "OpenStdin": false, + "StdinOnce": false, + "NetworkDisabled": false, + "HostConfig": { + "CpuCount": 1, + "CpuPercent": 20, + "NanoCpus": 1000, + "NetworkMode": "default" + }} + ''') + assert args[1]['headers'] == {'Content-Type': 'application/json'} + + @requires_api_version('1.41') + def test_create_container_with_cgroupns(self): + self.client.create_container( + image='busybox', + command='true', + host_config=self.client.create_host_config( + cgroupns='private', + ), + ) + + args = fake_request.call_args + assert args[0][1] == url_prefix + 'containers/create' + + expected_payload = self.base_create_payload() + expected_payload['HostConfig'] = self.client.create_host_config() + expected_payload['HostConfig']['CgroupnsMode'] = 'private' + assert json.loads(args[1]['data']) == expected_payload + assert args[1]['headers'] == {'Content-Type': 'application/json'} + + +class ContainerTest(BaseAPIClientTest): + def test_list_containers(self): + self.client.containers(all=True) + + fake_request.assert_called_with( + 'GET', + url_prefix + 'containers/json', + params={ + 'all': 1, + 'since': None, + 'size': 0, + 'limit': -1, + 'trunc_cmd': 0, + 'before': None + }, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_resize_container(self): + self.client.resize( + {'Id': fake_api.FAKE_CONTAINER_ID}, + height=15, + width=120 + ) + + fake_request.assert_called_with( + 'POST', + (url_prefix + 'containers/' + + fake_api.FAKE_CONTAINER_ID + '/resize'), + params={'h': 15, 'w': 120}, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_rename_container(self): + self.client.rename( + {'Id': fake_api.FAKE_CONTAINER_ID}, + name='foobar' + ) + + fake_request.assert_called_with( + 'POST', + (url_prefix + 'containers/' + + fake_api.FAKE_CONTAINER_ID + '/rename'), + params={'name': 'foobar'}, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_wait(self): + self.client.wait(fake_api.FAKE_CONTAINER_ID) + + fake_request.assert_called_with( + 'POST', + url_prefix + 'containers/' + fake_api.FAKE_CONTAINER_ID + '/wait', + timeout=None, + params={} + ) + + def test_wait_with_dict_instead_of_id(self): + self.client.wait({'Id': fake_api.FAKE_CONTAINER_ID}) + + fake_request.assert_called_with( + 'POST', + url_prefix + 'containers/' + fake_api.FAKE_CONTAINER_ID + '/wait', + timeout=None, + params={} + ) + + def 
test_logs(self): + with mock.patch('docker.api.client.APIClient.inspect_container', + fake_inspect_container): + logs = self.client.logs(fake_api.FAKE_CONTAINER_ID) + + fake_request.assert_called_with( + 'GET', + url_prefix + 'containers/' + fake_api.FAKE_CONTAINER_ID + '/logs', + params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1, + 'tail': 'all'}, + timeout=DEFAULT_TIMEOUT_SECONDS, + stream=False + ) + + assert logs == b'Flowering Nights\n(Sakuya Iyazoi)\n' + + def test_logs_with_dict_instead_of_id(self): + with mock.patch('docker.api.client.APIClient.inspect_container', + fake_inspect_container): + logs = self.client.logs({'Id': fake_api.FAKE_CONTAINER_ID}) + + fake_request.assert_called_with( + 'GET', + url_prefix + 'containers/' + fake_api.FAKE_CONTAINER_ID + '/logs', + params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1, + 'tail': 'all'}, + timeout=DEFAULT_TIMEOUT_SECONDS, + stream=False + ) + + assert logs == b'Flowering Nights\n(Sakuya Iyazoi)\n' + + def test_log_streaming(self): + with mock.patch('docker.api.client.APIClient.inspect_container', + fake_inspect_container): + self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=True, + follow=False) + + fake_request.assert_called_with( + 'GET', + url_prefix + 'containers/' + fake_api.FAKE_CONTAINER_ID + '/logs', + params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1, + 'tail': 'all'}, + timeout=DEFAULT_TIMEOUT_SECONDS, + stream=True + ) + + def test_log_following(self): + with mock.patch('docker.api.client.APIClient.inspect_container', + fake_inspect_container): + self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False, + follow=True) + + fake_request.assert_called_with( + 'GET', + url_prefix + 'containers/' + fake_api.FAKE_CONTAINER_ID + '/logs', + params={'timestamps': 0, 'follow': 1, 'stderr': 1, 'stdout': 1, + 'tail': 'all'}, + timeout=DEFAULT_TIMEOUT_SECONDS, + stream=False + ) + + def test_log_following_backwards(self): + with mock.patch('docker.api.client.APIClient.inspect_container', + fake_inspect_container): + self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=True) + + fake_request.assert_called_with( + 'GET', + url_prefix + 'containers/' + fake_api.FAKE_CONTAINER_ID + '/logs', + params={'timestamps': 0, 'follow': 1, 'stderr': 1, 'stdout': 1, + 'tail': 'all'}, + timeout=DEFAULT_TIMEOUT_SECONDS, + stream=True + ) + + def test_log_streaming_and_following(self): + with mock.patch('docker.api.client.APIClient.inspect_container', + fake_inspect_container): + self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=True, + follow=True) + + fake_request.assert_called_with( + 'GET', + url_prefix + 'containers/' + fake_api.FAKE_CONTAINER_ID + '/logs', + params={'timestamps': 0, 'follow': 1, 'stderr': 1, 'stdout': 1, + 'tail': 'all'}, + timeout=DEFAULT_TIMEOUT_SECONDS, + stream=True + ) + + def test_log_tail(self): + + with mock.patch('docker.api.client.APIClient.inspect_container', + fake_inspect_container): + self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False, + follow=False, tail=10) + + fake_request.assert_called_with( + 'GET', + url_prefix + 'containers/' + fake_api.FAKE_CONTAINER_ID + '/logs', + params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1, + 'tail': 10}, + timeout=DEFAULT_TIMEOUT_SECONDS, + stream=False + ) + + def test_log_since(self): + ts = 809222400 + with mock.patch('docker.api.client.APIClient.inspect_container', + fake_inspect_container): + self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False, + follow=False, since=ts) + + fake_request.assert_called_with( + 
'GET', + url_prefix + 'containers/' + fake_api.FAKE_CONTAINER_ID + '/logs', + params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1, + 'tail': 'all', 'since': ts}, + timeout=DEFAULT_TIMEOUT_SECONDS, + stream=False + ) + + def test_log_since_with_float(self): + ts = 809222400.000000 + with mock.patch('docker.api.client.APIClient.inspect_container', + fake_inspect_container): + self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False, + follow=False, since=ts) + + fake_request.assert_called_with( + 'GET', + url_prefix + 'containers/' + fake_api.FAKE_CONTAINER_ID + '/logs', + params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1, + 'tail': 'all', 'since': ts}, + timeout=DEFAULT_TIMEOUT_SECONDS, + stream=False + ) + + def test_log_since_with_datetime(self): + ts = 809222400 + time = datetime.datetime.utcfromtimestamp(ts) + with mock.patch('docker.api.client.APIClient.inspect_container', + fake_inspect_container): + self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False, + follow=False, since=time) + + fake_request.assert_called_with( + 'GET', + url_prefix + 'containers/' + fake_api.FAKE_CONTAINER_ID + '/logs', + params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1, + 'tail': 'all', 'since': ts}, + timeout=DEFAULT_TIMEOUT_SECONDS, + stream=False + ) + + def test_log_since_with_invalid_value_raises_error(self): + with mock.patch('docker.api.client.APIClient.inspect_container', + fake_inspect_container): + with pytest.raises(docker.errors.InvalidArgument): + self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False, + follow=False, since="42.42") + + def test_log_tty(self): + m = mock.Mock() + with mock.patch('docker.api.client.APIClient.inspect_container', + fake_inspect_container_tty): + with mock.patch('docker.api.client.APIClient._stream_raw_result', + m): + self.client.logs(fake_api.FAKE_CONTAINER_ID, + follow=True, stream=True) + + assert m.called + fake_request.assert_called_with( + 'GET', + url_prefix + 'containers/' + fake_api.FAKE_CONTAINER_ID + '/logs', + params={'timestamps': 0, 'follow': 1, 'stderr': 1, 'stdout': 1, + 'tail': 'all'}, + timeout=DEFAULT_TIMEOUT_SECONDS, + stream=True + ) + + def test_diff(self): + self.client.diff(fake_api.FAKE_CONTAINER_ID) + + fake_request.assert_called_with( + 'GET', + (url_prefix + 'containers/' + + fake_api.FAKE_CONTAINER_ID + '/changes'), + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_diff_with_dict_instead_of_id(self): + self.client.diff({'Id': fake_api.FAKE_CONTAINER_ID}) + + fake_request.assert_called_with( + 'GET', + (url_prefix + 'containers/' + + fake_api.FAKE_CONTAINER_ID + '/changes'), + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_port(self): + self.client.port({'Id': fake_api.FAKE_CONTAINER_ID}, 1111) + + fake_request.assert_called_with( + 'GET', + url_prefix + 'containers/' + fake_api.FAKE_CONTAINER_ID + '/json', + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_stop_container(self): + timeout = 2 + + self.client.stop(fake_api.FAKE_CONTAINER_ID, timeout=timeout) + + fake_request.assert_called_with( + 'POST', + url_prefix + 'containers/' + fake_api.FAKE_CONTAINER_ID + '/stop', + params={'t': timeout}, + timeout=(DEFAULT_TIMEOUT_SECONDS + timeout) + ) + + def test_stop_container_with_dict_instead_of_id(self): + timeout = 2 + + self.client.stop({'Id': fake_api.FAKE_CONTAINER_ID}, + timeout=timeout) + + fake_request.assert_called_with( + 'POST', + url_prefix + 'containers/' + fake_api.FAKE_CONTAINER_ID + '/stop', + params={'t': timeout}, + timeout=(DEFAULT_TIMEOUT_SECONDS + timeout) + ) + + def 
test_pause_container(self): + self.client.pause(fake_api.FAKE_CONTAINER_ID) + + fake_request.assert_called_with( + 'POST', + (url_prefix + 'containers/' + + fake_api.FAKE_CONTAINER_ID + '/pause'), + timeout=(DEFAULT_TIMEOUT_SECONDS) + ) + + def test_unpause_container(self): + self.client.unpause(fake_api.FAKE_CONTAINER_ID) + + fake_request.assert_called_with( + 'POST', + (url_prefix + 'containers/' + + fake_api.FAKE_CONTAINER_ID + '/unpause'), + timeout=(DEFAULT_TIMEOUT_SECONDS) + ) + + def test_kill_container(self): + self.client.kill(fake_api.FAKE_CONTAINER_ID) + + fake_request.assert_called_with( + 'POST', + url_prefix + 'containers/' + fake_api.FAKE_CONTAINER_ID + '/kill', + params={}, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_kill_container_with_dict_instead_of_id(self): + self.client.kill({'Id': fake_api.FAKE_CONTAINER_ID}) + + fake_request.assert_called_with( + 'POST', + url_prefix + 'containers/' + fake_api.FAKE_CONTAINER_ID + '/kill', + params={}, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_kill_container_with_signal(self): + self.client.kill(fake_api.FAKE_CONTAINER_ID, signal=signal.SIGTERM) + + fake_request.assert_called_with( + 'POST', + url_prefix + 'containers/' + fake_api.FAKE_CONTAINER_ID + '/kill', + params={'signal': signal.SIGTERM}, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_restart_container(self): + self.client.restart(fake_api.FAKE_CONTAINER_ID, timeout=2) + + fake_request.assert_called_with( + 'POST', + (url_prefix + 'containers/' + + fake_api.FAKE_CONTAINER_ID + '/restart'), + params={'t': 2}, + timeout=(DEFAULT_TIMEOUT_SECONDS + 2) + ) + + def test_restart_container_with_dict_instead_of_id(self): + self.client.restart({'Id': fake_api.FAKE_CONTAINER_ID}, timeout=2) + + fake_request.assert_called_with( + 'POST', + (url_prefix + 'containers/' + + fake_api.FAKE_CONTAINER_ID + '/restart'), + params={'t': 2}, + timeout=(DEFAULT_TIMEOUT_SECONDS + 2) + ) + + def test_remove_container(self): + self.client.remove_container(fake_api.FAKE_CONTAINER_ID) + + fake_request.assert_called_with( + 'DELETE', + url_prefix + 'containers/' + fake_api.FAKE_CONTAINER_ID, + params={'v': False, 'link': False, 'force': False}, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_remove_container_with_dict_instead_of_id(self): + self.client.remove_container({'Id': fake_api.FAKE_CONTAINER_ID}) + + fake_request.assert_called_with( + 'DELETE', + url_prefix + 'containers/' + fake_api.FAKE_CONTAINER_ID, + params={'v': False, 'link': False, 'force': False}, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_export(self): + self.client.export(fake_api.FAKE_CONTAINER_ID) + + fake_request.assert_called_with( + 'GET', + (url_prefix + 'containers/' + + fake_api.FAKE_CONTAINER_ID + '/export'), + stream=True, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_export_with_dict_instead_of_id(self): + self.client.export({'Id': fake_api.FAKE_CONTAINER_ID}) + + fake_request.assert_called_with( + 'GET', + (url_prefix + 'containers/' + + fake_api.FAKE_CONTAINER_ID + '/export'), + stream=True, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_inspect_container(self): + self.client.inspect_container(fake_api.FAKE_CONTAINER_ID) + + fake_request.assert_called_with( + 'GET', + url_prefix + 'containers/' + fake_api.FAKE_CONTAINER_ID + '/json', + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_inspect_container_undefined_id(self): + for arg in None, '', {True: True}: + with pytest.raises(docker.errors.NullResource) as excinfo: + self.client.inspect_container(arg) + + assert excinfo.value.args[0] 
== 'Resource ID was not provided' + + def test_container_stats(self): + self.client.stats(fake_api.FAKE_CONTAINER_ID) + + fake_request.assert_called_with( + 'GET', + url_prefix + 'containers/' + fake_api.FAKE_CONTAINER_ID + '/stats', + stream=True, + timeout=60, + params={'stream': True} + ) + + def test_container_stats_without_streaming(self): + self.client.stats(fake_api.FAKE_CONTAINER_ID, stream=False) + + fake_request.assert_called_with( + 'GET', + url_prefix + 'containers/' + fake_api.FAKE_CONTAINER_ID + '/stats', + timeout=60, + params={'stream': False} + ) + + def test_container_stats_with_one_shot(self): + self.client.stats( + fake_api.FAKE_CONTAINER_ID, stream=False, one_shot=True) + + fake_request.assert_called_with( + 'GET', + url_prefix + 'containers/' + fake_api.FAKE_CONTAINER_ID + '/stats', + timeout=60, + params={'stream': False, 'one-shot': True} + ) + + def test_container_top(self): + self.client.top(fake_api.FAKE_CONTAINER_ID) + + fake_request.assert_called_with( + 'GET', + url_prefix + 'containers/' + fake_api.FAKE_CONTAINER_ID + '/top', + params={}, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_container_top_with_psargs(self): + self.client.top(fake_api.FAKE_CONTAINER_ID, 'waux') + + fake_request.assert_called_with( + 'GET', + url_prefix + 'containers/' + fake_api.FAKE_CONTAINER_ID + '/top', + params={'ps_args': 'waux'}, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + @requires_api_version('1.22') + def test_container_update(self): + self.client.update_container( + fake_api.FAKE_CONTAINER_ID, mem_limit='2k', cpu_shares=124, + blkio_weight=345 + ) + args = fake_request.call_args + assert args[0][1] == (url_prefix + 'containers/' + + fake_api.FAKE_CONTAINER_ID + '/update') + assert json.loads(args[1]['data']) == { + 'Memory': 2 * 1024, 'CpuShares': 124, 'BlkioWeight': 345 + } + assert args[1]['headers']['Content-Type'] == 'application/json' diff --git a/tests/unit/api_exec_test.py b/tests/unit/api_exec_test.py new file mode 100644 index 0000000000..9d789723a0 --- /dev/null +++ b/tests/unit/api_exec_test.py @@ -0,0 +1,80 @@ +import json + +from . 
import fake_api +from .api_test import ( + DEFAULT_TIMEOUT_SECONDS, + BaseAPIClientTest, + fake_request, + url_prefix, +) + + +class ExecTest(BaseAPIClientTest): + def test_exec_create(self): + self.client.exec_create(fake_api.FAKE_CONTAINER_ID, ['ls', '-1']) + + args = fake_request.call_args + assert args[0][0] == 'POST' + assert args[0][1] == url_prefix + 'containers/{}/exec'.format( + fake_api.FAKE_CONTAINER_ID + ) + + assert json.loads(args[1]['data']) == { + 'Tty': False, + 'AttachStdout': True, + 'Container': fake_api.FAKE_CONTAINER_ID, + 'Cmd': ['ls', '-1'], + 'Privileged': False, + 'AttachStdin': False, + 'AttachStderr': True, + 'User': '' + } + + assert args[1]['headers'] == {'Content-Type': 'application/json'} + + def test_exec_start(self): + self.client.exec_start(fake_api.FAKE_EXEC_ID) + + args = fake_request.call_args + assert args[0][1] == f"{url_prefix}exec/{fake_api.FAKE_EXEC_ID}/start" + + assert json.loads(args[1]['data']) == { + 'Tty': False, + 'Detach': False, + } + + assert args[1]['headers'] == { + 'Content-Type': 'application/json', + 'Connection': 'Upgrade', + 'Upgrade': 'tcp' + } + + def test_exec_start_detached(self): + self.client.exec_start(fake_api.FAKE_EXEC_ID, detach=True) + + args = fake_request.call_args + assert args[0][1] == f"{url_prefix}exec/{fake_api.FAKE_EXEC_ID}/start" + + assert json.loads(args[1]['data']) == { + 'Tty': False, + 'Detach': True + } + + assert args[1]['headers'] == { + 'Content-Type': 'application/json' + } + + def test_exec_inspect(self): + self.client.exec_inspect(fake_api.FAKE_EXEC_ID) + + args = fake_request.call_args + assert args[0][1] == f"{url_prefix}exec/{fake_api.FAKE_EXEC_ID}/json" + + def test_exec_resize(self): + self.client.exec_resize(fake_api.FAKE_EXEC_ID, height=20, width=60) + + fake_request.assert_called_with( + 'POST', + f"{url_prefix}exec/{fake_api.FAKE_EXEC_ID}/resize", + params={'h': 20, 'w': 60}, + timeout=DEFAULT_TIMEOUT_SECONDS + ) diff --git a/tests/unit/api_image_test.py b/tests/unit/api_image_test.py new file mode 100644 index 0000000000..148109d37e --- /dev/null +++ b/tests/unit/api_image_test.py @@ -0,0 +1,370 @@ +from unittest import mock + +import pytest + +import docker +from docker import auth + +from . 
import fake_api +from .api_test import ( + DEFAULT_TIMEOUT_SECONDS, + BaseAPIClientTest, + fake_request, + fake_resolve_authconfig, + url_prefix, +) + + +class ImageTest(BaseAPIClientTest): + def test_image_viz(self): + with pytest.raises(Exception): # noqa: B017 + self.client.images('busybox', viz=True) + self.fail('Viz output should not be supported!') + + def test_images(self): + self.client.images(all=True) + + fake_request.assert_called_with( + 'GET', + f"{url_prefix}images/json", + params={'only_ids': 0, 'all': 1}, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_images_name(self): + self.client.images('foo:bar') + + fake_request.assert_called_with( + 'GET', + f"{url_prefix}images/json", + params={'only_ids': 0, 'all': 0, + 'filters': '{"reference": ["foo:bar"]}'}, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_images_quiet(self): + self.client.images(all=True, quiet=True) + + fake_request.assert_called_with( + 'GET', + f"{url_prefix}images/json", + params={'only_ids': 1, 'all': 1}, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_image_ids(self): + self.client.images(quiet=True) + + fake_request.assert_called_with( + 'GET', + f"{url_prefix}images/json", + params={'only_ids': 1, 'all': 0}, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_images_filters(self): + self.client.images(filters={'dangling': True}) + + fake_request.assert_called_with( + 'GET', + f"{url_prefix}images/json", + params={'only_ids': 0, 'all': 0, + 'filters': '{"dangling": ["true"]}'}, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_pull(self): + self.client.pull('joffrey/test001') + + args = fake_request.call_args + assert args[0][1] == f"{url_prefix}images/create" + assert args[1]['params'] == { + 'tag': 'latest', 'fromImage': 'joffrey/test001' + } + assert not args[1]['stream'] + + def test_pull_stream(self): + self.client.pull('joffrey/test001', stream=True) + + args = fake_request.call_args + assert args[0][1] == f"{url_prefix}images/create" + assert args[1]['params'] == { + 'tag': 'latest', 'fromImage': 'joffrey/test001' + } + assert args[1]['stream'] + + def test_commit(self): + self.client.commit(fake_api.FAKE_CONTAINER_ID) + + fake_request.assert_called_with( + 'POST', + f"{url_prefix}commit", + data='{}', + headers={'Content-Type': 'application/json'}, + params={ + 'repo': None, + 'comment': None, + 'tag': None, + 'container': fake_api.FAKE_CONTAINER_ID, + 'author': None, + 'pause': True, + 'changes': None + }, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_remove_image(self): + self.client.remove_image(fake_api.FAKE_IMAGE_ID) + + fake_request.assert_called_with( + 'DELETE', + f"{url_prefix}images/{fake_api.FAKE_IMAGE_ID}", + params={'force': False, 'noprune': False}, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_image_history(self): + self.client.history(fake_api.FAKE_IMAGE_NAME) + + fake_request.assert_called_with( + 'GET', + f"{url_prefix}images/test_image/history", + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_import_image(self): + self.client.import_image( + fake_api.FAKE_TARBALL_PATH, + repository=fake_api.FAKE_REPO_NAME, + tag=fake_api.FAKE_TAG_NAME + ) + + fake_request.assert_called_with( + 'POST', + f"{url_prefix}images/create", + params={ + 'repo': fake_api.FAKE_REPO_NAME, + 'tag': fake_api.FAKE_TAG_NAME, + 'fromSrc': fake_api.FAKE_TARBALL_PATH + }, + data=None, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_import_image_from_bytes(self): + stream = (i for i in range(0, 100)) + + self.client.import_image( + stream, + repository=fake_api.FAKE_REPO_NAME, + 
tag=fake_api.FAKE_TAG_NAME + ) + + fake_request.assert_called_with( + 'POST', + f"{url_prefix}images/create", + params={ + 'repo': fake_api.FAKE_REPO_NAME, + 'tag': fake_api.FAKE_TAG_NAME, + 'fromSrc': '-', + }, + headers={ + 'Content-Type': 'application/tar', + }, + data=stream, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_import_image_from_image(self): + self.client.import_image( + image=fake_api.FAKE_IMAGE_NAME, + repository=fake_api.FAKE_REPO_NAME, + tag=fake_api.FAKE_TAG_NAME + ) + + fake_request.assert_called_with( + 'POST', + f"{url_prefix}images/create", + params={ + 'repo': fake_api.FAKE_REPO_NAME, + 'tag': fake_api.FAKE_TAG_NAME, + 'fromImage': fake_api.FAKE_IMAGE_NAME + }, + data=None, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_inspect_image(self): + self.client.inspect_image(fake_api.FAKE_IMAGE_NAME) + + fake_request.assert_called_with( + 'GET', + f"{url_prefix}images/test_image/json", + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_inspect_image_undefined_id(self): + for arg in None, '', {True: True}: + with pytest.raises(docker.errors.NullResource) as excinfo: + self.client.inspect_image(arg) + + assert excinfo.value.args[0] == 'Resource ID was not provided' + + def test_push_image(self): + with mock.patch('docker.auth.resolve_authconfig', + fake_resolve_authconfig): + self.client.push(fake_api.FAKE_IMAGE_NAME) + + fake_request.assert_called_with( + 'POST', + f"{url_prefix}images/test_image/push", + params={ + 'tag': None + }, + data='{}', + headers={'Content-Type': 'application/json'}, + stream=False, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_push_image_with_tag(self): + with mock.patch('docker.auth.resolve_authconfig', + fake_resolve_authconfig): + self.client.push( + fake_api.FAKE_IMAGE_NAME, tag=fake_api.FAKE_TAG_NAME + ) + + fake_request.assert_called_with( + 'POST', + f"{url_prefix}images/test_image/push", + params={ + 'tag': fake_api.FAKE_TAG_NAME, + }, + data='{}', + headers={'Content-Type': 'application/json'}, + stream=False, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_push_image_with_auth(self): + auth_config = { + 'username': "test_user", + 'password': "test_password", + 'serveraddress': "test_server", + } + encoded_auth = auth.encode_header(auth_config) + self.client.push( + fake_api.FAKE_IMAGE_NAME, tag=fake_api.FAKE_TAG_NAME, + auth_config=auth_config + ) + + fake_request.assert_called_with( + 'POST', + f"{url_prefix}images/test_image/push", + params={ + 'tag': fake_api.FAKE_TAG_NAME, + }, + data='{}', + headers={'Content-Type': 'application/json', + 'X-Registry-Auth': encoded_auth}, + stream=False, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_push_image_stream(self): + with mock.patch('docker.auth.resolve_authconfig', + fake_resolve_authconfig): + self.client.push(fake_api.FAKE_IMAGE_NAME, stream=True) + + fake_request.assert_called_with( + 'POST', + f"{url_prefix}images/test_image/push", + params={ + 'tag': None + }, + data='{}', + headers={'Content-Type': 'application/json'}, + stream=True, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_tag_image(self): + self.client.tag(fake_api.FAKE_IMAGE_ID, fake_api.FAKE_REPO_NAME) + + fake_request.assert_called_with( + 'POST', + f"{url_prefix}images/{fake_api.FAKE_IMAGE_ID}/tag", + params={ + 'tag': None, + 'repo': 'repo', + 'force': 0 + }, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_tag_image_tag(self): + self.client.tag( + fake_api.FAKE_IMAGE_ID, + fake_api.FAKE_REPO_NAME, + tag=fake_api.FAKE_TAG_NAME + ) + + fake_request.assert_called_with( + 'POST', + 
f"{url_prefix}images/{fake_api.FAKE_IMAGE_ID}/tag", + params={ + 'tag': 'tag', + 'repo': 'repo', + 'force': 0 + }, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_tag_image_force(self): + self.client.tag( + fake_api.FAKE_IMAGE_ID, fake_api.FAKE_REPO_NAME, force=True) + + fake_request.assert_called_with( + 'POST', + f"{url_prefix}images/{fake_api.FAKE_IMAGE_ID}/tag", + params={ + 'tag': None, + 'repo': 'repo', + 'force': 1 + }, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_get_image(self): + self.client.get_image(fake_api.FAKE_IMAGE_ID) + + fake_request.assert_called_with( + 'GET', + f"{url_prefix}images/{fake_api.FAKE_IMAGE_ID}/get", + stream=True, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_load_image(self): + self.client.load_image('Byte Stream....') + + fake_request.assert_called_with( + 'POST', + f"{url_prefix}images/load", + data='Byte Stream....', + stream=True, + params={}, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_load_image_quiet(self): + self.client.load_image('Byte Stream....', quiet=True) + + fake_request.assert_called_with( + 'POST', + f"{url_prefix}images/load", + data='Byte Stream....', + stream=True, + params={'quiet': True}, + timeout=DEFAULT_TIMEOUT_SECONDS + ) diff --git a/tests/unit/api_network_test.py b/tests/unit/api_network_test.py new file mode 100644 index 0000000000..1f9e596655 --- /dev/null +++ b/tests/unit/api_network_test.py @@ -0,0 +1,166 @@ +import json +from unittest import mock + +from docker.types import IPAMConfig, IPAMPool + +from .api_test import BaseAPIClientTest, response, url_prefix + + +class NetworkTest(BaseAPIClientTest): + def test_list_networks(self): + networks = [ + { + "name": "none", + "id": "8e4e55c6863ef424", + "type": "null", + "endpoints": [] + }, + { + "name": "host", + "id": "062b6d9ea7913fde", + "type": "host", + "endpoints": [] + }, + ] + + get = mock.Mock(return_value=response( + status_code=200, content=json.dumps(networks).encode('utf-8'))) + + with mock.patch('docker.api.client.APIClient.get', get): + assert self.client.networks() == networks + + assert get.call_args[0][0] == f"{url_prefix}networks" + + filters = json.loads(get.call_args[1]['params']['filters']) + assert not filters + + self.client.networks(names=['foo']) + filters = json.loads(get.call_args[1]['params']['filters']) + assert filters == {'name': ['foo']} + + self.client.networks(ids=['123']) + filters = json.loads(get.call_args[1]['params']['filters']) + assert filters == {'id': ['123']} + + def test_create_network(self): + network_data = { + "id": 'abc12345', + "warning": "", + } + + network_response = response(status_code=200, content=network_data) + post = mock.Mock(return_value=network_response) + + with mock.patch('docker.api.client.APIClient.post', post): + result = self.client.create_network('foo') + assert result == network_data + + assert post.call_args[0][0] == f"{url_prefix}networks/create" + + assert json.loads(post.call_args[1]['data']) == {"Name": "foo"} + + opts = { + 'com.docker.network.bridge.enable_icc': False, + 'com.docker.network.bridge.enable_ip_masquerade': False, + } + self.client.create_network('foo', 'bridge', opts) + + assert json.loads(post.call_args[1]['data']) == { + "Name": "foo", "Driver": "bridge", "Options": opts + } + + ipam_pool_config = IPAMPool(subnet="192.168.52.0/24", + gateway="192.168.52.254") + ipam_config = IPAMConfig(pool_configs=[ipam_pool_config]) + + self.client.create_network("bar", driver="bridge", + ipam=ipam_config) + + assert json.loads(post.call_args[1]['data']) == { + "Name": "bar", + 
"Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [{ + "IPRange": None, + "Gateway": "192.168.52.254", + "Subnet": "192.168.52.0/24", + "AuxiliaryAddresses": None, + }], + } + } + + def test_remove_network(self): + network_id = 'abc12345' + delete = mock.Mock(return_value=response(status_code=200)) + + with mock.patch('docker.api.client.APIClient.delete', delete): + self.client.remove_network(network_id) + + args = delete.call_args + assert args[0][0] == f"{url_prefix}networks/{network_id}" + + def test_inspect_network(self): + network_id = 'abc12345' + network_name = 'foo' + network_data = { + 'name': network_name, + 'id': network_id, + 'driver': 'bridge', + 'containers': {}, + } + + network_response = response(status_code=200, content=network_data) + get = mock.Mock(return_value=network_response) + + with mock.patch('docker.api.client.APIClient.get', get): + result = self.client.inspect_network(network_id) + assert result == network_data + + args = get.call_args + assert args[0][0] == f"{url_prefix}networks/{network_id}" + + def test_connect_container_to_network(self): + network_id = 'abc12345' + container_id = 'def45678' + + post = mock.Mock(return_value=response(status_code=201)) + + with mock.patch('docker.api.client.APIClient.post', post): + self.client.connect_container_to_network( + container={'Id': container_id}, + net_id=network_id, + aliases=['foo', 'bar'], + links=[('baz', 'quux')], + driver_opt={'com.docker-py.setting': 'yes'}, + ) + + assert post.call_args[0][0] == ( + f"{url_prefix}networks/{network_id}/connect" + ) + + assert json.loads(post.call_args[1]['data']) == { + 'Container': container_id, + 'EndpointConfig': { + 'Aliases': ['foo', 'bar'], + 'Links': ['baz:quux'], + 'DriverOpts': {'com.docker-py.setting': 'yes'}, + }, + } + + def test_disconnect_container_from_network(self): + network_id = 'abc12345' + container_id = 'def45678' + + post = mock.Mock(return_value=response(status_code=201)) + + with mock.patch('docker.api.client.APIClient.post', post): + self.client.disconnect_container_from_network( + container={'Id': container_id}, net_id=network_id) + + assert post.call_args[0][0] == ( + f"{url_prefix}networks/{network_id}/disconnect" + ) + assert json.loads(post.call_args[1]['data']) == { + 'Container': container_id + } diff --git a/tests/unit/api_test.py b/tests/unit/api_test.py new file mode 100644 index 0000000000..3ce127b346 --- /dev/null +++ b/tests/unit/api_test.py @@ -0,0 +1,664 @@ +import datetime +import http.server +import io +import json +import os +import re +import shutil +import socket +import socketserver +import struct +import tempfile +import threading +import time +import unittest +from unittest import mock + +import pytest +import requests +import urllib3 + +import docker +from docker.api import APIClient +from docker.constants import DEFAULT_DOCKER_API_VERSION + +from . 
import fake_api + +DEFAULT_TIMEOUT_SECONDS = docker.constants.DEFAULT_TIMEOUT_SECONDS + + +def response(status_code=200, content='', headers=None, reason=None, elapsed=0, + request=None, raw=None): + res = requests.Response() + res.status_code = status_code + if not isinstance(content, bytes): + content = json.dumps(content).encode('ascii') + res._content = content + res.headers = requests.structures.CaseInsensitiveDict(headers or {}) + res.reason = reason + res.elapsed = datetime.timedelta(elapsed) + res.request = request + res.raw = raw + return res + + +def fake_resolve_authconfig(authconfig, registry=None, *args, **kwargs): + return None + + +def fake_inspect_container(self, container, tty=False): + return fake_api.get_fake_inspect_container(tty=tty)[1] + + +def fake_resp(method, url, *args, **kwargs): + key = None + if url in fake_api.fake_responses: + key = url + elif (url, method) in fake_api.fake_responses: + key = (url, method) + if not key: + raise Exception(f'{method} {url}') + status_code, content = fake_api.fake_responses[key]() + return response(status_code=status_code, content=content) + + +fake_request = mock.Mock(side_effect=fake_resp) + + +def fake_get(self, url, *args, **kwargs): + return fake_request('GET', url, *args, **kwargs) + + +def fake_post(self, url, *args, **kwargs): + return fake_request('POST', url, *args, **kwargs) + + +def fake_put(self, url, *args, **kwargs): + return fake_request('PUT', url, *args, **kwargs) + + +def fake_delete(self, url, *args, **kwargs): + return fake_request('DELETE', url, *args, **kwargs) + + +def fake_read_from_socket(self, response, stream, tty=False, demux=False): + return b'' + + +url_base = f'{fake_api.prefix}/' +url_prefix = f'{url_base}v{docker.constants.DEFAULT_DOCKER_API_VERSION}/' + + +class BaseAPIClientTest(unittest.TestCase): + def setUp(self): + self.patcher = mock.patch.multiple( + 'docker.api.client.APIClient', + get=fake_get, + post=fake_post, + put=fake_put, + delete=fake_delete, + _read_from_socket=fake_read_from_socket + ) + self.patcher.start() + self.client = APIClient(version=DEFAULT_DOCKER_API_VERSION) + + def tearDown(self): + self.client.close() + self.patcher.stop() + + def base_create_payload(self, img='busybox', cmd=None): + if not cmd: + cmd = ['true'] + return {"Tty": False, "Image": img, "Cmd": cmd, + "AttachStdin": False, + "AttachStderr": True, "AttachStdout": True, + "StdinOnce": False, + "OpenStdin": False, "NetworkDisabled": False, + } + + +class DockerApiTest(BaseAPIClientTest): + def test_ctor(self): + with pytest.raises(docker.errors.DockerException) as excinfo: + APIClient(version=1.12) + + assert str( + excinfo.value + ) == 'Version parameter must be a string or None. 
Found float' + + def test_url_valid_resource(self): + url = self.client._url('/hello/{0}/world', 'somename') + assert url == f"{url_prefix}hello/somename/world" + + url = self.client._url( + '/hello/{0}/world/{1}', 'somename', 'someothername' + ) + assert url == f"{url_prefix}hello/somename/world/someothername" + + url = self.client._url('/hello/{0}/world', 'some?name') + assert url == f"{url_prefix}hello/some%3Fname/world" + + url = self.client._url("/images/{0}/push", "localhost:5000/image") + assert url == f"{url_prefix}images/localhost:5000/image/push" + + def test_url_invalid_resource(self): + with pytest.raises(ValueError): + self.client._url('/hello/{0}/world', ['sakuya', 'izayoi']) + + def test_url_no_resource(self): + url = self.client._url('/simple') + assert url == f"{url_prefix}simple" + + def test_url_unversioned_api(self): + url = self.client._url( + '/hello/{0}/world', 'somename', versioned_api=False + ) + assert url == f"{url_base}hello/somename/world" + + def test_version(self): + self.client.version() + + fake_request.assert_called_with( + 'GET', + f"{url_prefix}version", + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_version_no_api_version(self): + self.client.version(False) + + fake_request.assert_called_with( + 'GET', + f"{url_base}version", + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_retrieve_server_version(self): + client = APIClient(version="auto") + assert isinstance(client._version, str) + assert not (client._version == "auto") + client.close() + + def test_auto_retrieve_server_version(self): + version = self.client._retrieve_server_version() + assert isinstance(version, str) + + def test_info(self): + self.client.info() + + fake_request.assert_called_with( + 'GET', + f"{url_prefix}info", + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_search(self): + self.client.search('busybox') + + fake_request.assert_called_with( + 'GET', + f"{url_prefix}images/search", + params={'term': 'busybox'}, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_login(self): + self.client.login('sakuya', 'izayoi') + args = fake_request.call_args + assert args[0][0] == 'POST' + assert args[0][1] == f"{url_prefix}auth" + assert json.loads(args[1]['data']) == { + 'username': 'sakuya', 'password': 'izayoi' + } + assert args[1]['headers'] == {'Content-Type': 'application/json'} + assert self.client._auth_configs.auths['docker.io'] == { + 'email': None, + 'password': 'izayoi', + 'username': 'sakuya', + 'serveraddress': None, + } + + def test_events(self): + self.client.events() + + fake_request.assert_called_with( + 'GET', + f"{url_prefix}events", + params={'since': None, 'until': None, 'filters': None}, + stream=True, + timeout=None + ) + + def test_events_with_since_until(self): + ts = 1356048000 + now = datetime.datetime.utcfromtimestamp(ts) + since = now - datetime.timedelta(seconds=10) + until = now + datetime.timedelta(seconds=10) + + self.client.events(since=since, until=until) + + fake_request.assert_called_with( + 'GET', + f"{url_prefix}events", + params={ + 'since': ts - 10, + 'until': ts + 10, + 'filters': None + }, + stream=True, + timeout=None + ) + + def test_events_with_filters(self): + filters = {'event': ['die', 'stop'], + 'container': fake_api.FAKE_CONTAINER_ID} + + self.client.events(filters=filters) + + expected_filters = docker.utils.convert_filters(filters) + fake_request.assert_called_with( + 'GET', + f"{url_prefix}events", + params={ + 'since': None, + 'until': None, + 'filters': expected_filters + }, + stream=True, + timeout=None + ) + + def 
_socket_path_for_client_session(self, client): + socket_adapter = client.get_adapter('http+docker://') + return socket_adapter.socket_path + + def test_url_compatibility_unix(self): + c = APIClient( + base_url="unix://socket", + version=DEFAULT_DOCKER_API_VERSION) + + assert self._socket_path_for_client_session(c) == '/socket' + + def test_url_compatibility_unix_triple_slash(self): + c = APIClient( + base_url="unix:///socket", + version=DEFAULT_DOCKER_API_VERSION) + + assert self._socket_path_for_client_session(c) == '/socket' + + def test_url_compatibility_http_unix_triple_slash(self): + c = APIClient( + base_url="http+unix:///socket", + version=DEFAULT_DOCKER_API_VERSION) + + assert self._socket_path_for_client_session(c) == '/socket' + + def test_url_compatibility_http(self): + c = APIClient( + base_url="http://hostname:1234", + version=DEFAULT_DOCKER_API_VERSION) + + assert c.base_url == "http://hostname:1234" + + def test_url_compatibility_tcp(self): + c = APIClient( + base_url="tcp://hostname:1234", + version=DEFAULT_DOCKER_API_VERSION) + + assert c.base_url == "http://hostname:1234" + + def test_remove_link(self): + self.client.remove_container(fake_api.FAKE_CONTAINER_ID, link=True) + + fake_request.assert_called_with( + 'DELETE', + f"{url_prefix}containers/{fake_api.FAKE_CONTAINER_ID}", + params={'v': False, 'link': True, 'force': False}, + timeout=DEFAULT_TIMEOUT_SECONDS + ) + + def test_create_host_config_secopt(self): + security_opt = ['apparmor:test_profile'] + result = self.client.create_host_config(security_opt=security_opt) + assert 'SecurityOpt' in result + assert result['SecurityOpt'] == security_opt + with pytest.raises(TypeError): + self.client.create_host_config(security_opt='wrong') + + def test_stream_helper_decoding(self): + status_code, content = fake_api.fake_responses[f"{url_prefix}events"]() + content_str = json.dumps(content) + content_str = content_str.encode('utf-8') + body = io.BytesIO(content_str) + + # mock a stream interface + raw_resp = urllib3.HTTPResponse(body=body) + raw_resp._fp.chunked = True + raw_resp._fp.chunk_left = len(body.getvalue()) - 1 + + # pass `decode=False` to the helper + raw_resp._fp.seek(0) + resp = response(status_code=status_code, content=content, raw=raw_resp) + result = next(self.client._stream_helper(resp)) + assert result == content_str + + # pass `decode=True` to the helper + raw_resp._fp.seek(0) + resp = response(status_code=status_code, content=content, raw=raw_resp) + result = next(self.client._stream_helper(resp, decode=True)) + assert result == content + + # non-chunked response, pass `decode=False` to the helper + raw_resp._fp.chunked = False + raw_resp._fp.seek(0) + resp = response(status_code=status_code, content=content, raw=raw_resp) + result = next(self.client._stream_helper(resp)) + assert result == content_str.decode('utf-8') + + # non-chunked response, pass `decode=True` to the helper + raw_resp._fp.seek(0) + resp = response(status_code=status_code, content=content, raw=raw_resp) + result = next(self.client._stream_helper(resp, decode=True)) + assert result == content + + +class UnixSocketStreamTest(unittest.TestCase): + def setUp(self): + socket_dir = tempfile.mkdtemp() + self.build_context = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, socket_dir) + self.addCleanup(shutil.rmtree, self.build_context) + self.socket_file = os.path.join(socket_dir, 'test_sock.sock') + self.server_socket = self._setup_socket() + self.stop_server = False + server_thread = threading.Thread(target=self.run_server) + 
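# The server thread is made a daemon below so a wedged accept loop cannot keep + # the test process alive; run_server polls self.stop_server between + # non-blocking accept() attempts, so stop() shuts the server down promptly. +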
server_thread.daemon = True + server_thread.start() + self.response = None + self.request_handler = None + self.addCleanup(server_thread.join) + self.addCleanup(self.stop) + + def stop(self): + self.stop_server = True + + def _setup_socket(self): + server_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + server_sock.bind(self.socket_file) + # Non-blocking mode so that we can shut the test down easily + server_sock.setblocking(0) + server_sock.listen(5) + return server_sock + + def run_server(self): + try: + while not self.stop_server: + try: + connection, client_address = self.server_socket.accept() + except OSError: + # Probably no connection to accept yet + time.sleep(0.01) + continue + + connection.setblocking(1) + try: + self.request_handler(connection) + finally: + connection.close() + finally: + self.server_socket.close() + + def early_response_sending_handler(self, connection): + data = b'' + headers = None + + connection.sendall(self.response) + while not headers: + data += connection.recv(2048) + parts = data.split(b'\r\n\r\n', 1) + if len(parts) == 2: + headers, data = parts + + mo = re.search(r'Content-Length: ([0-9]+)', headers.decode()) + assert mo + content_length = int(mo.group(1)) + + while True: + if len(data) >= content_length: + break + + data += connection.recv(2048) + + @pytest.mark.skipif( + docker.constants.IS_WINDOWS_PLATFORM, reason='Unix only' + ) + def test_early_stream_response(self): + self.request_handler = self.early_response_sending_handler + lines = [] + for i in range(0, 50): + line = str(i).encode() + lines += [f'{len(line):x}'.encode(), line] + lines.append(b'0') + lines.append(b'') + + self.response = ( + b'HTTP/1.1 200 OK\r\n' + b'Transfer-Encoding: chunked\r\n' + b'\r\n' + ) + b'\r\n'.join(lines) + + with APIClient( + base_url=f"http+unix://{self.socket_file}", + version=DEFAULT_DOCKER_API_VERSION) as client: + for i in range(5): + try: + stream = client.build( + path=self.build_context, + ) + break + except requests.ConnectionError as e: + if i == 4: + raise e + + assert list(stream) == [ + str(i).encode() for i in range(50) + ] + + +class TCPSocketStreamTest(unittest.TestCase): + stdout_data = b''' + Now, those children out there, they're jumping through the + flames in the hope that the god of the fire will make them fruitful. + Really, you can't blame them. After all, what girl would not prefer the + child of a god to that of some acne-scarred artisan? + ''' + stderr_data = b''' + And what of the true God? To whose glory churches and monasteries have been + built on these islands for generations past? Now shall what of Him? 
+ ''' + + @classmethod + def setup_class(cls): + cls.server = socketserver.ThreadingTCPServer( + ('', 0), cls.get_handler_class()) + cls.thread = threading.Thread(target=cls.server.serve_forever) + cls.thread.daemon = True + cls.thread.start() + cls.address = f'http://{socket.gethostname()}:{cls.server.server_address[1]}' + + @classmethod + def teardown_class(cls): + cls.server.shutdown() + cls.server.server_close() + cls.thread.join() + + @classmethod + def get_handler_class(cls): + stdout_data = cls.stdout_data + stderr_data = cls.stderr_data + + class Handler(http.server.BaseHTTPRequestHandler): + def do_POST(self): + resp_data = self.get_resp_data() + self.send_response(101) + self.send_header( + 'Content-Type', 'application/vnd.docker.raw-stream') + self.send_header('Connection', 'Upgrade') + self.send_header('Upgrade', 'tcp') + self.end_headers() + self.wfile.flush() + time.sleep(0.2) + self.wfile.write(resp_data) + self.wfile.flush() + + def get_resp_data(self): + path = self.path.split('/')[-1] + if path == 'tty': + return stdout_data + stderr_data + elif path == 'no-tty': + data = b'' + data += self.frame_header(1, stdout_data) + data += stdout_data + data += self.frame_header(2, stderr_data) + data += stderr_data + return data + else: + raise Exception(f'Unknown path {path}') + + @staticmethod + def frame_header(stream, data): + return struct.pack('>BxxxL', stream, len(data)) + + return Handler + + def request(self, stream=None, tty=None, demux=None): + assert stream is not None and tty is not None and demux is not None + with APIClient( + base_url=self.address, + version=DEFAULT_DOCKER_API_VERSION + ) as client: + if tty: + url = client._url('/tty') + else: + url = client._url('/no-tty') + resp = client._post(url, stream=True) + return client._read_from_socket( + resp, stream=stream, tty=tty, demux=demux) + + def test_read_from_socket_tty(self): + res = self.request(stream=True, tty=True, demux=False) + assert next(res) == self.stdout_data + self.stderr_data + with self.assertRaises(StopIteration): + next(res) + + def test_read_from_socket_tty_demux(self): + res = self.request(stream=True, tty=True, demux=True) + assert next(res) == (self.stdout_data + self.stderr_data, None) + with self.assertRaises(StopIteration): + next(res) + + def test_read_from_socket_no_tty(self): + res = self.request(stream=True, tty=False, demux=False) + assert next(res) == self.stdout_data + assert next(res) == self.stderr_data + with self.assertRaises(StopIteration): + next(res) + + def test_read_from_socket_no_tty_demux(self): + res = self.request(stream=True, tty=False, demux=True) + assert (self.stdout_data, None) == next(res) + assert (None, self.stderr_data) == next(res) + with self.assertRaises(StopIteration): + next(res) + + def test_read_from_socket_no_stream_tty(self): + res = self.request(stream=False, tty=True, demux=False) + assert res == self.stdout_data + self.stderr_data + + def test_read_from_socket_no_stream_tty_demux(self): + res = self.request(stream=False, tty=True, demux=True) + assert res == (self.stdout_data + self.stderr_data, None) + + def test_read_from_socket_no_stream_no_tty(self): + res = self.request(stream=False, tty=False, demux=False) + assert res == self.stdout_data + self.stderr_data + + def test_read_from_socket_no_stream_no_tty_demux(self): + res = self.request(stream=False, tty=False, demux=True) + assert res == (self.stdout_data, self.stderr_data) + + +class UserAgentTest(unittest.TestCase): + def setUp(self): + self.patcher = mock.patch.object( + APIClient, + 
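# APIClient subclasses requests.Session, so patching its 'send' method intercepts every request the client makes. +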
'send', + return_value=fake_resp("GET", f"{fake_api.prefix}/version") + ) + self.mock_send = self.patcher.start() + + def tearDown(self): + self.patcher.stop() + + def test_default_user_agent(self): + client = APIClient(version=DEFAULT_DOCKER_API_VERSION) + client.version() + + assert self.mock_send.call_count == 1 + headers = self.mock_send.call_args[0][0].headers + expected = f'docker-sdk-python/{docker.__version__}' + assert headers['User-Agent'] == expected + + def test_custom_user_agent(self): + client = APIClient( + user_agent='foo/bar', + version=DEFAULT_DOCKER_API_VERSION) + client.version() + + assert self.mock_send.call_count == 1 + headers = self.mock_send.call_args[0][0].headers + assert headers['User-Agent'] == 'foo/bar' + + +class DisableSocketTest(unittest.TestCase): + class DummySocket: + def __init__(self, timeout=60): + self.timeout = timeout + + def settimeout(self, timeout): + self.timeout = timeout + + def gettimeout(self): + return self.timeout + + def setUp(self): + self.client = APIClient(version=DEFAULT_DOCKER_API_VERSION) + + def test_disable_socket_timeout(self): + """Test that the timeout is disabled on a generic socket object.""" + socket = self.DummySocket() + + self.client._disable_socket_timeout(socket) + + assert socket.timeout is None + + def test_disable_socket_timeout2(self): + """Test that the timeouts are disabled on a generic socket object + and its _sock object if present.""" + socket = self.DummySocket() + socket._sock = self.DummySocket() + + self.client._disable_socket_timeout(socket) + + assert socket.timeout is None + assert socket._sock.timeout is None + + def test_disable_socket_timeout_non_blocking(self): + """Test that a non-blocking socket does not get set to blocking.""" + socket = self.DummySocket() + socket._sock = self.DummySocket(0.0) + + self.client._disable_socket_timeout(socket) + + assert socket.timeout is None + assert socket._sock.timeout == 0.0 diff --git a/tests/unit/api_volume_test.py b/tests/unit/api_volume_test.py new file mode 100644 index 0000000000..fd32063966 --- /dev/null +++ b/tests/unit/api_volume_test.py @@ -0,0 +1,115 @@ +import json + +import pytest + +from ..helpers import requires_api_version +from .api_test import BaseAPIClientTest, fake_request, url_prefix + + +class VolumeTest(BaseAPIClientTest): + def test_list_volumes(self): + volumes = self.client.volumes() + assert 'Volumes' in volumes + assert len(volumes['Volumes']) == 2 + args = fake_request.call_args + + assert args[0][0] == 'GET' + assert args[0][1] == f"{url_prefix}volumes" + + def test_list_volumes_and_filters(self): + volumes = self.client.volumes(filters={'dangling': True}) + assert 'Volumes' in volumes + assert len(volumes['Volumes']) == 2 + args = fake_request.call_args + + assert args[0][0] == 'GET' + assert args[0][1] == f"{url_prefix}volumes" + assert args[1] == {'params': {'filters': '{"dangling": ["true"]}'}, + 'timeout': 60} + + def test_create_volume(self): + name = 'perfectcherryblossom' + result = self.client.create_volume(name) + assert 'Name' in result + assert result['Name'] == name + assert 'Driver' in result + assert result['Driver'] == 'local' + args = fake_request.call_args + + assert args[0][0] == 'POST' + assert args[0][1] == f"{url_prefix}volumes/create" + assert json.loads(args[1]['data']) == {'Name': name} + + @requires_api_version('1.23') + def test_create_volume_with_labels(self): + name = 'perfectcherryblossom' + result = self.client.create_volume(name, labels={ + 'com.example.some-label': 'some-value' + }) + assert
result["Labels"] == { + 'com.example.some-label': 'some-value' + } + + @requires_api_version('1.23') + def test_create_volume_with_invalid_labels(self): + name = 'perfectcherryblossom' + with pytest.raises(TypeError): + self.client.create_volume(name, labels=1) + + def test_create_volume_with_driver(self): + name = 'perfectcherryblossom' + driver_name = 'sshfs' + self.client.create_volume(name, driver=driver_name) + args = fake_request.call_args + + assert args[0][0] == 'POST' + assert args[0][1] == f"{url_prefix}volumes/create" + data = json.loads(args[1]['data']) + assert 'Driver' in data + assert data['Driver'] == driver_name + + def test_create_volume_invalid_opts_type(self): + with pytest.raises(TypeError): + self.client.create_volume( + 'perfectcherryblossom', driver_opts='hello=world' + ) + + with pytest.raises(TypeError): + self.client.create_volume( + 'perfectcherryblossom', driver_opts=['hello=world'] + ) + + with pytest.raises(TypeError): + self.client.create_volume( + 'perfectcherryblossom', driver_opts='' + ) + + @requires_api_version('1.24') + def test_create_volume_with_no_specified_name(self): + result = self.client.create_volume(name=None) + assert 'Name' in result + assert result['Name'] is not None + assert 'Driver' in result + assert result['Driver'] == 'local' + assert 'Scope' in result + assert result['Scope'] == 'local' + + def test_inspect_volume(self): + name = 'perfectcherryblossom' + result = self.client.inspect_volume(name) + assert 'Name' in result + assert result['Name'] == name + assert 'Driver' in result + assert result['Driver'] == 'local' + args = fake_request.call_args + + assert args[0][0] == 'GET' + assert args[0][1] == f'{url_prefix}volumes/{name}' + + def test_remove_volume(self): + name = 'perfectcherryblossom' + self.client.remove_volume(name) + args = fake_request.call_args + + assert args[0][0] == 'DELETE' + assert args[0][1] == f'{url_prefix}volumes/{name}' diff --git a/tests/unit/auth_test.py b/tests/unit/auth_test.py new file mode 100644 index 0000000000..b2fedb32e4 --- /dev/null +++ b/tests/unit/auth_test.py @@ -0,0 +1,798 @@ +import base64 +import json +import os +import os.path +import random +import shutil +import tempfile +import unittest +from unittest import mock + +import pytest + +from docker import auth, credentials, errors + + +class RegressionTest(unittest.TestCase): + def test_803_urlsafe_encode(self): + auth_data = { + 'username': 'root', + 'password': 'GR?XGR?XGR?XGR?X' + } + encoded = auth.encode_header(auth_data) + assert b'/' not in encoded + assert b'_' in encoded + + +class ResolveRepositoryNameTest(unittest.TestCase): + def test_resolve_repository_name_hub_library_image(self): + assert auth.resolve_repository_name('image') == ( + 'docker.io', 'image' + ) + + def test_resolve_repository_name_dotted_hub_library_image(self): + assert auth.resolve_repository_name('image.valid') == ( + 'docker.io', 'image.valid' + ) + + def test_resolve_repository_name_hub_image(self): + assert auth.resolve_repository_name('username/image') == ( + 'docker.io', 'username/image' + ) + + def test_explicit_hub_index_library_image(self): + assert auth.resolve_repository_name('docker.io/image') == ( + 'docker.io', 'image' + ) + + def test_explicit_legacy_hub_index_library_image(self): + assert auth.resolve_repository_name('index.docker.io/image') == ( + 'docker.io', 'image' + ) + + def test_resolve_repository_name_private_registry(self): + assert auth.resolve_repository_name('my.registry.net/image') == ( + 'my.registry.net', 'image' + ) + + def 
test_resolve_repository_name_private_registry_with_port(self): + assert auth.resolve_repository_name('my.registry.net:5000/image') == ( + 'my.registry.net:5000', 'image' + ) + + def test_resolve_repository_name_private_registry_with_username(self): + assert auth.resolve_repository_name( + 'my.registry.net/username/image' + ) == ('my.registry.net', 'username/image') + + def test_resolve_repository_name_no_dots_but_port(self): + assert auth.resolve_repository_name('hostname:5000/image') == ( + 'hostname:5000', 'image' + ) + + def test_resolve_repository_name_no_dots_but_port_and_username(self): + assert auth.resolve_repository_name( + 'hostname:5000/username/image' + ) == ('hostname:5000', 'username/image') + + def test_resolve_repository_name_localhost(self): + assert auth.resolve_repository_name('localhost/image') == ( + 'localhost', 'image' + ) + + def test_resolve_repository_name_localhost_with_username(self): + assert auth.resolve_repository_name('localhost/username/image') == ( + 'localhost', 'username/image' + ) + + def test_invalid_index_name(self): + with pytest.raises(errors.InvalidRepository): + auth.resolve_repository_name('-gecko.com/image') + + +def encode_auth(auth_info): + return base64.b64encode( + auth_info.get('username', '').encode('utf-8') + b':' + + auth_info.get('password', '').encode('utf-8')) + + +class ResolveAuthTest(unittest.TestCase): + index_config = {'auth': encode_auth({'username': 'indexuser'})} + private_config = {'auth': encode_auth({'username': 'privateuser'})} + legacy_config = {'auth': encode_auth({'username': 'legacyauth'})} + + auth_config = auth.AuthConfig({ + 'auths': auth.parse_auth({ + 'https://index.docker.io/v1/': index_config, + 'my.registry.net': private_config, + 'http://legacy.registry.url/v1/': legacy_config, + }) + }) + + def test_resolve_authconfig_hostname_only(self): + assert auth.resolve_authconfig( + self.auth_config, 'my.registry.net' + )['username'] == 'privateuser' + + def test_resolve_authconfig_no_protocol(self): + assert auth.resolve_authconfig( + self.auth_config, 'my.registry.net/v1/' + )['username'] == 'privateuser' + + def test_resolve_authconfig_no_path(self): + assert auth.resolve_authconfig( + self.auth_config, 'http://my.registry.net' + )['username'] == 'privateuser' + + def test_resolve_authconfig_no_path_trailing_slash(self): + assert auth.resolve_authconfig( + self.auth_config, 'http://my.registry.net/' + )['username'] == 'privateuser' + + def test_resolve_authconfig_no_path_wrong_secure_proto(self): + assert auth.resolve_authconfig( + self.auth_config, 'https://my.registry.net' + )['username'] == 'privateuser' + + def test_resolve_authconfig_no_path_wrong_insecure_proto(self): + assert auth.resolve_authconfig( + self.auth_config, 'http://index.docker.io' + )['username'] == 'indexuser' + + def test_resolve_authconfig_path_wrong_proto(self): + assert auth.resolve_authconfig( + self.auth_config, 'https://my.registry.net/v1/' + )['username'] == 'privateuser' + + def test_resolve_authconfig_default_registry(self): + assert auth.resolve_authconfig( + self.auth_config + )['username'] == 'indexuser' + + def test_resolve_authconfig_default_explicit_none(self): + assert auth.resolve_authconfig( + self.auth_config, None + )['username'] == 'indexuser' + + def test_resolve_authconfig_fully_explicit(self): + assert auth.resolve_authconfig( + self.auth_config, 'http://my.registry.net/v1/' + )['username'] == 'privateuser' + + def test_resolve_authconfig_legacy_config(self): + assert auth.resolve_authconfig( + self.auth_config, 
'legacy.registry.url' + )['username'] == 'legacyauth' + + def test_resolve_authconfig_no_match(self): + assert auth.resolve_authconfig( + self.auth_config, 'does.not.exist' + ) is None + + def test_resolve_registry_and_auth_library_image(self): + image = 'image' + assert auth.resolve_authconfig( + self.auth_config, auth.resolve_repository_name(image)[0] + )['username'] == 'indexuser' + + def test_resolve_registry_and_auth_hub_image(self): + image = 'username/image' + assert auth.resolve_authconfig( + self.auth_config, auth.resolve_repository_name(image)[0] + )['username'] == 'indexuser' + + def test_resolve_registry_and_auth_explicit_hub(self): + image = 'docker.io/username/image' + assert auth.resolve_authconfig( + self.auth_config, auth.resolve_repository_name(image)[0] + )['username'] == 'indexuser' + + def test_resolve_registry_and_auth_explicit_legacy_hub(self): + image = 'index.docker.io/username/image' + assert auth.resolve_authconfig( + self.auth_config, auth.resolve_repository_name(image)[0] + )['username'] == 'indexuser' + + def test_resolve_registry_and_auth_private_registry(self): + image = 'my.registry.net/image' + assert auth.resolve_authconfig( + self.auth_config, auth.resolve_repository_name(image)[0] + )['username'] == 'privateuser' + + def test_resolve_registry_and_auth_unauthenticated_registry(self): + image = 'other.registry.net/image' + assert auth.resolve_authconfig( + self.auth_config, auth.resolve_repository_name(image)[0] + ) is None + + def test_resolve_auth_with_empty_credstore_and_auth_dict(self): + auth_config = auth.AuthConfig({ + 'auths': auth.parse_auth({ + 'https://index.docker.io/v1/': self.index_config, + }), + 'credsStore': 'blackbox' + }) + with mock.patch( + 'docker.auth.AuthConfig._resolve_authconfig_credstore' + ) as m: + m.return_value = None + assert 'indexuser' == auth.resolve_authconfig( + auth_config, None + )['username'] + + +class LoadConfigTest(unittest.TestCase): + def test_load_config_no_file(self): + folder = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, folder) + cfg = auth.load_config(folder) + assert cfg is not None + + def test_load_legacy_config(self): + folder = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, folder) + cfg_path = os.path.join(folder, '.dockercfg') + auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii') + with open(cfg_path, 'w') as f: + f.write(f'auth = {auth_}\n') + f.write('email = sakuya@scarlet.net') + + cfg = auth.load_config(cfg_path) + assert auth.resolve_authconfig(cfg) is not None + assert cfg.auths[auth.INDEX_NAME] is not None + cfg = cfg.auths[auth.INDEX_NAME] + assert cfg['username'] == 'sakuya' + assert cfg['password'] == 'izayoi' + assert cfg['email'] == 'sakuya@scarlet.net' + assert cfg.get('Auth') is None + + def test_load_json_config(self): + folder = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, folder) + cfg_path = os.path.join(folder, '.dockercfg') + auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii') + email = 'sakuya@scarlet.net' + with open(cfg_path, 'w') as f: + json.dump( + {auth.INDEX_URL: {'auth': auth_, 'email': email}}, f + ) + cfg = auth.load_config(cfg_path) + assert auth.resolve_authconfig(cfg) is not None + assert cfg.auths[auth.INDEX_URL] is not None + cfg = cfg.auths[auth.INDEX_URL] + assert cfg['username'] == 'sakuya' + assert cfg['password'] == 'izayoi' + assert cfg['email'] == email + assert cfg.get('Auth') is None + + def test_load_modern_json_config(self): + folder = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, folder) + cfg_path = 
os.path.join(folder, 'config.json') + auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii') + email = 'sakuya@scarlet.net' + with open(cfg_path, 'w') as f: + json.dump({ + 'auths': { + auth.INDEX_URL: { + 'auth': auth_, 'email': email + } + } + }, f) + cfg = auth.load_config(cfg_path) + assert auth.resolve_authconfig(cfg) is not None + assert cfg.auths[auth.INDEX_URL] is not None + cfg = cfg.auths[auth.INDEX_URL] + assert cfg['username'] == 'sakuya' + assert cfg['password'] == 'izayoi' + assert cfg['email'] == email + + def test_load_config_with_random_name(self): + folder = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, folder) + + dockercfg_path = os.path.join( + folder, + f'.{random.randrange(100000)}.dockercfg', + ) + registry = 'https://your.private.registry.io' + auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii') + config = { + registry: { + 'auth': f'{auth_}', + 'email': 'sakuya@scarlet.net' + } + } + + with open(dockercfg_path, 'w') as f: + json.dump(config, f) + + cfg = auth.load_config(dockercfg_path).auths + assert registry in cfg + assert cfg[registry] is not None + cfg = cfg[registry] + assert cfg['username'] == 'sakuya' + assert cfg['password'] == 'izayoi' + assert cfg['email'] == 'sakuya@scarlet.net' + assert cfg.get('auth') is None + + def test_load_config_custom_config_env(self): + folder = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, folder) + + dockercfg_path = os.path.join(folder, 'config.json') + registry = 'https://your.private.registry.io' + auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii') + config = { + registry: { + 'auth': f'{auth_}', + 'email': 'sakuya@scarlet.net' + } + } + + with open(dockercfg_path, 'w') as f: + json.dump(config, f) + + with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}): + cfg = auth.load_config(None).auths + assert registry in cfg + assert cfg[registry] is not None + cfg = cfg[registry] + assert cfg['username'] == 'sakuya' + assert cfg['password'] == 'izayoi' + assert cfg['email'] == 'sakuya@scarlet.net' + assert cfg.get('auth') is None + + def test_load_config_custom_config_env_with_auths(self): + folder = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, folder) + + dockercfg_path = os.path.join(folder, 'config.json') + registry = 'https://your.private.registry.io' + auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii') + config = { + 'auths': { + registry: { + 'auth': f'{auth_}', + 'email': 'sakuya@scarlet.net' + } + } + } + + with open(dockercfg_path, 'w') as f: + json.dump(config, f) + + with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}): + cfg = auth.load_config(None) + assert registry in cfg.auths + cfg = cfg.auths[registry] + assert cfg['username'] == 'sakuya' + assert cfg['password'] == 'izayoi' + assert cfg['email'] == 'sakuya@scarlet.net' + assert cfg.get('auth') is None + + def test_load_config_custom_config_env_utf8(self): + folder = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, folder) + + dockercfg_path = os.path.join(folder, 'config.json') + registry = 'https://your.private.registry.io' + auth_ = base64.b64encode( + b'sakuya\xc3\xa6:izayoi\xc3\xa6').decode('ascii') + config = { + 'auths': { + registry: { + 'auth': f'{auth_}', + 'email': 'sakuya@scarlet.net' + } + } + } + + with open(dockercfg_path, 'w') as f: + json.dump(config, f) + + with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}): + cfg = auth.load_config(None) + assert registry in cfg.auths + cfg = cfg.auths[registry] + assert cfg['username'] == b'sakuya\xc3\xa6'.decode('utf8') + assert 
cfg['password'] == b'izayoi\xc3\xa6'.decode('utf8') + assert cfg['email'] == 'sakuya@scarlet.net' + assert cfg.get('auth') is None + + def test_load_config_unknown_keys(self): + folder = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, folder) + dockercfg_path = os.path.join(folder, 'config.json') + config = { + 'detachKeys': 'ctrl-q, ctrl-u, ctrl-i' + } + with open(dockercfg_path, 'w') as f: + json.dump(config, f) + + cfg = auth.load_config(dockercfg_path) + assert dict(cfg) == {'auths': {}} + + def test_load_config_invalid_auth_dict(self): + folder = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, folder) + dockercfg_path = os.path.join(folder, 'config.json') + config = { + 'auths': { + 'scarlet.net': {'sakuya': 'izayoi'} + } + } + with open(dockercfg_path, 'w') as f: + json.dump(config, f) + + cfg = auth.load_config(dockercfg_path) + assert dict(cfg) == {'auths': {'scarlet.net': {}}} + + def test_load_config_identity_token(self): + folder = tempfile.mkdtemp() + registry = 'scarlet.net' + token = '1ce1cebb-503e-7043-11aa-7feb8bd4a1ce' + self.addCleanup(shutil.rmtree, folder) + dockercfg_path = os.path.join(folder, 'config.json') + auth_entry = encode_auth({'username': 'sakuya'}).decode('ascii') + config = { + 'auths': { + registry: { + 'auth': auth_entry, + 'identitytoken': token + } + } + } + with open(dockercfg_path, 'w') as f: + json.dump(config, f) + + cfg = auth.load_config(dockercfg_path) + assert registry in cfg.auths + cfg = cfg.auths[registry] + assert 'IdentityToken' in cfg + assert cfg['IdentityToken'] == token + + +class CredstoreTest(unittest.TestCase): + def setUp(self): + self.authconfig = auth.AuthConfig({'credsStore': 'default'}) + self.default_store = InMemoryStore('default') + self.authconfig._stores['default'] = self.default_store + self.default_store.store( + 'https://gensokyo.jp/v2', 'sakuya', 'izayoi', + ) + self.default_store.store( + 'https://default.com/v2', 'user', 'hunter2', + ) + + def test_get_credential_store(self): + auth_config = auth.AuthConfig({ + 'credHelpers': { + 'registry1.io': 'truesecret', + 'registry2.io': 'powerlock' + }, + 'credsStore': 'blackbox', + }) + + assert auth_config.get_credential_store('registry1.io') == 'truesecret' + assert auth_config.get_credential_store('registry2.io') == 'powerlock' + assert auth_config.get_credential_store('registry3.io') == 'blackbox' + + def test_get_credential_store_no_default(self): + auth_config = auth.AuthConfig({ + 'credHelpers': { + 'registry1.io': 'truesecret', + 'registry2.io': 'powerlock' + }, + }) + assert auth_config.get_credential_store('registry2.io') == 'powerlock' + assert auth_config.get_credential_store('registry3.io') is None + + def test_get_credential_store_default_index(self): + auth_config = auth.AuthConfig({ + 'credHelpers': { + 'https://index.docker.io/v1/': 'powerlock' + }, + 'credsStore': 'truesecret' + }) + + assert auth_config.get_credential_store(None) == 'powerlock' + assert auth_config.get_credential_store('docker.io') == 'powerlock' + assert auth_config.get_credential_store('images.io') == 'truesecret' + + def test_get_credential_store_with_plain_dict(self): + auth_config = { + 'credHelpers': { + 'registry1.io': 'truesecret', + 'registry2.io': 'powerlock' + }, + 'credsStore': 'blackbox', + } + + assert auth.get_credential_store( + auth_config, 'registry1.io' + ) == 'truesecret' + assert auth.get_credential_store( + auth_config, 'registry2.io' + ) == 'powerlock' + assert auth.get_credential_store( + auth_config, 'registry3.io' + ) == 'blackbox' + + def 
test_get_all_credentials_credstore_only(self): + assert self.authconfig.get_all_credentials() == { + 'https://gensokyo.jp/v2': { + 'Username': 'sakuya', + 'Password': 'izayoi', + 'ServerAddress': 'https://gensokyo.jp/v2', + }, + 'gensokyo.jp': { + 'Username': 'sakuya', + 'Password': 'izayoi', + 'ServerAddress': 'https://gensokyo.jp/v2', + }, + 'https://default.com/v2': { + 'Username': 'user', + 'Password': 'hunter2', + 'ServerAddress': 'https://default.com/v2', + }, + 'default.com': { + 'Username': 'user', + 'Password': 'hunter2', + 'ServerAddress': 'https://default.com/v2', + }, + } + + def test_get_all_credentials_with_empty_credhelper(self): + self.authconfig['credHelpers'] = { + 'registry1.io': 'truesecret', + } + self.authconfig._stores['truesecret'] = InMemoryStore() + assert self.authconfig.get_all_credentials() == { + 'https://gensokyo.jp/v2': { + 'Username': 'sakuya', + 'Password': 'izayoi', + 'ServerAddress': 'https://gensokyo.jp/v2', + }, + 'gensokyo.jp': { + 'Username': 'sakuya', + 'Password': 'izayoi', + 'ServerAddress': 'https://gensokyo.jp/v2', + }, + 'https://default.com/v2': { + 'Username': 'user', + 'Password': 'hunter2', + 'ServerAddress': 'https://default.com/v2', + }, + 'default.com': { + 'Username': 'user', + 'Password': 'hunter2', + 'ServerAddress': 'https://default.com/v2', + }, + 'registry1.io': None, + } + + def test_get_all_credentials_with_credhelpers_only(self): + del self.authconfig['credsStore'] + assert self.authconfig.get_all_credentials() == {} + + self.authconfig['credHelpers'] = { + 'https://gensokyo.jp/v2': 'default', + 'https://default.com/v2': 'default', + } + + assert self.authconfig.get_all_credentials() == { + 'https://gensokyo.jp/v2': { + 'Username': 'sakuya', + 'Password': 'izayoi', + 'ServerAddress': 'https://gensokyo.jp/v2', + }, + 'gensokyo.jp': { + 'Username': 'sakuya', + 'Password': 'izayoi', + 'ServerAddress': 'https://gensokyo.jp/v2', + }, + 'https://default.com/v2': { + 'Username': 'user', + 'Password': 'hunter2', + 'ServerAddress': 'https://default.com/v2', + }, + 'default.com': { + 'Username': 'user', + 'Password': 'hunter2', + 'ServerAddress': 'https://default.com/v2', + }, + } + + def test_get_all_credentials_with_auths_entries(self): + self.authconfig.add_auth('registry1.io', { + 'ServerAddress': 'registry1.io', + 'Username': 'reimu', + 'Password': 'hakurei', + }) + + assert self.authconfig.get_all_credentials() == { + 'https://gensokyo.jp/v2': { + 'Username': 'sakuya', + 'Password': 'izayoi', + 'ServerAddress': 'https://gensokyo.jp/v2', + }, + 'gensokyo.jp': { + 'Username': 'sakuya', + 'Password': 'izayoi', + 'ServerAddress': 'https://gensokyo.jp/v2', + }, + 'https://default.com/v2': { + 'Username': 'user', + 'Password': 'hunter2', + 'ServerAddress': 'https://default.com/v2', + }, + 'default.com': { + 'Username': 'user', + 'Password': 'hunter2', + 'ServerAddress': 'https://default.com/v2', + }, + 'registry1.io': { + 'ServerAddress': 'registry1.io', + 'Username': 'reimu', + 'Password': 'hakurei', + }, + } + + def test_get_all_credentials_with_empty_auths_entry(self): + self.authconfig.add_auth('default.com', {}) + + assert self.authconfig.get_all_credentials() == { + 'https://gensokyo.jp/v2': { + 'Username': 'sakuya', + 'Password': 'izayoi', + 'ServerAddress': 'https://gensokyo.jp/v2', + }, + 'gensokyo.jp': { + 'Username': 'sakuya', + 'Password': 'izayoi', + 'ServerAddress': 'https://gensokyo.jp/v2', + }, + 'https://default.com/v2': { + 'Username': 'user', + 'Password': 'hunter2', + 'ServerAddress': 'https://default.com/v2', + }, + 
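# Every store entry is exposed twice: under the full registry URL and under the bare hostname. +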
'default.com': { + 'Username': 'user', + 'Password': 'hunter2', + 'ServerAddress': 'https://default.com/v2', + }, + } + + def test_get_all_credentials_credstore_overrides_auth_entry(self): + self.authconfig.add_auth('default.com', { + 'Username': 'shouldnotsee', + 'Password': 'thisentry', + 'ServerAddress': 'https://default.com/v2', + }) + + assert self.authconfig.get_all_credentials() == { + 'https://gensokyo.jp/v2': { + 'Username': 'sakuya', + 'Password': 'izayoi', + 'ServerAddress': 'https://gensokyo.jp/v2', + }, + 'gensokyo.jp': { + 'Username': 'sakuya', + 'Password': 'izayoi', + 'ServerAddress': 'https://gensokyo.jp/v2', + }, + 'https://default.com/v2': { + 'Username': 'user', + 'Password': 'hunter2', + 'ServerAddress': 'https://default.com/v2', + }, + 'default.com': { + 'Username': 'user', + 'Password': 'hunter2', + 'ServerAddress': 'https://default.com/v2', + }, + } + + def test_get_all_credentials_helpers_override_default(self): + self.authconfig['credHelpers'] = { + 'https://default.com/v2': 'truesecret', + } + truesecret = InMemoryStore('truesecret') + truesecret.store('https://default.com/v2', 'reimu', 'hakurei') + self.authconfig._stores['truesecret'] = truesecret + assert self.authconfig.get_all_credentials() == { + 'https://gensokyo.jp/v2': { + 'Username': 'sakuya', + 'Password': 'izayoi', + 'ServerAddress': 'https://gensokyo.jp/v2', + }, + 'gensokyo.jp': { + 'Username': 'sakuya', + 'Password': 'izayoi', + 'ServerAddress': 'https://gensokyo.jp/v2', + }, + 'https://default.com/v2': { + 'Username': 'reimu', + 'Password': 'hakurei', + 'ServerAddress': 'https://default.com/v2', + }, + 'default.com': { + 'Username': 'reimu', + 'Password': 'hakurei', + 'ServerAddress': 'https://default.com/v2', + }, + } + + def test_get_all_credentials_3_sources(self): + self.authconfig['credHelpers'] = { + 'registry1.io': 'truesecret', + } + truesecret = InMemoryStore('truesecret') + truesecret.store('registry1.io', 'reimu', 'hakurei') + self.authconfig._stores['truesecret'] = truesecret + self.authconfig.add_auth('registry2.io', { + 'ServerAddress': 'registry2.io', + 'Username': 'reimu', + 'Password': 'hakurei', + }) + + assert self.authconfig.get_all_credentials() == { + 'https://gensokyo.jp/v2': { + 'Username': 'sakuya', + 'Password': 'izayoi', + 'ServerAddress': 'https://gensokyo.jp/v2', + }, + 'gensokyo.jp': { + 'Username': 'sakuya', + 'Password': 'izayoi', + 'ServerAddress': 'https://gensokyo.jp/v2', + }, + 'https://default.com/v2': { + 'Username': 'user', + 'Password': 'hunter2', + 'ServerAddress': 'https://default.com/v2', + }, + 'default.com': { + 'Username': 'user', + 'Password': 'hunter2', + 'ServerAddress': 'https://default.com/v2', + }, + 'registry1.io': { + 'ServerAddress': 'registry1.io', + 'Username': 'reimu', + 'Password': 'hakurei', + }, + 'registry2.io': { + 'ServerAddress': 'registry2.io', + 'Username': 'reimu', + 'Password': 'hakurei', + } + } + + +class InMemoryStore(credentials.Store): + def __init__(self, *args, **kwargs): + self.__store = {} + + def get(self, server): + try: + return self.__store[server] + except KeyError as ke: + raise credentials.errors.CredentialsNotFound() from ke + + def store(self, server, username, secret): + self.__store[server] = { + 'ServerURL': server, + 'Username': username, + 'Secret': secret, + } + + def list(self): + return { + k: v['Username'] for k, v in self.__store.items() + } + + def erase(self, server): + del self.__store[server] diff --git a/tests/unit/client_test.py b/tests/unit/client_test.py new file mode 100644 index 
0000000000..5ba712d240 --- /dev/null +++ b/tests/unit/client_test.py @@ -0,0 +1,258 @@ +import datetime +import os +import unittest +from unittest import mock + +import pytest + +import docker +from docker.constants import ( + DEFAULT_DOCKER_API_VERSION, + DEFAULT_MAX_POOL_SIZE, + DEFAULT_TIMEOUT_SECONDS, + IS_WINDOWS_PLATFORM, +) +from docker.utils import kwargs_from_env + +from . import fake_api + +TEST_CERT_DIR = os.path.join(os.path.dirname(__file__), 'testdata/certs') +POOL_SIZE = 20 + + +class ClientTest(unittest.TestCase): + + @mock.patch('docker.api.APIClient.events') + def test_events(self, mock_func): + since = datetime.datetime(2016, 1, 1, 0, 0) + mock_func.return_value = fake_api.get_fake_events()[1] + client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION) + assert client.events(since=since) == mock_func.return_value + mock_func.assert_called_with(since=since) + + @mock.patch('docker.api.APIClient.info') + def test_info(self, mock_func): + mock_func.return_value = fake_api.get_fake_info()[1] + client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION) + assert client.info() == mock_func.return_value + mock_func.assert_called_with() + + @mock.patch('docker.api.APIClient.ping') + def test_ping(self, mock_func): + mock_func.return_value = True + client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION) + assert client.ping() is True + mock_func.assert_called_with() + + @mock.patch('docker.api.APIClient.version') + def test_version(self, mock_func): + mock_func.return_value = fake_api.get_fake_version()[1] + client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION) + assert client.version() == mock_func.return_value + mock_func.assert_called_with() + + def test_call_api_client_method(self): + client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION) + with pytest.raises(AttributeError) as cm: + client.create_container() + s = cm.exconly() + assert "'DockerClient' object has no attribute 'create_container'" in s + assert "this method is now on the object APIClient" in s + + with pytest.raises(AttributeError) as cm: + client.abcdef() + s = cm.exconly() + assert "'DockerClient' object has no attribute 'abcdef'" in s + assert "this method is now on the object APIClient" not in s + + def test_call_containers(self): + client = docker.DockerClient( + version=DEFAULT_DOCKER_API_VERSION, + **kwargs_from_env()) + + with pytest.raises(TypeError) as cm: + client.containers() + + s = cm.exconly() + assert "'ContainerCollection' object is not callable" in s + assert "docker.APIClient" in s + + @pytest.mark.skipif( + IS_WINDOWS_PLATFORM, reason='Unix Connection Pool only on Linux' + ) + @mock.patch("docker.transport.unixconn.UnixHTTPConnectionPool") + def test_default_pool_size_unix(self, mock_obj): + client = docker.DockerClient( + version=DEFAULT_DOCKER_API_VERSION + ) + mock_obj.return_value.urlopen.return_value.status = 200 + client.ping() + + base_url = f"{client.api.base_url}/v{client.api._version}/_ping" + + mock_obj.assert_called_once_with(base_url, + "/var/run/docker.sock", + 60, + maxsize=DEFAULT_MAX_POOL_SIZE + ) + + @pytest.mark.skipif( + not IS_WINDOWS_PLATFORM, reason='Npipe Connection Pool only on Windows' + ) + @mock.patch("docker.transport.npipeconn.NpipeHTTPConnectionPool") + def test_default_pool_size_win(self, mock_obj): + client = docker.DockerClient( + version=DEFAULT_DOCKER_API_VERSION + ) + mock_obj.return_value.urlopen.return_value.status = 200 + client.ping() + + mock_obj.assert_called_once_with("//./pipe/docker_engine", + 60, + 
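# The pool is expected to receive the pipe address, the 60s default timeout, and the maxsize under test. +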
maxsize=DEFAULT_MAX_POOL_SIZE + ) + + @pytest.mark.skipif( + IS_WINDOWS_PLATFORM, reason='Unix Connection Pool only on Linux' + ) + @mock.patch("docker.transport.unixconn.UnixHTTPConnectionPool") + def test_pool_size_unix(self, mock_obj): + client = docker.DockerClient( + version=DEFAULT_DOCKER_API_VERSION, + max_pool_size=POOL_SIZE + ) + mock_obj.return_value.urlopen.return_value.status = 200 + client.ping() + + base_url = f"{client.api.base_url}/v{client.api._version}/_ping" + + mock_obj.assert_called_once_with(base_url, + "/var/run/docker.sock", + 60, + maxsize=POOL_SIZE + ) + + @pytest.mark.skipif( + not IS_WINDOWS_PLATFORM, reason='Npipe Connection Pool only on Windows' + ) + @mock.patch("docker.transport.npipeconn.NpipeHTTPConnectionPool") + def test_pool_size_win(self, mock_obj): + client = docker.DockerClient( + version=DEFAULT_DOCKER_API_VERSION, + max_pool_size=POOL_SIZE + ) + mock_obj.return_value.urlopen.return_value.status = 200 + client.ping() + + mock_obj.assert_called_once_with("//./pipe/docker_engine", + 60, + maxsize=POOL_SIZE + ) + + +class FromEnvTest(unittest.TestCase): + + def setUp(self): + self.os_environ = os.environ.copy() + + def tearDown(self): + os.environ.clear() + os.environ.update(self.os_environ) + + def test_from_env(self): + """Test that environment variables are passed through to + utils.kwargs_from_env(). KwargsFromEnvTest tests that environment + variables are parsed correctly.""" + os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376', + DOCKER_CERT_PATH=TEST_CERT_DIR, + DOCKER_TLS_VERIFY='1') + client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION) + assert client.api.base_url == "https://192.168.59.103:2376" + + def test_from_env_with_version(self): + os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376', + DOCKER_CERT_PATH=TEST_CERT_DIR, + DOCKER_TLS_VERIFY='1') + client = docker.from_env(version='2.32') + assert client.api.base_url == "https://192.168.59.103:2376" + assert client.api._version == '2.32' + + def test_from_env_without_version_uses_default(self): + client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION) + + assert client.api._version == DEFAULT_DOCKER_API_VERSION + + def test_from_env_without_timeout_uses_default(self): + client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION) + + assert client.api.timeout == DEFAULT_TIMEOUT_SECONDS + + @pytest.mark.skipif( + os.environ.get('DOCKER_HOST', '').startswith('tcp://') or IS_WINDOWS_PLATFORM, + reason='Requires a Unix socket' + ) + @mock.patch("docker.transport.unixconn.UnixHTTPConnectionPool") + def test_default_pool_size_from_env_unix(self, mock_obj): + client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION) + mock_obj.return_value.urlopen.return_value.status = 200 + client.ping() + + base_url = f"{client.api.base_url}/v{client.api._version}/_ping" + + mock_obj.assert_called_once_with(base_url, + "/var/run/docker.sock", + 60, + maxsize=DEFAULT_MAX_POOL_SIZE + ) + + @pytest.mark.skipif( + not IS_WINDOWS_PLATFORM, reason='Npipe Connection Pool only on Windows' + ) + @mock.patch("docker.transport.npipeconn.NpipeHTTPConnectionPool") + def test_default_pool_size_from_env_win(self, mock_obj): + client = docker.from_env(version=DEFAULT_DOCKER_API_VERSION) + mock_obj.return_value.urlopen.return_value.status = 200 + client.ping() + + mock_obj.assert_called_once_with("//./pipe/docker_engine", + 60, + maxsize=DEFAULT_MAX_POOL_SIZE + ) + + @pytest.mark.skipif( + os.environ.get('DOCKER_HOST', '').startswith('tcp://') or IS_WINDOWS_PLATFORM, + reason='Requires a Unix 
socket' + ) + @mock.patch("docker.transport.unixconn.UnixHTTPConnectionPool") + def test_pool_size_from_env_unix(self, mock_obj): + client = docker.from_env( + version=DEFAULT_DOCKER_API_VERSION, + max_pool_size=POOL_SIZE + ) + mock_obj.return_value.urlopen.return_value.status = 200 + client.ping() + + base_url = f"{client.api.base_url}/v{client.api._version}/_ping" + + mock_obj.assert_called_once_with(base_url, + "/var/run/docker.sock", + 60, + maxsize=POOL_SIZE + ) + + @pytest.mark.skipif( + not IS_WINDOWS_PLATFORM, reason='Npipe Connection Pool only on Windows' + ) + @mock.patch("docker.transport.npipeconn.NpipeHTTPConnectionPool") + def test_pool_size_from_env_win(self, mock_obj): + client = docker.from_env( + version=DEFAULT_DOCKER_API_VERSION, + max_pool_size=POOL_SIZE + ) + mock_obj.return_value.urlopen.return_value.status = 200 + client.ping() + + mock_obj.assert_called_once_with("//./pipe/docker_engine", + 60, + maxsize=POOL_SIZE + ) diff --git a/tests/unit/context_test.py b/tests/unit/context_test.py new file mode 100644 index 0000000000..9e9fc9ba13 --- /dev/null +++ b/tests/unit/context_test.py @@ -0,0 +1,51 @@ +import unittest + +import pytest + +import docker +from docker.constants import DEFAULT_NPIPE, DEFAULT_UNIX_SOCKET, IS_WINDOWS_PLATFORM +from docker.context import Context, ContextAPI + + +class BaseContextTest(unittest.TestCase): + @pytest.mark.skipif( + IS_WINDOWS_PLATFORM, reason='Linux specific path check' + ) + def test_url_compatibility_on_linux(self): + c = Context("test") + assert c.Host == DEFAULT_UNIX_SOCKET[5:] + + @pytest.mark.skipif( + not IS_WINDOWS_PLATFORM, reason='Windows specific path check' + ) + def test_url_compatibility_on_windows(self): + c = Context("test") + assert c.Host == DEFAULT_NPIPE + + def test_fail_on_default_context_create(self): + with pytest.raises(docker.errors.ContextException): + ContextAPI.create_context("default") + + def test_default_in_context_list(self): + found = False + ctx = ContextAPI.contexts() + for c in ctx: + if c.Name == "default": + found = True + assert found is True + + def test_get_current_context(self): + assert ContextAPI.get_current_context().Name == "default" + + def test_https_host(self): + c = Context("test", host="tcp://testdomain:8080", tls=True) + assert c.Host == "https://testdomain:8080" + + def test_context_inspect_without_params(self): + ctx = ContextAPI.inspect_context() + assert ctx["Name"] == "default" + assert ctx["Metadata"]["StackOrchestrator"] == "swarm" + assert ctx["Endpoints"]["docker"]["Host"] in ( + DEFAULT_NPIPE, + DEFAULT_UNIX_SOCKET[5:], + ) diff --git a/tests/unit/dockertypes_test.py b/tests/unit/dockertypes_test.py new file mode 100644 index 0000000000..03e7d2eda0 --- /dev/null +++ b/tests/unit/dockertypes_test.py @@ -0,0 +1,493 @@ +import unittest +from unittest import mock + +import pytest + +from docker.constants import DEFAULT_DOCKER_API_VERSION +from docker.errors import InvalidArgument, InvalidVersion +from docker.types import ( + ContainerSpec, + EndpointConfig, + HostConfig, + IPAMConfig, + IPAMPool, + LogConfig, + Mount, + ServiceMode, + Ulimit, +) +from docker.types.services import convert_service_ports + + +def create_host_config(*args, **kwargs): + return HostConfig(*args, **kwargs) + + +class HostConfigTest(unittest.TestCase): + def test_create_host_config_no_options_newer_api_version(self): + config = create_host_config(version='1.21') + assert config['NetworkMode'] == 'default' + + def test_create_host_config_invalid_cpu_cfs_types(self): + with pytest.raises(TypeError): 
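+ # cpu_quota and cpu_period only accept integers; the string and float values below must raise TypeError.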
+ create_host_config(version='1.21', cpu_quota='0') + + with pytest.raises(TypeError): + create_host_config(version='1.21', cpu_period='0') + + with pytest.raises(TypeError): + create_host_config(version='1.21', cpu_quota=23.11) + + with pytest.raises(TypeError): + create_host_config(version='1.21', cpu_period=1999.0) + + def test_create_host_config_with_cpu_quota(self): + config = create_host_config(version='1.21', cpu_quota=1999) + assert config.get('CpuQuota') == 1999 + + def test_create_host_config_with_cpu_period(self): + config = create_host_config(version='1.21', cpu_period=1999) + assert config.get('CpuPeriod') == 1999 + + def test_create_host_config_with_blkio_constraints(self): + blkio_rate = [{"Path": "/dev/sda", "Rate": 1000}] + config = create_host_config( + version='1.22', blkio_weight=1999, blkio_weight_device=blkio_rate, + device_read_bps=blkio_rate, device_write_bps=blkio_rate, + device_read_iops=blkio_rate, device_write_iops=blkio_rate + ) + + assert config.get('BlkioWeight') == 1999 + assert config.get('BlkioWeightDevice') is blkio_rate + assert config.get('BlkioDeviceReadBps') is blkio_rate + assert config.get('BlkioDeviceWriteBps') is blkio_rate + assert config.get('BlkioDeviceReadIOps') is blkio_rate + assert config.get('BlkioDeviceWriteIOps') is blkio_rate + assert blkio_rate[0]['Path'] == "/dev/sda" + assert blkio_rate[0]['Rate'] == 1000 + + def test_create_host_config_with_shm_size(self): + config = create_host_config(version='1.22', shm_size=67108864) + assert config.get('ShmSize') == 67108864 + + def test_create_host_config_with_shm_size_in_mb(self): + config = create_host_config(version='1.22', shm_size='64M') + assert config.get('ShmSize') == 67108864 + + def test_create_host_config_with_oom_kill_disable(self): + config = create_host_config(version='1.21', oom_kill_disable=True) + assert config.get('OomKillDisable') is True + + def test_create_host_config_with_userns_mode(self): + config = create_host_config(version='1.23', userns_mode='host') + assert config.get('UsernsMode') == 'host' + with pytest.raises(InvalidVersion): + create_host_config(version='1.22', userns_mode='host') + with pytest.raises(ValueError): + create_host_config(version='1.23', userns_mode='host12') + + def test_create_host_config_with_uts(self): + config = create_host_config(version='1.15', uts_mode='host') + assert config.get('UTSMode') == 'host' + with pytest.raises(ValueError): + create_host_config(version='1.15', uts_mode='host12') + + def test_create_host_config_with_oom_score_adj(self): + config = create_host_config(version='1.22', oom_score_adj=100) + assert config.get('OomScoreAdj') == 100 + with pytest.raises(InvalidVersion): + create_host_config(version='1.21', oom_score_adj=100) + with pytest.raises(TypeError): + create_host_config(version='1.22', oom_score_adj='100') + + def test_create_host_config_with_dns_opt(self): + + tested_opts = ['use-vc', 'no-tld-query'] + config = create_host_config(version='1.21', dns_opt=tested_opts) + dns_opts = config.get('DnsOptions') + + assert 'use-vc' in dns_opts + assert 'no-tld-query' in dns_opts + + def test_create_host_config_with_mem_reservation(self): + config = create_host_config(version='1.21', mem_reservation=67108864) + assert config.get('MemoryReservation') == 67108864 + + def test_create_host_config_with_kernel_memory(self): + config = create_host_config(version='1.21', kernel_memory=67108864) + assert config.get('KernelMemory') == 67108864 + + def test_create_host_config_with_pids_limit(self): + config = 
create_host_config(version='1.23', pids_limit=1024) + assert config.get('PidsLimit') == 1024 + + with pytest.raises(InvalidVersion): + create_host_config(version='1.22', pids_limit=1024) + with pytest.raises(TypeError): + create_host_config(version='1.23', pids_limit='1024') + + def test_create_host_config_with_isolation(self): + config = create_host_config(version='1.24', isolation='hyperv') + assert config.get('Isolation') == 'hyperv' + + with pytest.raises(InvalidVersion): + create_host_config(version='1.23', isolation='hyperv') + with pytest.raises(TypeError): + create_host_config( + version='1.24', isolation={'isolation': 'hyperv'} + ) + + def test_create_host_config_pid_mode(self): + with pytest.raises(ValueError): + create_host_config(version='1.23', pid_mode='baccab125') + + config = create_host_config(version='1.23', pid_mode='host') + assert config.get('PidMode') == 'host' + config = create_host_config(version='1.24', pid_mode='baccab125') + assert config.get('PidMode') == 'baccab125' + + def test_create_host_config_invalid_mem_swappiness(self): + with pytest.raises(TypeError): + create_host_config(version='1.24', mem_swappiness='40') + + def test_create_host_config_with_volume_driver(self): + config = create_host_config(version='1.21', volume_driver='local') + assert config.get('VolumeDriver') == 'local' + + def test_create_host_config_invalid_cpu_count_types(self): + with pytest.raises(TypeError): + create_host_config(version='1.25', cpu_count='1') + + def test_create_host_config_with_cpu_count(self): + config = create_host_config(version='1.25', cpu_count=2) + assert config.get('CpuCount') == 2 + with pytest.raises(InvalidVersion): + create_host_config(version='1.24', cpu_count=1) + + def test_create_host_config_invalid_cpu_percent_types(self): + with pytest.raises(TypeError): + create_host_config(version='1.25', cpu_percent='1') + + def test_create_host_config_with_cpu_percent(self): + config = create_host_config(version='1.25', cpu_percent=15) + assert config.get('CpuPercent') == 15 + with pytest.raises(InvalidVersion): + create_host_config(version='1.24', cpu_percent=10) + + def test_create_host_config_invalid_nano_cpus_types(self): + with pytest.raises(TypeError): + create_host_config(version='1.25', nano_cpus='0') + + def test_create_host_config_with_nano_cpus(self): + config = create_host_config(version='1.25', nano_cpus=1000) + assert config.get('NanoCpus') == 1000 + with pytest.raises(InvalidVersion): + create_host_config(version='1.24', nano_cpus=1) + + def test_create_host_config_with_cpu_rt_period_types(self): + with pytest.raises(TypeError): + create_host_config(version='1.25', cpu_rt_period='1000') + + def test_create_host_config_with_cpu_rt_period(self): + config = create_host_config(version='1.25', cpu_rt_period=1000) + assert config.get('CPURealtimePeriod') == 1000 + with pytest.raises(InvalidVersion): + create_host_config(version='1.24', cpu_rt_period=1000) + + def test_create_host_config_with_cpu_rt_runtime_types(self): + with pytest.raises(TypeError): + create_host_config(version='1.25', cpu_rt_runtime='1000') + + def test_create_host_config_with_cpu_rt_runtime(self): + config = create_host_config(version='1.25', cpu_rt_runtime=1000) + assert config.get('CPURealtimeRuntime') == 1000 + with pytest.raises(InvalidVersion): + create_host_config(version='1.24', cpu_rt_runtime=1000) + + +class ContainerSpecTest(unittest.TestCase): + def test_parse_mounts(self): + spec = ContainerSpec( + image='scratch', mounts=[ + '/local:/container', + '/local2:/container2:ro', +
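# Short-form strings and Mount objects may be mixed; ContainerSpec normalizes every entry to a Mount, as asserted below. +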
Mount(target='/target', source='/source') + ] + ) + + assert 'Mounts' in spec + assert len(spec['Mounts']) == 3 + for mount in spec['Mounts']: + assert isinstance(mount, Mount) + + +class UlimitTest(unittest.TestCase): + def test_create_host_config_dict_ulimit(self): + ulimit_dct = {'name': 'nofile', 'soft': 8096} + config = create_host_config( + ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION + ) + assert 'Ulimits' in config + assert len(config['Ulimits']) == 1 + ulimit_obj = config['Ulimits'][0] + assert isinstance(ulimit_obj, Ulimit) + assert ulimit_obj.name == ulimit_dct['name'] + assert ulimit_obj.soft == ulimit_dct['soft'] + assert ulimit_obj['Soft'] == ulimit_obj.soft + + def test_create_host_config_dict_ulimit_capitals(self): + ulimit_dct = {'Name': 'nofile', 'Soft': 8096, 'Hard': 8096 * 4} + config = create_host_config( + ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION + ) + assert 'Ulimits' in config + assert len(config['Ulimits']) == 1 + ulimit_obj = config['Ulimits'][0] + assert isinstance(ulimit_obj, Ulimit) + assert ulimit_obj.name == ulimit_dct['Name'] + assert ulimit_obj.soft == ulimit_dct['Soft'] + assert ulimit_obj.hard == ulimit_dct['Hard'] + assert ulimit_obj['Soft'] == ulimit_obj.soft + + def test_create_host_config_obj_ulimit(self): + ulimit_dct = Ulimit(name='nofile', soft=8096) + config = create_host_config( + ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION + ) + assert 'Ulimits' in config + assert len(config['Ulimits']) == 1 + ulimit_obj = config['Ulimits'][0] + assert isinstance(ulimit_obj, Ulimit) + assert ulimit_obj == ulimit_dct + + def test_ulimit_invalid_type(self): + with pytest.raises(ValueError): + Ulimit(name=None) + with pytest.raises(ValueError): + Ulimit(name='hello', soft='123') + with pytest.raises(ValueError): + Ulimit(name='hello', hard='456') + + +class LogConfigTest(unittest.TestCase): + def test_create_host_config_dict_logconfig(self): + dct = {'type': LogConfig.types.SYSLOG, 'config': {'key1': 'val1'}} + config = create_host_config( + version=DEFAULT_DOCKER_API_VERSION, log_config=dct + ) + assert 'LogConfig' in config + assert isinstance(config['LogConfig'], LogConfig) + assert dct['type'] == config['LogConfig'].type + + def test_create_host_config_obj_logconfig(self): + obj = LogConfig(type=LogConfig.types.SYSLOG, config={'key1': 'val1'}) + config = create_host_config( + version=DEFAULT_DOCKER_API_VERSION, log_config=obj + ) + assert 'LogConfig' in config + assert isinstance(config['LogConfig'], LogConfig) + assert obj == config['LogConfig'] + + def test_logconfig_invalid_config_type(self): + with pytest.raises(ValueError): + LogConfig(type=LogConfig.types.JSON, config='helloworld') + + +class EndpointConfigTest(unittest.TestCase): + def test_create_endpoint_config_with_aliases(self): + config = EndpointConfig(version='1.22', aliases=['foo', 'bar']) + assert config == {'Aliases': ['foo', 'bar']} + + with pytest.raises(InvalidVersion): + EndpointConfig(version='1.21', aliases=['foo', 'bar']) + + +class IPAMConfigTest(unittest.TestCase): + def test_create_ipam_config(self): + ipam_pool = IPAMPool(subnet='192.168.52.0/24', + gateway='192.168.52.254') + + ipam_config = IPAMConfig(pool_configs=[ipam_pool]) + assert ipam_config == { + 'Driver': 'default', + 'Config': [{ + 'Subnet': '192.168.52.0/24', + 'Gateway': '192.168.52.254', + 'AuxiliaryAddresses': None, + 'IPRange': None, + }] + } + + +class ServiceModeTest(unittest.TestCase): + def test_replicated_simple(self): + mode = ServiceMode('replicated') + assert mode == 
{'replicated': {}} + assert mode.mode == 'replicated' + assert mode.replicas is None + + def test_global_simple(self): + mode = ServiceMode('global') + assert mode == {'global': {}} + assert mode.mode == 'global' + assert mode.replicas is None + + def test_replicated_job_simple(self): + mode = ServiceMode('replicated-job') + assert mode == {'ReplicatedJob': {}} + assert mode.mode == 'ReplicatedJob' + assert mode.replicas is None + + def test_global_job_simple(self): + mode = ServiceMode('global-job') + assert mode == {'GlobalJob': {}} + assert mode.mode == 'GlobalJob' + assert mode.replicas is None + + def test_global_replicas_error(self): + with pytest.raises(InvalidArgument): + ServiceMode('global', 21) + + def test_global_job_replicas_simple(self): + with pytest.raises(InvalidArgument): + ServiceMode('global-job', 21) + + def test_replicated_replicas(self): + mode = ServiceMode('replicated', 21) + assert mode == {'replicated': {'Replicas': 21}} + assert mode.mode == 'replicated' + assert mode.replicas == 21 + + def test_replicated_replicas_0(self): + mode = ServiceMode('replicated', 0) + assert mode == {'replicated': {'Replicas': 0}} + assert mode.mode == 'replicated' + assert mode.replicas == 0 + + def test_invalid_mode(self): + with pytest.raises(InvalidArgument): + ServiceMode('foobar') + + +class MountTest(unittest.TestCase): + def test_parse_mount_string_ro(self): + mount = Mount.parse_mount_string("/foo/bar:/baz:ro") + assert mount['Source'] == "/foo/bar" + assert mount['Target'] == "/baz" + assert mount['ReadOnly'] is True + + def test_parse_mount_string_rw(self): + mount = Mount.parse_mount_string("/foo/bar:/baz:rw") + assert mount['Source'] == "/foo/bar" + assert mount['Target'] == "/baz" + assert not mount['ReadOnly'] + + def test_parse_mount_string_short_form(self): + mount = Mount.parse_mount_string("/foo/bar:/baz") + assert mount['Source'] == "/foo/bar" + assert mount['Target'] == "/baz" + assert not mount['ReadOnly'] + + def test_parse_mount_string_no_source(self): + mount = Mount.parse_mount_string("foo/bar") + assert mount['Source'] is None + assert mount['Target'] == "foo/bar" + assert not mount['ReadOnly'] + + def test_parse_mount_string_invalid(self): + with pytest.raises(InvalidArgument): + Mount.parse_mount_string("foo:bar:baz:rw") + + def test_parse_mount_named_volume(self): + mount = Mount.parse_mount_string("foobar:/baz") + assert mount['Source'] == 'foobar' + assert mount['Target'] == '/baz' + assert mount['Type'] == 'volume' + + def test_parse_mount_bind(self): + mount = Mount.parse_mount_string('/foo/bar:/baz') + assert mount['Source'] == "/foo/bar" + assert mount['Target'] == "/baz" + assert mount['Type'] == 'bind' + + @pytest.mark.xfail + def test_parse_mount_bind_windows(self): + with mock.patch('docker.types.services.IS_WINDOWS_PLATFORM', True): + mount = Mount.parse_mount_string('C:/foo/bar:/baz') + assert mount['Source'] == "C:/foo/bar" + assert mount['Target'] == "/baz" + assert mount['Type'] == 'bind' + + +class ServicePortsTest(unittest.TestCase): + def test_convert_service_ports_simple(self): + ports = {8080: 80} + assert convert_service_ports(ports) == [{ + 'Protocol': 'tcp', + 'PublishedPort': 8080, + 'TargetPort': 80, + }] + + def test_convert_service_ports_with_protocol(self): + ports = {8080: (80, 'udp')} + + assert convert_service_ports(ports) == [{ + 'Protocol': 'udp', + 'PublishedPort': 8080, + 'TargetPort': 80, + }] + + def test_convert_service_ports_with_protocol_and_mode(self): + ports = {8080: (80, 'udp', 'ingress')} + + assert 
convert_service_ports(ports) == [{ + 'Protocol': 'udp', + 'PublishedPort': 8080, + 'TargetPort': 80, + 'PublishMode': 'ingress', + }] + + def test_convert_service_ports_invalid(self): + ports = {8080: ('way', 'too', 'many', 'items', 'here')} + + with pytest.raises(ValueError): + convert_service_ports(ports) + + def test_convert_service_ports_no_protocol_and_mode(self): + ports = {8080: (80, None, 'host')} + + assert convert_service_ports(ports) == [{ + 'Protocol': 'tcp', + 'PublishedPort': 8080, + 'TargetPort': 80, + 'PublishMode': 'host', + }] + + def test_convert_service_ports_multiple(self): + ports = { + 8080: (80, None, 'host'), + 9999: 99, + 2375: (2375,) + } + + converted_ports = convert_service_ports(ports) + assert { + 'Protocol': 'tcp', + 'PublishedPort': 8080, + 'TargetPort': 80, + 'PublishMode': 'host', + } in converted_ports + + assert { + 'Protocol': 'tcp', + 'PublishedPort': 9999, + 'TargetPort': 99, + } in converted_ports + + assert { + 'Protocol': 'tcp', + 'PublishedPort': 2375, + 'TargetPort': 2375, + } in converted_ports + + assert len(converted_ports) == 3 diff --git a/tests/unit/errors_test.py b/tests/unit/errors_test.py new file mode 100644 index 0000000000..5ccc40474f --- /dev/null +++ b/tests/unit/errors_test.py @@ -0,0 +1,159 @@ +import unittest + +import requests + +from docker.errors import ( + APIError, + ContainerError, + DockerException, + create_api_error_from_http_exception, + create_unexpected_kwargs_error, +) + +from .fake_api import FAKE_CONTAINER_ID, FAKE_IMAGE_ID +from .fake_api_client import make_fake_client + + +class APIErrorTest(unittest.TestCase): + def test_api_error_is_caught_by_dockerexception(self): + try: + raise APIError("this should be caught by DockerException") + except DockerException: + pass + + def test_status_code_200(self): + """The status_code property is present with 200 response.""" + resp = requests.Response() + resp.status_code = 200 + err = APIError('', response=resp) + assert err.status_code == 200 + + def test_status_code_400(self): + """The status_code property is present with 400 response.""" + resp = requests.Response() + resp.status_code = 400 + err = APIError('', response=resp) + assert err.status_code == 400 + + def test_status_code_500(self): + """The status_code property is present with 500 response.""" + resp = requests.Response() + resp.status_code = 500 + err = APIError('', response=resp) + assert err.status_code == 500 + + def test_is_server_error_200(self): + """Report not server error on 200 response.""" + resp = requests.Response() + resp.status_code = 200 + err = APIError('', response=resp) + assert err.is_server_error() is False + + def test_is_server_error_300(self): + """Report not server error on 300 response.""" + resp = requests.Response() + resp.status_code = 300 + err = APIError('', response=resp) + assert err.is_server_error() is False + + def test_is_server_error_400(self): + """Report not server error on 400 response.""" + resp = requests.Response() + resp.status_code = 400 + err = APIError('', response=resp) + assert err.is_server_error() is False + + def test_is_server_error_500(self): + """Report server error on 500 response.""" + resp = requests.Response() + resp.status_code = 500 + err = APIError('', response=resp) + assert err.is_server_error() is True + + def test_is_client_error_500(self): + """Report not client error on 500 response.""" + resp = requests.Response() + resp.status_code = 500 + err = APIError('', response=resp) + assert err.is_client_error() is False + + def 
test_is_client_error_400(self): + """Report client error on 400 response.""" + resp = requests.Response() + resp.status_code = 400 + err = APIError('', response=resp) + assert err.is_client_error() is True + + def test_is_error_300(self): + """Report no error on 300 response.""" + resp = requests.Response() + resp.status_code = 300 + err = APIError('', response=resp) + assert err.is_error() is False + + def test_is_error_400(self): + """Report error on 400 response.""" + resp = requests.Response() + resp.status_code = 400 + err = APIError('', response=resp) + assert err.is_error() is True + + def test_is_error_500(self): + """Report error on 500 response.""" + resp = requests.Response() + resp.status_code = 500 + err = APIError('', response=resp) + assert err.is_error() is True + + def test_create_error_from_exception(self): + resp = requests.Response() + resp.status_code = 500 + err = APIError('') + try: + resp.raise_for_status() + except requests.exceptions.HTTPError as e: + try: + create_api_error_from_http_exception(e) + except APIError as e: + err = e + assert err.is_server_error() is True + + +class ContainerErrorTest(unittest.TestCase): + def test_container_without_stderr(self): + """The message does not contain stderr""" + client = make_fake_client() + container = client.containers.get(FAKE_CONTAINER_ID) + command = "echo Hello World" + exit_status = 42 + image = FAKE_IMAGE_ID + stderr = None + + err = ContainerError(container, exit_status, command, image, stderr) + msg = ("Command '{}' in image '{}' returned non-zero exit status {}" + ).format(command, image, exit_status) + assert str(err) == msg + + def test_container_with_stderr(self): + """The message contains stderr""" + client = make_fake_client() + container = client.containers.get(FAKE_CONTAINER_ID) + command = "echo Hello World" + exit_status = 42 + image = FAKE_IMAGE_ID + stderr = "Something went wrong" + + err = ContainerError(container, exit_status, command, image, stderr) + msg = ("Command '{}' in image '{}' returned non-zero exit status {}: " + "{}").format(command, image, exit_status, stderr) + assert str(err) == msg + + +class CreateUnexpectedKwargsErrorTest(unittest.TestCase): + def test_create_unexpected_kwargs_error_single(self): + e = create_unexpected_kwargs_error('f', {'foo': 'bar'}) + assert str(e) == "f() got an unexpected keyword argument 'foo'" + + def test_create_unexpected_kwargs_error_multiple(self): + e = create_unexpected_kwargs_error('f', {'foo': 'bar', 'baz': 'bosh'}) + assert str(e) == "f() got unexpected keyword arguments 'baz', 'foo'" diff --git a/tests/unit/fake_api.py b/tests/unit/fake_api.py new file mode 100644 index 0000000000..03e53cc648 --- /dev/null +++ b/tests/unit/fake_api.py @@ -0,0 +1,642 @@ +from docker import constants + +from . 
import fake_stat + +CURRENT_VERSION = f'v{constants.DEFAULT_DOCKER_API_VERSION}' + +FAKE_CONTAINER_ID = '81cf499cc928ce3fedc250a080d2b9b978df20e4517304c45211e8a68b33e254' +FAKE_IMAGE_ID = 'sha256:fe7a8fc91d3f17835cbb3b86a1c60287500ab01a53bc79c4497d09f07a3f0688' # noqa: E501 +FAKE_EXEC_ID = 'b098ec855f10434b5c7c973c78484208223a83f663ddaefb0f02a242840cb1c7' +FAKE_NETWORK_ID = '1999cfb42e414483841a125ade3c276c3cb80cb3269b14e339354ac63a31b02c' +FAKE_IMAGE_NAME = 'test_image' +FAKE_TARBALL_PATH = '/path/to/tarball' +FAKE_REPO_NAME = 'repo' +FAKE_TAG_NAME = 'tag' +FAKE_FILE_NAME = 'file' +FAKE_URL = 'myurl' +FAKE_PATH = '/path' +FAKE_VOLUME_NAME = 'perfectcherryblossom' +FAKE_NODE_ID = '24ifsmvkjbyhk' +FAKE_SECRET_ID = 'epdyrw4tsi03xy3deu8g8ly6o' +FAKE_SECRET_NAME = 'super_secret' +FAKE_CONFIG_ID = 'sekvs771242jfdjnvfuds8232' +FAKE_CONFIG_NAME = 'super_config' + +# Each method is prefixed with HTTP method (get, post...) +# for clarity and readability + + +def get_fake_version(): + status_code = 200 + response = { + 'ApiVersion': '1.35', + 'Arch': 'amd64', + 'BuildTime': '2018-01-10T20:09:37.000000000+00:00', + 'Components': [{ + 'Details': { + 'ApiVersion': '1.35', + 'Arch': 'amd64', + 'BuildTime': '2018-01-10T20:09:37.000000000+00:00', + 'Experimental': 'false', + 'GitCommit': '03596f5', + 'GoVersion': 'go1.9.2', + 'KernelVersion': '4.4.0-112-generic', + 'MinAPIVersion': '1.12', + 'Os': 'linux' + }, + 'Name': 'Engine', + 'Version': '18.01.0-ce' + }], + 'GitCommit': '03596f5', + 'GoVersion': 'go1.9.2', + 'KernelVersion': '4.4.0-112-generic', + 'MinAPIVersion': '1.12', + 'Os': 'linux', + 'Platform': {'Name': ''}, + 'Version': '18.01.0-ce' + } + + return status_code, response + + +def get_fake_info(): + status_code = 200 + response = {'Containers': 1, 'Images': 1, 'Debug': False, + 'MemoryLimit': False, 'SwapLimit': False, + 'IPv4Forwarding': True} + return status_code, response + + +def post_fake_auth(): + status_code = 200 + response = {'Status': 'Login Succeeded', + 'IdentityToken': '9cbaf023786cd7'} + return status_code, response + + +def get_fake_ping(): + return 200, "OK" + + +def get_fake_search(): + status_code = 200 + response = [{'Name': 'busybox', 'Description': 'Fake Description'}] + return status_code, response + + +def get_fake_images(): + status_code = 200 + response = [{ + 'Id': FAKE_IMAGE_ID, + 'Created': '2 days ago', + 'Repository': 'busybox', + 'RepoTags': ['busybox:latest', 'busybox:1.0'], + }] + return status_code, response + + +def get_fake_image_history(): + status_code = 200 + response = [ + { + "Id": "b750fe79269d", + "Created": 1364102658, + "CreatedBy": "/bin/bash" + }, + { + "Id": "27cf78414709", + "Created": 1364068391, + "CreatedBy": "" + } + ] + + return status_code, response + + +def get_fake_containers(): + status_code = 200 + response = [{ + 'Id': FAKE_CONTAINER_ID, + 'Image': 'busybox:latest', + 'Created': '2 days ago', + 'Command': 'true', + 'Status': 'fake status' + }] + return status_code, response + + +def post_fake_start_container(): + status_code = 200 + response = {'Id': FAKE_CONTAINER_ID} + return status_code, response + + +def post_fake_resize_container(): + status_code = 200 + response = {'Id': FAKE_CONTAINER_ID} + return status_code, response + + +def post_fake_create_container(): + status_code = 200 + response = {'Id': FAKE_CONTAINER_ID} + return status_code, response + + +def get_fake_inspect_container(tty=False): + status_code = 200 + response = { + 'Id': FAKE_CONTAINER_ID, + 'Config': {'Labels': {'foo': 'bar'}, 'Privileged': True, 'Tty': tty}, + 
'ID': FAKE_CONTAINER_ID, + 'Image': 'busybox:latest', + 'Name': 'foobar', + "State": { + "Status": "running", + "Running": True, + "Pid": 0, + "ExitCode": 0, + "StartedAt": "2013-09-25T14:01:18.869545111+02:00", + "Ghost": False + }, + "HostConfig": { + "LogConfig": { + "Type": "json-file", + "Config": {} + }, + }, + "MacAddress": "02:42:ac:11:00:0a" + } + return status_code, response + + +def get_fake_inspect_image(): + status_code = 200 + response = { + 'Id': FAKE_IMAGE_ID, + 'Parent': "27cf784147099545", + 'Created': "2013-03-23T22:24:18.818426-07:00", + 'Container': FAKE_CONTAINER_ID, + 'Config': {'Labels': {'bar': 'foo'}}, + 'ContainerConfig': + { + "Hostname": "", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "AttachStdin": False, + "AttachStdout": False, + "AttachStderr": False, + "PortSpecs": "", + "Tty": True, + "OpenStdin": True, + "StdinOnce": False, + "Env": "", + "Cmd": ["/bin/bash"], + "Dns": "", + "Image": "base", + "Volumes": "", + "VolumesFrom": "", + "WorkingDir": "" + }, + 'Size': 6823592 + } + return status_code, response + + +def get_fake_insert_image(): + status_code = 200 + response = {'StatusCode': 0} + return status_code, response + + +def get_fake_wait(): + status_code = 200 + response = {'StatusCode': 0} + return status_code, response + + +def get_fake_logs(): + status_code = 200 + response = (b'\x01\x00\x00\x00\x00\x00\x00\x00' + b'\x02\x00\x00\x00\x00\x00\x00\x00' + b'\x01\x00\x00\x00\x00\x00\x00\x11Flowering Nights\n' + b'\x01\x00\x00\x00\x00\x00\x00\x10(Sakuya Iyazoi)\n') + return status_code, response + + +def get_fake_diff(): + status_code = 200 + response = [{'Path': '/test', 'Kind': 1}] + return status_code, response + + +def get_fake_events(): + status_code = 200 + response = [{'status': 'stop', 'id': FAKE_CONTAINER_ID, + 'from': FAKE_IMAGE_ID, 'time': 1423247867}] + return status_code, response + + +def get_fake_export(): + status_code = 200 + response = 'Byte Stream....' 
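+ # placeholder only; the real /containers/{id}/export endpoint streams a tar archive of the container filesystem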
+ return status_code, response + + +def post_fake_exec_create(): + status_code = 200 + response = {'Id': FAKE_EXEC_ID} + return status_code, response + + +def post_fake_exec_start(): + status_code = 200 + response = (b'\x01\x00\x00\x00\x00\x00\x00\x11bin\nboot\ndev\netc\n' + b'\x01\x00\x00\x00\x00\x00\x00\x12lib\nmnt\nproc\nroot\n' + b'\x01\x00\x00\x00\x00\x00\x00\x0csbin\nusr\nvar\n') + return status_code, response + + +def post_fake_exec_resize(): + status_code = 201 + return status_code, '' + + +def get_fake_exec_inspect(): + return 200, { + 'OpenStderr': True, + 'OpenStdout': True, + 'Container': get_fake_inspect_container()[1], + 'Running': False, + 'ProcessConfig': { + 'arguments': ['hello world'], + 'tty': False, + 'entrypoint': 'echo', + 'privileged': False, + 'user': '' + }, + 'ExitCode': 0, + 'ID': FAKE_EXEC_ID, + 'OpenStdin': False + } + + +def post_fake_stop_container(): + status_code = 200 + response = {'Id': FAKE_CONTAINER_ID} + return status_code, response + + +def post_fake_kill_container(): + status_code = 200 + response = {'Id': FAKE_CONTAINER_ID} + return status_code, response + + +def post_fake_pause_container(): + status_code = 200 + response = {'Id': FAKE_CONTAINER_ID} + return status_code, response + + +def post_fake_unpause_container(): + status_code = 200 + response = {'Id': FAKE_CONTAINER_ID} + return status_code, response + + +def post_fake_restart_container(): + status_code = 200 + response = {'Id': FAKE_CONTAINER_ID} + return status_code, response + + +def post_fake_rename_container(): + status_code = 204 + return status_code, None + + +def delete_fake_remove_container(): + status_code = 200 + response = {'Id': FAKE_CONTAINER_ID} + return status_code, response + + +def post_fake_image_create(): + status_code = 200 + response = {'Id': FAKE_IMAGE_ID} + return status_code, response + + +def delete_fake_remove_image(): + status_code = 200 + response = {'Id': FAKE_IMAGE_ID} + return status_code, response + + +def get_fake_get_image(): + status_code = 200 + response = 'Byte Stream....' 
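+ # placeholder only; the real /images/{name}/get endpoint streams the image as a tarball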
+ return status_code, response + + +def post_fake_load_image(): + status_code = 200 + response = {'Id': FAKE_IMAGE_ID} + return status_code, response + + +def post_fake_commit(): + status_code = 200 + response = {'Id': FAKE_CONTAINER_ID} + return status_code, response + + +def post_fake_push(): + status_code = 200 + response = {'Id': FAKE_IMAGE_ID} + return status_code, response + + +def post_fake_build_container(): + status_code = 200 + response = {'Id': FAKE_CONTAINER_ID} + return status_code, response + + +def post_fake_tag_image(): + status_code = 200 + response = {'Id': FAKE_IMAGE_ID} + return status_code, response + + +def get_fake_stats(): + status_code = 200 + response = fake_stat.OBJ + return status_code, response + + +def get_fake_top(): + return 200, { + 'Processes': [ + [ + 'root', + '26501', + '6907', + '0', + '10:32', + 'pts/55', + '00:00:00', + 'sleep 60', + ], + ], + 'Titles': [ + 'UID', + 'PID', + 'PPID', + 'C', + 'STIME', + 'TTY', + 'TIME', + 'CMD', + ], + } + + +def get_fake_volume_list(): + status_code = 200 + response = { + 'Volumes': [ + { + 'Name': 'perfectcherryblossom', + 'Driver': 'local', + 'Mountpoint': '/var/lib/docker/volumes/perfectcherryblossom', + 'Scope': 'local' + }, { + 'Name': 'subterraneananimism', + 'Driver': 'local', + 'Mountpoint': '/var/lib/docker/volumes/subterraneananimism', + 'Scope': 'local' + } + ] + } + return status_code, response + + +def get_fake_volume(): + status_code = 200 + response = { + 'Name': 'perfectcherryblossom', + 'Driver': 'local', + 'Mountpoint': '/var/lib/docker/volumes/perfectcherryblossom', + 'Labels': { + 'com.example.some-label': 'some-value' + }, + 'Scope': 'local' + } + return status_code, response + + +def fake_remove_volume(): + return 204, None + + +def post_fake_update_container(): + return 200, {'Warnings': []} + + +def post_fake_update_node(): + return 200, None + + +def post_fake_join_swarm(): + return 200, None + + +def get_fake_network_list(): + return 200, [{ + "Name": "bridge", + "Id": FAKE_NETWORK_ID, + "Scope": "local", + "Driver": "bridge", + "EnableIPv6": False, + "Internal": False, + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.17.0.0/16" + } + ] + }, + "Containers": { + FAKE_CONTAINER_ID: { + "EndpointID": "ed2419a97c1d99", + "MacAddress": "02:42:ac:11:00:02", + "IPv4Address": "172.17.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + } + }] + + +def get_fake_network(): + return 200, get_fake_network_list()[1][0] + + +def post_fake_network(): + return 201, {"Id": FAKE_NETWORK_ID, "Warnings": []} + + +def delete_fake_network(): + return 204, None + + +def post_fake_network_connect(): + return 200, None + + +def post_fake_network_disconnect(): + return 200, None + + +def post_fake_secret(): + status_code = 200 + response = {'ID': FAKE_SECRET_ID} + return status_code, response + +def post_fake_config(): + status_code = 200 + response = {'ID': FAKE_CONFIG_ID} + return status_code, response + + +# Maps real api url to fake response callback +prefix = 'http+docker://localhost' +if constants.IS_WINDOWS_PLATFORM: + prefix = 'http+docker://localnpipe' + +fake_responses = { + f'{prefix}/version': + get_fake_version, + f'{prefix}/{CURRENT_VERSION}/version': + get_fake_version, + 
f'{prefix}/{CURRENT_VERSION}/info': + get_fake_info, + f'{prefix}/{CURRENT_VERSION}/auth': + post_fake_auth, + f'{prefix}/{CURRENT_VERSION}/_ping': + get_fake_ping, + f'{prefix}/{CURRENT_VERSION}/images/search': + get_fake_search, + f'{prefix}/{CURRENT_VERSION}/images/json': + get_fake_images, + f'{prefix}/{CURRENT_VERSION}/images/test_image/history': + get_fake_image_history, + f'{prefix}/{CURRENT_VERSION}/containers/json': + get_fake_containers, + f'{prefix}/{CURRENT_VERSION}/containers/{FAKE_CONTAINER_ID}/start': + post_fake_start_container, + f'{prefix}/{CURRENT_VERSION}/containers/{FAKE_CONTAINER_ID}/resize': + post_fake_resize_container, + f'{prefix}/{CURRENT_VERSION}/containers/{FAKE_CONTAINER_ID}/json': + get_fake_inspect_container, + f'{prefix}/{CURRENT_VERSION}/containers/{FAKE_CONTAINER_ID}/rename': + post_fake_rename_container, + f'{prefix}/{CURRENT_VERSION}/images/{FAKE_IMAGE_ID}/tag': + post_fake_tag_image, + f'{prefix}/{CURRENT_VERSION}/containers/{FAKE_CONTAINER_ID}/wait': + get_fake_wait, + f'{prefix}/{CURRENT_VERSION}/containers/{FAKE_CONTAINER_ID}/logs': + get_fake_logs, + f'{prefix}/{CURRENT_VERSION}/containers/{FAKE_CONTAINER_ID}/changes': + get_fake_diff, + f'{prefix}/{CURRENT_VERSION}/containers/{FAKE_CONTAINER_ID}/export': + get_fake_export, + f'{prefix}/{CURRENT_VERSION}/containers/{FAKE_CONTAINER_ID}/update': + post_fake_update_container, + f'{prefix}/{CURRENT_VERSION}/containers/{FAKE_CONTAINER_ID}/exec': + post_fake_exec_create, + f'{prefix}/{CURRENT_VERSION}/exec/{FAKE_EXEC_ID}/start': + post_fake_exec_start, + f'{prefix}/{CURRENT_VERSION}/exec/{FAKE_EXEC_ID}/json': + get_fake_exec_inspect, + f'{prefix}/{CURRENT_VERSION}/exec/{FAKE_EXEC_ID}/resize': + post_fake_exec_resize, + + f'{prefix}/{CURRENT_VERSION}/containers/{FAKE_CONTAINER_ID}/stats': + get_fake_stats, + f'{prefix}/{CURRENT_VERSION}/containers/{FAKE_CONTAINER_ID}/top': + get_fake_top, + f'{prefix}/{CURRENT_VERSION}/containers/{FAKE_CONTAINER_ID}/stop': + post_fake_stop_container, + f'{prefix}/{CURRENT_VERSION}/containers/{FAKE_CONTAINER_ID}/kill': + post_fake_kill_container, + f'{prefix}/{CURRENT_VERSION}/containers/{FAKE_CONTAINER_ID}/pause': + post_fake_pause_container, + f'{prefix}/{CURRENT_VERSION}/containers/{FAKE_CONTAINER_ID}/unpause': + post_fake_unpause_container, + f'{prefix}/{CURRENT_VERSION}/containers/{FAKE_CONTAINER_ID}/restart': + post_fake_restart_container, + f'{prefix}/{CURRENT_VERSION}/containers/{FAKE_CONTAINER_ID}': + delete_fake_remove_container, + f'{prefix}/{CURRENT_VERSION}/images/create': + post_fake_image_create, + f'{prefix}/{CURRENT_VERSION}/images/{FAKE_IMAGE_ID}': + delete_fake_remove_image, + f'{prefix}/{CURRENT_VERSION}/images/{FAKE_IMAGE_ID}/get': + get_fake_get_image, + f'{prefix}/{CURRENT_VERSION}/images/load': + post_fake_load_image, + f'{prefix}/{CURRENT_VERSION}/images/test_image/json': + get_fake_inspect_image, + f'{prefix}/{CURRENT_VERSION}/images/test_image/insert': + get_fake_insert_image, + f'{prefix}/{CURRENT_VERSION}/images/test_image/push': + post_fake_push, + f'{prefix}/{CURRENT_VERSION}/commit': + post_fake_commit, + f'{prefix}/{CURRENT_VERSION}/containers/create': + post_fake_create_container, + f'{prefix}/{CURRENT_VERSION}/build': + post_fake_build_container, + f'{prefix}/{CURRENT_VERSION}/events': + get_fake_events, + (f'{prefix}/{CURRENT_VERSION}/volumes', 'GET'): + get_fake_volume_list, + (f'{prefix}/{CURRENT_VERSION}/volumes/create', 'POST'): + get_fake_volume, + (f'{prefix}/{CURRENT_VERSION}/volumes/{FAKE_VOLUME_NAME}', 'GET'): + get_fake_volume, 
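+ # NB: plain-URL keys above match any HTTP method, while (url, method) tuple keys like those below register a fake for that single method only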
+ (f'{prefix}/{CURRENT_VERSION}/volumes/{FAKE_VOLUME_NAME}', 'DELETE'): + fake_remove_volume, + (f'{prefix}/{CURRENT_VERSION}/nodes/{FAKE_NODE_ID}/update?version=1', 'POST'): + post_fake_update_node, + (f'{prefix}/{CURRENT_VERSION}/swarm/join', 'POST'): + post_fake_join_swarm, + (f'{prefix}/{CURRENT_VERSION}/networks', 'GET'): + get_fake_network_list, + (f'{prefix}/{CURRENT_VERSION}/networks/create', 'POST'): + post_fake_network, + (f'{prefix}/{CURRENT_VERSION}/networks/{FAKE_NETWORK_ID}', 'GET'): + get_fake_network, + (f'{prefix}/{CURRENT_VERSION}/networks/{FAKE_NETWORK_ID}', 'DELETE'): + delete_fake_network, + (f'{prefix}/{CURRENT_VERSION}/networks/{FAKE_NETWORK_ID}/connect', 'POST'): + post_fake_network_connect, + (f'{prefix}/{CURRENT_VERSION}/networks/{FAKE_NETWORK_ID}/disconnect', 'POST'): + post_fake_network_disconnect, + f'{prefix}/{CURRENT_VERSION}/secrets/create': + post_fake_secret, + f'{prefix}/{CURRENT_VERSION}/configs/create': + post_fake_config, +} diff --git a/tests/unit/fake_api_client.py b/tests/unit/fake_api_client.py new file mode 100644 index 0000000000..017e99d0c8 --- /dev/null +++ b/tests/unit/fake_api_client.py @@ -0,0 +1,68 @@ +import copy +from unittest import mock + +import docker +from docker.constants import DEFAULT_DOCKER_API_VERSION + +from . import fake_api + + +class CopyReturnMagicMock(mock.MagicMock): + """ + A MagicMock which deep copies every return value. + """ + def _mock_call(self, *args, **kwargs): + ret = super()._mock_call(*args, **kwargs) + if isinstance(ret, (dict, list)): + ret = copy.deepcopy(ret) + return ret + + +def make_fake_api_client(overrides=None): + """ + Returns non-complete fake APIClient. + + This returns most of the default cases correctly, but most arguments that + change behaviour will not work. + """ + + if overrides is None: + overrides = {} + api_client = docker.APIClient(version=DEFAULT_DOCKER_API_VERSION) + mock_attrs = { + 'build.return_value': fake_api.FAKE_IMAGE_ID, + 'commit.return_value': fake_api.post_fake_commit()[1], + 'containers.return_value': fake_api.get_fake_containers()[1], + 'create_container.return_value': + fake_api.post_fake_create_container()[1], + 'create_host_config.side_effect': api_client.create_host_config, + 'create_network.return_value': fake_api.post_fake_network()[1], + 'create_secret.return_value': fake_api.post_fake_secret()[1], + 'create_config.return_value': fake_api.post_fake_config()[1], + 'exec_create.return_value': fake_api.post_fake_exec_create()[1], + 'exec_start.return_value': fake_api.post_fake_exec_start()[1], + 'images.return_value': fake_api.get_fake_images()[1], + 'inspect_container.return_value': + fake_api.get_fake_inspect_container()[1], + 'inspect_image.return_value': fake_api.get_fake_inspect_image()[1], + 'inspect_network.return_value': fake_api.get_fake_network()[1], + 'logs.return_value': [b'hello world\n'], + 'networks.return_value': fake_api.get_fake_network_list()[1], + 'start.return_value': None, + 'wait.return_value': {'StatusCode': 0}, + 'version.return_value': fake_api.get_fake_version() + } + mock_attrs.update(overrides) + mock_client = CopyReturnMagicMock(**mock_attrs) + + mock_client._version = docker.constants.DEFAULT_DOCKER_API_VERSION + return mock_client + + +def make_fake_client(overrides=None): + """ + Returns a Client with a fake APIClient. 
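+ + Usage sketch (every response is canned data from fake_api; arguments that would change real daemon behaviour are largely ignored): + + client = make_fake_client() + client.containers.run("alpine")  # returns the canned fake log output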
+ """ + client = docker.DockerClient(version=DEFAULT_DOCKER_API_VERSION) + client.api = make_fake_api_client(overrides) + return client diff --git a/tests/fake_stat.py b/tests/unit/fake_stat.py similarity index 100% rename from tests/fake_stat.py rename to tests/unit/fake_stat.py diff --git a/tests/unit/models_configs_test.py b/tests/unit/models_configs_test.py new file mode 100644 index 0000000000..9f10830687 --- /dev/null +++ b/tests/unit/models_configs_test.py @@ -0,0 +1,11 @@ +import unittest + +from .fake_api import FAKE_CONFIG_NAME +from .fake_api_client import make_fake_client + + +class CreateConfigsTest(unittest.TestCase): + def test_create_config(self): + client = make_fake_client() + config = client.configs.create(name="super_config", data="config") + assert config.__repr__() == f"" diff --git a/tests/unit/models_containers_test.py b/tests/unit/models_containers_test.py new file mode 100644 index 0000000000..0e2ae341a9 --- /dev/null +++ b/tests/unit/models_containers_test.py @@ -0,0 +1,824 @@ +import unittest + +import pytest + +import docker +from docker.constants import DEFAULT_DATA_CHUNK_SIZE, DEFAULT_DOCKER_API_VERSION +from docker.models.containers import Container, _create_container_args +from docker.models.images import Image +from docker.types import EndpointConfig + +from .fake_api import FAKE_CONTAINER_ID, FAKE_EXEC_ID, FAKE_IMAGE_ID +from .fake_api_client import make_fake_client + + +class ContainerCollectionTest(unittest.TestCase): + def test_run(self): + client = make_fake_client() + out = client.containers.run("alpine", "echo hello world") + + assert out == b'hello world\n' + + client.api.create_container.assert_called_with( + image="alpine", + command="echo hello world", + detach=False, + host_config={'NetworkMode': 'default'} + ) + client.api.inspect_container.assert_called_with(FAKE_CONTAINER_ID) + client.api.start.assert_called_with(FAKE_CONTAINER_ID) + client.api.wait.assert_called_with(FAKE_CONTAINER_ID) + client.api.logs.assert_called_with( + FAKE_CONTAINER_ID, stderr=False, stdout=True, stream=True, + follow=True + ) + + def test_create_container_args(self): + networking_config = { + 'foo': EndpointConfig( + DEFAULT_DOCKER_API_VERSION, aliases=['test'], + driver_opt={'key1': 'a'} + ) + } + + create_kwargs = _create_container_args({ + 'image': 'alpine', + 'command': 'echo hello world', + 'blkio_weight_device': [{'Path': 'foo', 'Weight': 3}], + 'blkio_weight': 2, + 'cap_add': ['foo'], + 'cap_drop': ['bar'], + 'cgroup_parent': 'foobar', + 'cgroupns': 'host', + 'cpu_period': 1, + 'cpu_quota': 2, + 'cpu_shares': 5, + 'cpuset_cpus': '0-3', + 'detach': False, + 'device_read_bps': [{'Path': 'foo', 'Rate': 3}], + 'device_read_iops': [{'Path': 'foo', 'Rate': 3}], + 'device_write_bps': [{'Path': 'foo', 'Rate': 3}], + 'device_write_iops': [{'Path': 'foo', 'Rate': 3}], + 'devices': ['/dev/sda:/dev/xvda:rwm'], + 'dns': ['8.8.8.8'], + 'domainname': 'example.com', + 'dns_opt': ['foo'], + 'dns_search': ['example.com'], + 'entrypoint': '/bin/sh', + 'environment': {'FOO': 'BAR'}, + 'extra_hosts': {'foo': '1.2.3.4'}, + 'group_add': ['blah'], + 'ipc_mode': 'foo', + 'kernel_memory': 123, + 'labels': {'key': 'value'}, + 'links': {'foo': 'bar'}, + 'log_config': {'Type': 'json-file', 'Config': {}}, + 'lxc_conf': {'foo': 'bar'}, + 'healthcheck': {'test': 'true'}, + 'hostname': 'somehost', + 'mac_address': 'abc123', + 'mem_limit': 123, + 'mem_reservation': 123, + 'mem_swappiness': 2, + 'memswap_limit': 456, + 'name': 'somename', + 'network_disabled': False, + 'network': 'foo', + 
'networking_config': networking_config, + 'oom_kill_disable': True, + 'oom_score_adj': 5, + 'pid_mode': 'host', + 'pids_limit': 500, + 'platform': 'linux', + 'ports': {1111: 4567, 2222: None}, + 'privileged': True, + 'publish_all_ports': True, + 'read_only': True, + 'restart_policy': {'Name': 'always'}, + 'security_opt': ['blah'], + 'shm_size': 123, + 'stdin_open': True, + 'stop_signal': 9, + 'sysctls': {'foo': 'bar'}, + 'tmpfs': {'/blah': ''}, + 'tty': True, + 'ulimits': [{"Name": "nofile", "Soft": 1024, "Hard": 2048}], + 'user': 'bob', + 'userns_mode': 'host', + 'uts_mode': 'host', + 'version': DEFAULT_DOCKER_API_VERSION, + 'volume_driver': 'some_driver', 'volumes': [ + '/home/user1/:/mnt/vol2', + '/var/www:/mnt/vol1:ro', + 'volumename:/mnt/vol3r', + '/volumewithnohostpath', + '/anothervolumewithnohostpath:ro', + 'C:\\windows\\path:D:\\hello\\world:rw' + ], + 'volumes_from': ['container'], + 'working_dir': '/code', + }) + + expected = { + 'image': 'alpine', + 'command': 'echo hello world', + 'domainname': 'example.com', + 'detach': False, + 'entrypoint': '/bin/sh', + 'environment': {'FOO': 'BAR'}, + 'host_config': { + 'Binds': [ + '/home/user1/:/mnt/vol2', + '/var/www:/mnt/vol1:ro', + 'volumename:/mnt/vol3r', + '/volumewithnohostpath', + '/anothervolumewithnohostpath:ro', + 'C:\\windows\\path:D:\\hello\\world:rw' + ], + 'BlkioDeviceReadBps': [{'Path': 'foo', 'Rate': 3}], + 'BlkioDeviceReadIOps': [{'Path': 'foo', 'Rate': 3}], + 'BlkioDeviceWriteBps': [{'Path': 'foo', 'Rate': 3}], + 'BlkioDeviceWriteIOps': [{'Path': 'foo', 'Rate': 3}], + 'BlkioWeightDevice': [{'Path': 'foo', 'Weight': 3}], + 'BlkioWeight': 2, + 'CapAdd': ['foo'], + 'CapDrop': ['bar'], + 'CgroupnsMode': 'host', + 'CgroupParent': 'foobar', + 'CpuPeriod': 1, + 'CpuQuota': 2, + 'CpuShares': 5, + 'CpusetCpus': '0-3', + 'Devices': [ + { + 'PathOnHost': '/dev/sda', + 'CgroupPermissions': 'rwm', + 'PathInContainer': '/dev/xvda', + }, + ], + 'Dns': ['8.8.8.8'], + 'DnsOptions': ['foo'], + 'DnsSearch': ['example.com'], + 'ExtraHosts': ['foo:1.2.3.4'], + 'GroupAdd': ['blah'], + 'IpcMode': 'foo', + 'KernelMemory': 123, + 'Links': ['foo:bar'], + 'LogConfig': {'Type': 'json-file', 'Config': {}}, + 'LxcConf': [{'Key': 'foo', 'Value': 'bar'}], + 'Memory': 123, + 'MemoryReservation': 123, + 'MemorySwap': 456, + 'MemorySwappiness': 2, + 'NetworkMode': 'foo', + 'OomKillDisable': True, + 'OomScoreAdj': 5, + 'PidMode': 'host', + 'PidsLimit': 500, + 'PortBindings': { + '1111/tcp': [{'HostIp': '', 'HostPort': '4567'}], + '2222/tcp': [{'HostIp': '', 'HostPort': ''}] + }, + 'Privileged': True, + 'PublishAllPorts': True, + 'ReadonlyRootfs': True, + 'RestartPolicy': {'Name': 'always'}, + 'SecurityOpt': ['blah'], + 'ShmSize': 123, + 'Sysctls': {'foo': 'bar'}, + 'Tmpfs': {'/blah': ''}, + 'Ulimits': [ + {"Name": "nofile", "Soft": 1024, "Hard": 2048}, + ], + 'UsernsMode': 'host', + 'UTSMode': 'host', + 'VolumeDriver': 'some_driver', + 'VolumesFrom': ['container'], + }, + 'healthcheck': {'test': 'true'}, + 'hostname': 'somehost', + 'labels': {'key': 'value'}, + 'mac_address': 'abc123', + 'name': 'somename', + 'network_disabled': False, + 'networking_config': { + 'EndpointsConfig': { + 'foo': { + 'Aliases': ['test'], + 'DriverOpts': {'key1': 'a'}, + }, + } + }, + 'platform': 'linux', + 'ports': [('1111', 'tcp'), ('2222', 'tcp')], + 'stdin_open': True, + 'stop_signal': 9, + 'tty': True, + 'user': 'bob', + 'volumes': [ + '/mnt/vol2', + '/mnt/vol1', + '/mnt/vol3r', + '/volumewithnohostpath', + '/anothervolumewithnohostpath', + 'D:\\hello\\world' + ], + 
'working_dir': '/code', + } + + assert create_kwargs == expected + + def test_run_detach(self): + client = make_fake_client() + container = client.containers.run('alpine', 'sleep 300', detach=True) + assert isinstance(container, Container) + assert container.id == FAKE_CONTAINER_ID + client.api.create_container.assert_called_with( + image='alpine', + command='sleep 300', + detach=True, + host_config={ + 'NetworkMode': 'default', + } + ) + client.api.inspect_container.assert_called_with(FAKE_CONTAINER_ID) + client.api.start.assert_called_with(FAKE_CONTAINER_ID) + + def test_run_pull(self): + client = make_fake_client() + + # raise exception on first call, then return normal value + client.api.create_container.side_effect = [ + docker.errors.ImageNotFound(""), + client.api.create_container.return_value + ] + + container = client.containers.run('alpine', 'sleep 300', detach=True) + + assert container.id == FAKE_CONTAINER_ID + client.api.pull.assert_called_with( + 'alpine', platform=None, tag='latest', all_tags=False, stream=True + ) + + def test_run_with_error(self): + client = make_fake_client() + client.api.logs.return_value = "some error" + client.api.wait.return_value = {'StatusCode': 1} + + with pytest.raises(docker.errors.ContainerError) as cm: + client.containers.run('alpine', 'echo hello world') + assert cm.value.exit_status == 1 + assert "some error" in cm.exconly() + + def test_run_with_image_object(self): + client = make_fake_client() + image = client.images.get(FAKE_IMAGE_ID) + client.containers.run(image) + client.api.create_container.assert_called_with( + image=image.id, + command=None, + detach=False, + host_config={ + 'NetworkMode': 'default', + } + ) + + def test_run_remove(self): + client = make_fake_client() + client.containers.run("alpine") + client.api.remove_container.assert_not_called() + + client = make_fake_client() + client.api.wait.return_value = {'StatusCode': 1} + with pytest.raises(docker.errors.ContainerError): + client.containers.run("alpine") + client.api.remove_container.assert_not_called() + + client = make_fake_client() + client.containers.run("alpine", remove=True) + client.api.remove_container.assert_called_with(FAKE_CONTAINER_ID) + + client = make_fake_client() + client.api.wait.return_value = {'StatusCode': 1} + with pytest.raises(docker.errors.ContainerError): + client.containers.run("alpine", remove=True) + client.api.remove_container.assert_called_with(FAKE_CONTAINER_ID) + + client = make_fake_client() + client.api._version = '1.24' + with pytest.raises(RuntimeError): + client.containers.run("alpine", detach=True, remove=True) + + client = make_fake_client() + client.api._version = '1.23' + with pytest.raises(RuntimeError): + client.containers.run("alpine", detach=True, remove=True) + + client = make_fake_client() + client.api._version = '1.25' + client.containers.run("alpine", detach=True, remove=True) + client.api.remove_container.assert_not_called() + client.api.create_container.assert_called_with( + command=None, + image='alpine', + detach=True, + host_config={'AutoRemove': True, + 'NetworkMode': 'default'} + ) + + client = make_fake_client() + client.api._version = '1.26' + client.containers.run("alpine", detach=True, remove=True) + client.api.remove_container.assert_not_called() + client.api.create_container.assert_called_with( + command=None, + image='alpine', + detach=True, + host_config={'AutoRemove': True, + 'NetworkMode': 'default'} + ) + + def test_run_platform(self): + client = make_fake_client() + + # raise exception on first call, then 
return normal value + client.api.create_container.side_effect = [ + docker.errors.ImageNotFound(""), + client.api.create_container.return_value + ] + + client.containers.run(image='alpine', platform='linux/arm64') + + client.api.pull.assert_called_with( + 'alpine', + tag='latest', + all_tags=False, + stream=True, + platform='linux/arm64', + ) + + client.api.create_container.assert_called_with( + detach=False, + platform='linux/arm64', + image='alpine', + command=None, + host_config={'NetworkMode': 'default'}, + ) + + def test_run_networking_config_without_network(self): + client = make_fake_client() + + with pytest.raises(RuntimeError): + client.containers.run( + image='alpine', + networking_config={'aliases': ['test'], + 'driver_opt': {'key1': 'a'}} + ) + + def test_run_networking_config_with_network_mode(self): + client = make_fake_client() + + with pytest.raises(RuntimeError): + client.containers.run( + image='alpine', + network_mode='none', + networking_config={'aliases': ['test'], + 'driver_opt': {'key1': 'a'}} + ) + + def test_run_networking_config(self): + client = make_fake_client() + + networking_config = { + 'foo': EndpointConfig( + DEFAULT_DOCKER_API_VERSION, aliases=['test'], + driver_opt={'key1': 'a'} + ) + } + + client.containers.run( + image='alpine', + network='foo', + networking_config=networking_config + ) + + client.api.create_container.assert_called_with( + detach=False, + image='alpine', + command=None, + networking_config={'EndpointsConfig': { + 'foo': {'Aliases': ['test'], 'DriverOpts': {'key1': 'a'}}} + }, + host_config={'NetworkMode': 'foo'} + ) + + def test_run_networking_config_with_undeclared_network(self): + client = make_fake_client() + + networking_config = { + 'foo': EndpointConfig( + DEFAULT_DOCKER_API_VERSION, aliases=['test_foo'], + driver_opt={'key2': 'b'} + ), + 'bar': EndpointConfig( + DEFAULT_DOCKER_API_VERSION, aliases=['test'], + driver_opt={'key1': 'a'} + ) + } + + client.containers.run( + image='alpine', + network='foo', + networking_config=networking_config + ) + + client.api.create_container.assert_called_with( + detach=False, + image='alpine', + command=None, + networking_config={'EndpointsConfig': { + 'foo': {'Aliases': ['test_foo'], 'DriverOpts': {'key2': 'b'}}, + 'bar': {'Aliases': ['test'], 'DriverOpts': {'key1': 'a'}}, + }}, + host_config={'NetworkMode': 'foo'} + ) + + def test_run_networking_config_only_undeclared_network(self): + client = make_fake_client() + + networking_config = { + 'bar': EndpointConfig( + DEFAULT_DOCKER_API_VERSION, aliases=['test'], + driver_opt={'key1': 'a'} + ) + } + + client.containers.run( + image='alpine', + network='foo', + networking_config=networking_config + ) + + client.api.create_container.assert_called_with( + detach=False, + image='alpine', + command=None, + networking_config={'foo': None}, + host_config={'NetworkMode': 'foo'} + ) + + def test_create(self): + client = make_fake_client() + container = client.containers.create( + 'alpine', + 'echo hello world', + environment={'FOO': 'BAR'} + ) + assert isinstance(container, Container) + assert container.id == FAKE_CONTAINER_ID + client.api.create_container.assert_called_with( + image='alpine', + command='echo hello world', + environment={'FOO': 'BAR'}, + host_config={'NetworkMode': 'default'} + ) + client.api.inspect_container.assert_called_with(FAKE_CONTAINER_ID) + + def test_create_with_image_object(self): + client = make_fake_client() + image = client.images.get(FAKE_IMAGE_ID) + client.containers.create(image) + 
client.api.create_container.assert_called_with( + image=image.id, + command=None, + host_config={'NetworkMode': 'default'} + ) + + def test_create_networking_config_without_network(self): + client = make_fake_client() + + client.containers.create( + image='alpine', + networking_config={'aliases': ['test'], + 'driver_opt': {'key1': 'a'}} + ) + + client.api.create_container.assert_called_with( + image='alpine', + command=None, + host_config={'NetworkMode': 'default'} + ) + + def test_create_networking_config_with_network_mode(self): + client = make_fake_client() + + client.containers.create( + image='alpine', + network_mode='none', + networking_config={'aliases': ['test'], + 'driver_opt': {'key1': 'a'}} + ) + + client.api.create_container.assert_called_with( + image='alpine', + command=None, + host_config={'NetworkMode': 'none'} + ) + + def test_create_networking_config(self): + client = make_fake_client() + + networking_config = { + 'foo': EndpointConfig( + DEFAULT_DOCKER_API_VERSION, aliases=['test'], + driver_opt={'key1': 'a'} + ) + } + + client.containers.create( + image='alpine', + network='foo', + networking_config=networking_config + ) + + client.api.create_container.assert_called_with( + image='alpine', + command=None, + networking_config={'EndpointsConfig': { + 'foo': {'Aliases': ['test'], 'DriverOpts': {'key1': 'a'}}} + }, + host_config={'NetworkMode': 'foo'} + ) + + def test_create_networking_config_with_undeclared_network(self): + client = make_fake_client() + + networking_config = { + 'foo': EndpointConfig( + DEFAULT_DOCKER_API_VERSION, aliases=['test_foo'], + driver_opt={'key2': 'b'} + ), + 'bar': EndpointConfig( + DEFAULT_DOCKER_API_VERSION, aliases=['test'], + driver_opt={'key1': 'a'} + ) + } + + client.containers.create( + image='alpine', + network='foo', + networking_config=networking_config + ) + + client.api.create_container.assert_called_with( + image='alpine', + command=None, + networking_config={'EndpointsConfig': { + 'foo': {'Aliases': ['test_foo'], 'DriverOpts': {'key2': 'b'}}, + 'bar': {'Aliases': ['test'], 'DriverOpts': {'key1': 'a'}}, + }}, + host_config={'NetworkMode': 'foo'} + ) + + def test_create_networking_config_only_undeclared_network(self): + client = make_fake_client() + + networking_config = { + 'bar': EndpointConfig( + DEFAULT_DOCKER_API_VERSION, aliases=['test'], + driver_opt={'key1': 'a'} + ) + } + + client.containers.create( + image='alpine', + network='foo', + networking_config=networking_config + ) + + client.api.create_container.assert_called_with( + image='alpine', + command=None, + networking_config={'foo': None}, + host_config={'NetworkMode': 'foo'} + ) + + def test_get(self): + client = make_fake_client() + container = client.containers.get(FAKE_CONTAINER_ID) + assert isinstance(container, Container) + assert container.id == FAKE_CONTAINER_ID + client.api.inspect_container.assert_called_with(FAKE_CONTAINER_ID) + + def test_list(self): + client = make_fake_client() + containers = client.containers.list(all=True) + client.api.containers.assert_called_with( + all=True, + before=None, + filters=None, + limit=-1, + since=None + ) + client.api.inspect_container.assert_called_with(FAKE_CONTAINER_ID) + assert len(containers) == 1 + assert isinstance(containers[0], Container) + assert containers[0].id == FAKE_CONTAINER_ID + + def test_list_ignore_removed(self): + def side_effect(*args, **kwargs): + raise docker.errors.NotFound('Container not found') + + client = make_fake_client({ + 'inspect_container.side_effect': side_effect + }) + + with 
pytest.raises(docker.errors.NotFound): + client.containers.list(all=True, ignore_removed=False) + + assert client.containers.list(all=True, ignore_removed=True) == [] + + +class ContainerTest(unittest.TestCase): + def test_short_id(self): + container = Container(attrs={'Id': '8497fe9244dd45cac543eb3c37d8605077' + '6800eebef1f3ec2ee111e8ccf12db6'}) + assert container.short_id == '8497fe9244dd' + + def test_name(self): + client = make_fake_client() + container = client.containers.get(FAKE_CONTAINER_ID) + assert container.name == 'foobar' + + def test_status(self): + client = make_fake_client() + container = client.containers.get(FAKE_CONTAINER_ID) + assert container.status == "running" + + def test_attach(self): + client = make_fake_client() + container = client.containers.get(FAKE_CONTAINER_ID) + container.attach(stream=True) + client.api.attach.assert_called_with(FAKE_CONTAINER_ID, stream=True) + + def test_commit(self): + client = make_fake_client() + container = client.containers.get(FAKE_CONTAINER_ID) + image = container.commit() + client.api.commit.assert_called_with(FAKE_CONTAINER_ID, + repository=None, + tag=None) + assert isinstance(image, Image) + assert image.id == FAKE_IMAGE_ID + + def test_diff(self): + client = make_fake_client() + container = client.containers.get(FAKE_CONTAINER_ID) + container.diff() + client.api.diff.assert_called_with(FAKE_CONTAINER_ID) + + def test_exec_run(self): + client = make_fake_client() + container = client.containers.get(FAKE_CONTAINER_ID) + container.exec_run("echo hello world", privileged=True, stream=True) + client.api.exec_create.assert_called_with( + FAKE_CONTAINER_ID, "echo hello world", stdout=True, stderr=True, + stdin=False, tty=False, privileged=True, user='', environment=None, + workdir=None, + ) + client.api.exec_start.assert_called_with( + FAKE_EXEC_ID, detach=False, tty=False, stream=True, socket=False, + demux=False, + ) + + def test_exec_run_failure(self): + client = make_fake_client() + container = client.containers.get(FAKE_CONTAINER_ID) + container.exec_run("docker ps", privileged=True, stream=False) + client.api.exec_create.assert_called_with( + FAKE_CONTAINER_ID, "docker ps", stdout=True, stderr=True, + stdin=False, tty=False, privileged=True, user='', environment=None, + workdir=None, + ) + client.api.exec_start.assert_called_with( + FAKE_EXEC_ID, detach=False, tty=False, stream=False, socket=False, + demux=False, + ) + + def test_export(self): + client = make_fake_client() + container = client.containers.get(FAKE_CONTAINER_ID) + container.export() + client.api.export.assert_called_with( + FAKE_CONTAINER_ID, DEFAULT_DATA_CHUNK_SIZE + ) + + def test_get_archive(self): + client = make_fake_client() + container = client.containers.get(FAKE_CONTAINER_ID) + container.get_archive('foo') + client.api.get_archive.assert_called_with( + FAKE_CONTAINER_ID, 'foo', DEFAULT_DATA_CHUNK_SIZE, False + ) + + def test_image(self): + client = make_fake_client() + container = client.containers.get(FAKE_CONTAINER_ID) + assert container.image.id == FAKE_IMAGE_ID + + def test_kill(self): + client = make_fake_client() + container = client.containers.get(FAKE_CONTAINER_ID) + container.kill(signal=5) + client.api.kill.assert_called_with(FAKE_CONTAINER_ID, signal=5) + + def test_labels(self): + client = make_fake_client() + container = client.containers.get(FAKE_CONTAINER_ID) + assert container.labels == {'foo': 'bar'} + + def test_logs(self): + client = make_fake_client() + container = client.containers.get(FAKE_CONTAINER_ID) + container.logs() + 
client.api.logs.assert_called_with(FAKE_CONTAINER_ID) + + def test_pause(self): + client = make_fake_client() + container = client.containers.get(FAKE_CONTAINER_ID) + container.pause() + client.api.pause.assert_called_with(FAKE_CONTAINER_ID) + + def test_put_archive(self): + client = make_fake_client() + container = client.containers.get(FAKE_CONTAINER_ID) + container.put_archive('path', 'foo') + client.api.put_archive.assert_called_with(FAKE_CONTAINER_ID, + 'path', 'foo') + + def test_remove(self): + client = make_fake_client() + container = client.containers.get(FAKE_CONTAINER_ID) + container.remove() + client.api.remove_container.assert_called_with(FAKE_CONTAINER_ID) + + def test_rename(self): + client = make_fake_client() + container = client.containers.get(FAKE_CONTAINER_ID) + container.rename("foo") + client.api.rename.assert_called_with(FAKE_CONTAINER_ID, "foo") + + def test_resize(self): + client = make_fake_client() + container = client.containers.get(FAKE_CONTAINER_ID) + container.resize(1, 2) + client.api.resize.assert_called_with(FAKE_CONTAINER_ID, 1, 2) + + def test_restart(self): + client = make_fake_client() + container = client.containers.get(FAKE_CONTAINER_ID) + container.restart() + client.api.restart.assert_called_with(FAKE_CONTAINER_ID) + + def test_start(self): + client = make_fake_client() + container = client.containers.get(FAKE_CONTAINER_ID) + container.start() + client.api.start.assert_called_with(FAKE_CONTAINER_ID) + + def test_stats(self): + client = make_fake_client() + container = client.containers.get(FAKE_CONTAINER_ID) + container.stats() + client.api.stats.assert_called_with(FAKE_CONTAINER_ID) + + def test_stop(self): + client = make_fake_client() + container = client.containers.get(FAKE_CONTAINER_ID) + container.stop() + client.api.stop.assert_called_with(FAKE_CONTAINER_ID) + + def test_top(self): + client = make_fake_client() + container = client.containers.get(FAKE_CONTAINER_ID) + container.top() + client.api.top.assert_called_with(FAKE_CONTAINER_ID) + + def test_unpause(self): + client = make_fake_client() + container = client.containers.get(FAKE_CONTAINER_ID) + container.unpause() + client.api.unpause.assert_called_with(FAKE_CONTAINER_ID) + + def test_update(self): + client = make_fake_client() + container = client.containers.get(FAKE_CONTAINER_ID) + container.update(cpu_shares=2) + client.api.update_container.assert_called_with(FAKE_CONTAINER_ID, + cpu_shares=2) + + def test_wait(self): + client = make_fake_client() + container = client.containers.get(FAKE_CONTAINER_ID) + container.wait() + client.api.wait.assert_called_with(FAKE_CONTAINER_ID) diff --git a/tests/unit/models_images_test.py b/tests/unit/models_images_test.py new file mode 100644 index 0000000000..3478c3fedb --- /dev/null +++ b/tests/unit/models_images_test.py @@ -0,0 +1,175 @@ +import unittest +import warnings + +from docker.constants import DEFAULT_DATA_CHUNK_SIZE +from docker.models.images import Image + +from .fake_api import FAKE_IMAGE_ID +from .fake_api_client import make_fake_client + + +class ImageCollectionTest(unittest.TestCase): + def test_build(self): + client = make_fake_client() + image = client.images.build() + client.api.build.assert_called_with() + client.api.inspect_image.assert_called_with(FAKE_IMAGE_ID) + assert isinstance(image, Image) + assert image.id == FAKE_IMAGE_ID + + def test_get(self): + client = make_fake_client() + image = client.images.get(FAKE_IMAGE_ID) + client.api.inspect_image.assert_called_with(FAKE_IMAGE_ID) + assert isinstance(image, Image) + assert 
image.id == FAKE_IMAGE_ID + + def test_labels(self): + client = make_fake_client() + image = client.images.get(FAKE_IMAGE_ID) + assert image.labels == {'bar': 'foo'} + + def test_list(self): + client = make_fake_client() + images = client.images.list(all=True) + client.api.images.assert_called_with(all=True, name=None, filters=None) + assert len(images) == 1 + assert isinstance(images[0], Image) + assert images[0].id == FAKE_IMAGE_ID + + def test_load(self): + client = make_fake_client() + client.images.load('byte stream') + client.api.load_image.assert_called_with('byte stream') + + def test_pull(self): + client = make_fake_client() + image = client.images.pull('test_image:test') + client.api.pull.assert_called_with( + 'test_image', tag='test', all_tags=False, stream=True + ) + client.api.inspect_image.assert_called_with('test_image:test') + assert isinstance(image, Image) + assert image.id == FAKE_IMAGE_ID + + def test_pull_tag_precedence(self): + client = make_fake_client() + image = client.images.pull('test_image:latest', tag='test') + client.api.pull.assert_called_with( + 'test_image', tag='test', all_tags=False, stream=True + ) + client.api.inspect_image.assert_called_with('test_image:test') + + image = client.images.pull('test_image') + client.api.pull.assert_called_with( + 'test_image', tag='latest', all_tags=False, stream=True + ) + client.api.inspect_image.assert_called_with('test_image:latest') + assert isinstance(image, Image) + assert image.id == FAKE_IMAGE_ID + + def test_pull_multiple(self): + client = make_fake_client() + images = client.images.pull('test_image', all_tags=True) + client.api.pull.assert_called_with( + 'test_image', tag='latest', all_tags=True, stream=True + ) + client.api.images.assert_called_with( + all=False, name='test_image', filters=None + ) + client.api.inspect_image.assert_called_with(FAKE_IMAGE_ID) + assert len(images) == 1 + image = images[0] + assert isinstance(image, Image) + assert image.id == FAKE_IMAGE_ID + + def test_pull_with_stream_param(self): + client = make_fake_client() + with warnings.catch_warnings(record=True) as w: + client.images.pull('test_image', stream=True) + + assert len(w) == 1 + assert str(w[0].message).startswith( + '`stream` is not a valid parameter' + ) + + def test_push(self): + client = make_fake_client() + client.images.push('foobar', insecure_registry=True) + client.api.push.assert_called_with( + 'foobar', + tag=None, + insecure_registry=True + ) + + def test_remove(self): + client = make_fake_client() + client.images.remove('test_image') + client.api.remove_image.assert_called_with('test_image') + + def test_search(self): + client = make_fake_client() + client.images.search('test') + client.api.search.assert_called_with('test') + + def test_search_limit(self): + client = make_fake_client() + client.images.search('test', limit=5) + client.api.search.assert_called_with('test', limit=5) + + +class ImageTest(unittest.TestCase): + def test_short_id(self): + image = Image(attrs={'Id': 'sha256:b6846070672ce4e8f1f91564ea6782bd675' + 'f69d65a6f73ef6262057ad0a15dcd'}) + assert image.short_id == 'sha256:b6846070672c' + + image = Image(attrs={'Id': 'b6846070672ce4e8f1f91564ea6782bd675' + 'f69d65a6f73ef6262057ad0a15dcd'}) + assert image.short_id == 'b6846070672c' + + def test_tags(self): + image = Image(attrs={ + 'RepoTags': ['test_image:latest'] + }) + assert image.tags == ['test_image:latest'] + + image = Image(attrs={ + 'RepoTags': [':'] + }) + assert image.tags == [] + + image = Image(attrs={ + 'RepoTags': None + }) + assert 
image.tags == [] + + def test_history(self): + client = make_fake_client() + image = client.images.get(FAKE_IMAGE_ID) + image.history() + client.api.history.assert_called_with(FAKE_IMAGE_ID) + + def test_remove(self): + client = make_fake_client() + image = client.images.get(FAKE_IMAGE_ID) + image.remove() + client.api.remove_image.assert_called_with( + FAKE_IMAGE_ID, + force=False, + noprune=False, + ) + + def test_save(self): + client = make_fake_client() + image = client.images.get(FAKE_IMAGE_ID) + image.save() + client.api.get_image.assert_called_with( + FAKE_IMAGE_ID, DEFAULT_DATA_CHUNK_SIZE + ) + + def test_tag(self): + client = make_fake_client() + image = client.images.get(FAKE_IMAGE_ID) + image.tag('foo') + client.api.tag.assert_called_with(FAKE_IMAGE_ID, 'foo', tag=None) diff --git a/tests/unit/models_networks_test.py b/tests/unit/models_networks_test.py new file mode 100644 index 0000000000..099fb21936 --- /dev/null +++ b/tests/unit/models_networks_test.py @@ -0,0 +1,64 @@ +import unittest + +from .fake_api import FAKE_CONTAINER_ID, FAKE_NETWORK_ID +from .fake_api_client import make_fake_client + + +class NetworkCollectionTest(unittest.TestCase): + + def test_create(self): + client = make_fake_client() + network = client.networks.create("foobar", labels={'foo': 'bar'}) + assert network.id == FAKE_NETWORK_ID + client.api.inspect_network.assert_called_once_with(FAKE_NETWORK_ID) + client.api.create_network.assert_called_once_with( + "foobar", + labels={'foo': 'bar'} + ) + + def test_get(self): + client = make_fake_client() + network = client.networks.get(FAKE_NETWORK_ID) + assert network.id == FAKE_NETWORK_ID + client.api.inspect_network.assert_called_once_with(FAKE_NETWORK_ID) + + def test_list(self): + client = make_fake_client() + networks = client.networks.list() + assert networks[0].id == FAKE_NETWORK_ID + client.api.networks.assert_called_once_with() + + client = make_fake_client() + client.networks.list(ids=["abc"]) + client.api.networks.assert_called_once_with(ids=["abc"]) + + client = make_fake_client() + client.networks.list(names=["foobar"]) + client.api.networks.assert_called_once_with(names=["foobar"]) + + +class NetworkTest(unittest.TestCase): + + def test_connect(self): + client = make_fake_client() + network = client.networks.get(FAKE_NETWORK_ID) + network.connect(FAKE_CONTAINER_ID) + client.api.connect_container_to_network.assert_called_once_with( + FAKE_CONTAINER_ID, + FAKE_NETWORK_ID + ) + + def test_disconnect(self): + client = make_fake_client() + network = client.networks.get(FAKE_NETWORK_ID) + network.disconnect(FAKE_CONTAINER_ID) + client.api.disconnect_container_from_network.assert_called_once_with( + FAKE_CONTAINER_ID, + FAKE_NETWORK_ID + ) + + def test_remove(self): + client = make_fake_client() + network = client.networks.get(FAKE_NETWORK_ID) + network.remove() + client.api.remove_network.assert_called_once_with(FAKE_NETWORK_ID) diff --git a/tests/unit/models_resources_test.py b/tests/unit/models_resources_test.py new file mode 100644 index 0000000000..11dea29480 --- /dev/null +++ b/tests/unit/models_resources_test.py @@ -0,0 +1,28 @@ +import unittest + +from .fake_api import FAKE_CONTAINER_ID +from .fake_api_client import make_fake_client + + +class ModelTest(unittest.TestCase): + def test_reload(self): + client = make_fake_client() + container = client.containers.get(FAKE_CONTAINER_ID) + container.attrs['Name'] = "oldname" + container.reload() + assert client.api.inspect_container.call_count == 2 + assert container.attrs['Name'] == "foobar" + + def 
test_hash(self): + client = make_fake_client() + container1 = client.containers.get(FAKE_CONTAINER_ID) + my_set = {container1} + assert len(my_set) == 1 + + container2 = client.containers.get(FAKE_CONTAINER_ID) + my_set.add(container2) + assert len(my_set) == 1 + + image1 = client.images.get(FAKE_CONTAINER_ID) + my_set.add(image1) + assert len(my_set) == 2 diff --git a/tests/unit/models_secrets_test.py b/tests/unit/models_secrets_test.py new file mode 100644 index 0000000000..1f5aaace2a --- /dev/null +++ b/tests/unit/models_secrets_test.py @@ -0,0 +1,11 @@ +import unittest + +from .fake_api import FAKE_SECRET_NAME +from .fake_api_client import make_fake_client + + +class CreateSecretTest(unittest.TestCase): + def test_secrets_repr(self): + client = make_fake_client() + secret = client.secrets.create(name="super_secret", data="secret") + assert secret.__repr__() == f"<Secret: '{FAKE_SECRET_NAME}'>" diff --git a/tests/unit/models_services_test.py b/tests/unit/models_services_test.py new file mode 100644 index 0000000000..0277563435 --- /dev/null +++ b/tests/unit/models_services_test.py @@ -0,0 +1,65 @@ +import unittest + +from docker.models.services import _get_create_service_kwargs + + +class CreateServiceKwargsTest(unittest.TestCase): + def test_get_create_service_kwargs(self): + kwargs = _get_create_service_kwargs('test', { + 'image': 'foo', + 'command': 'true', + 'name': 'somename', + 'labels': {'key': 'value'}, + 'hostname': 'test_host', + 'mode': 'global', + 'rollback_config': {'rollback': 'config'}, + 'update_config': {'update': 'config'}, + 'networks': ['somenet'], + 'endpoint_spec': {'blah': 'blah'}, + 'container_labels': {'containerkey': 'containervalue'}, + 'resources': {'foo': 'bar'}, + 'restart_policy': {'restart': 'policy'}, + 'log_driver': 'logdriver', + 'log_driver_options': {'foo': 'bar'}, + 'args': ['some', 'args'], + 'env': {'FOO': 'bar'}, + 'workdir': '/', + 'user': 'bob', + 'mounts': [{'some': 'mounts'}], + 'stop_grace_period': 5, + 'constraints': ['foo=bar'], + 'preferences': ['bar=baz'], + 'platforms': [('x86_64', 'linux')], + 'maxreplicas': 1, + 'sysctls': {'foo': 'bar'} + }) + + task_template = kwargs.pop('task_template') + + assert kwargs == { + 'name': 'somename', + 'labels': {'key': 'value'}, + 'mode': 'global', + 'rollback_config': {'rollback': 'config'}, + 'update_config': {'update': 'config'}, + 'endpoint_spec': {'blah': 'blah'}, + } + assert set(task_template.keys()) == { + 'ContainerSpec', 'Resources', 'RestartPolicy', 'Placement', + 'LogDriver', 'Networks' + } + assert task_template['Placement'] == { + 'Constraints': ['foo=bar'], + 'Preferences': ['bar=baz'], + 'Platforms': [{'Architecture': 'x86_64', 'OS': 'linux'}], + 'MaxReplicas': 1, + } + assert task_template['LogDriver'] == { + 'Name': 'logdriver', + 'Options': {'foo': 'bar'} + } + assert task_template['Networks'] == [{'Target': 'somenet'}] + assert set(task_template['ContainerSpec'].keys()) == { + 'Image', 'Command', 'Args', 'Hostname', 'Env', 'Dir', 'User', + 'Labels', 'Mounts', 'StopGracePeriod', 'Sysctls' + } diff --git a/tests/unit/sshadapter_test.py b/tests/unit/sshadapter_test.py new file mode 100644 index 0000000000..8736662101 --- /dev/null +++ b/tests/unit/sshadapter_test.py @@ -0,0 +1,40 @@ +import unittest + +import docker +from docker.transport.sshconn import SSHSocket + + +class SSHAdapterTest(unittest.TestCase): + @staticmethod + def test_ssh_hostname_prefix_trim(): + conn = docker.transport.SSHHTTPAdapter( + base_url="ssh://user@hostname:1234", shell_out=True) + assert conn.ssh_host == "user@hostname:1234" + + 
@staticmethod + def test_ssh_parse_url(): + c = SSHSocket(host="user@hostname:1234") + assert c.host == "hostname" + assert c.port == "1234" + assert c.user == "user" + + @staticmethod + def test_ssh_parse_hostname_only(): + c = SSHSocket(host="hostname") + assert c.host == "hostname" + assert c.port is None + assert c.user is None + + @staticmethod + def test_ssh_parse_user_and_hostname(): + c = SSHSocket(host="user@hostname") + assert c.host == "hostname" + assert c.port is None + assert c.user == "user" + + @staticmethod + def test_ssh_parse_hostname_and_port(): + c = SSHSocket(host="hostname:22") + assert c.host == "hostname" + assert c.port == "22" + assert c.user is None diff --git a/tests/unit/swarm_test.py b/tests/unit/swarm_test.py new file mode 100644 index 0000000000..4c0f2fd00c --- /dev/null +++ b/tests/unit/swarm_test.py @@ -0,0 +1,69 @@ +import json + +from ..helpers import requires_api_version +from . import fake_api +from .api_test import BaseAPIClientTest, fake_request, url_prefix + + +class SwarmTest(BaseAPIClientTest): + @requires_api_version('1.24') + def test_node_update(self): + node_spec = { + 'Availability': 'active', + 'Name': 'node-name', + 'Role': 'manager', + 'Labels': {'foo': 'bar'} + } + + self.client.update_node( + node_id=fake_api.FAKE_NODE_ID, version=1, node_spec=node_spec + ) + args = fake_request.call_args + assert args[0][1] == ( + f"{url_prefix}nodes/24ifsmvkjbyhk/update?version=1" + ) + assert json.loads(args[1]['data']) == node_spec + assert args[1]['headers']['Content-Type'] == 'application/json' + + @requires_api_version('1.24') + def test_join_swarm(self): + remote_addr = ['1.2.3.4:2377'] + listen_addr = '2.3.4.5:2377' + join_token = 'A_BEAUTIFUL_JOIN_TOKEN' + + data = { + 'RemoteAddrs': remote_addr, + 'ListenAddr': listen_addr, + 'JoinToken': join_token + } + + self.client.join_swarm( + remote_addrs=remote_addr, + listen_addr=listen_addr, + join_token=join_token + ) + + args = fake_request.call_args + + assert (args[0][1] == f"{url_prefix}swarm/join") + assert (json.loads(args[1]['data']) == data) + assert (args[1]['headers']['Content-Type'] == 'application/json') + + @requires_api_version('1.24') + def test_join_swarm_no_listen_address_takes_default(self): + remote_addr = ['1.2.3.4:2377'] + join_token = 'A_BEAUTIFUL_JOIN_TOKEN' + + data = { + 'RemoteAddrs': remote_addr, + 'ListenAddr': '0.0.0.0:2377', + 'JoinToken': join_token + } + + self.client.join_swarm(remote_addrs=remote_addr, join_token=join_token) + + args = fake_request.call_args + + assert (args[0][1] == f"{url_prefix}swarm/join") + assert (json.loads(args[1]['data']) == data) + assert (args[1]['headers']['Content-Type'] == 'application/json') diff --git a/tests/unit/testdata/certs/ca.pem b/tests/unit/testdata/certs/ca.pem new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/unit/testdata/certs/cert.pem b/tests/unit/testdata/certs/cert.pem new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/unit/testdata/certs/key.pem b/tests/unit/testdata/certs/key.pem new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/unit/types_containers_test.py b/tests/unit/types_containers_test.py new file mode 100644 index 0000000000..b0ad0a71ac --- /dev/null +++ b/tests/unit/types_containers_test.py @@ -0,0 +1,6 @@ +from docker.types.containers import ContainerConfig + + +def test_uid_0_is_not_elided(): + x = ContainerConfig(image='i', version='v', command='true', user=0) + assert x['User'] == '0' diff --git a/tests/unit/utils_build_test.py 
b/tests/unit/utils_build_test.py new file mode 100644 index 0000000000..2089afb49d --- /dev/null +++ b/tests/unit/utils_build_test.py @@ -0,0 +1,538 @@ +import os +import os.path +import shutil +import socket +import tarfile +import tempfile +import unittest + +import pytest + +from docker.constants import IS_WINDOWS_PLATFORM +from docker.utils import exclude_paths, match_tag, tar + +from ..helpers import make_tree + + +def convert_paths(collection): + return set(map(convert_path, collection)) + + +def convert_path(path): + return path.replace('/', os.path.sep) + + +class ExcludePathsTest(unittest.TestCase): + dirs = [ + 'foo', + 'foo/bar', + 'bar', + 'target', + 'target/subdir', + 'subdir', + 'subdir/target', + 'subdir/target/subdir', + 'subdir/subdir2', + 'subdir/subdir2/target', + 'subdir/subdir2/target/subdir' + ] + + files = [ + 'Dockerfile', + 'Dockerfile.alt', + '.dockerignore', + 'a.py', + 'a.go', + 'b.py', + 'cde.py', + 'foo/a.py', + 'foo/b.py', + 'foo/bar/a.py', + 'bar/a.py', + 'foo/Dockerfile3', + 'target/file.txt', + 'target/subdir/file.txt', + 'subdir/file.txt', + 'subdir/target/file.txt', + 'subdir/target/subdir/file.txt', + 'subdir/subdir2/file.txt', + 'subdir/subdir2/target/file.txt', + 'subdir/subdir2/target/subdir/file.txt', + ] + + all_paths = set(dirs + files) + + def setUp(self): + self.base = make_tree(self.dirs, self.files) + + def tearDown(self): + shutil.rmtree(self.base) + + def exclude(self, patterns, dockerfile=None): + return set(exclude_paths(self.base, patterns, dockerfile=dockerfile)) + + def test_no_excludes(self): + assert self.exclude(['']) == convert_paths(self.all_paths) + + def test_no_dupes(self): + paths = exclude_paths(self.base, ['!a.py']) + assert sorted(paths) == sorted(set(paths)) + + def test_wildcard_exclude(self): + assert self.exclude(['*']) == {'Dockerfile', '.dockerignore'} + + def test_exclude_dockerfile_dockerignore(self): + """ + Even if the .dockerignore file explicitly says to exclude + Dockerfile and/or .dockerignore, don't exclude them from + the actual tar file. + """ + assert self.exclude(['Dockerfile', '.dockerignore']) == convert_paths( + self.all_paths + ) + + def test_exclude_custom_dockerfile(self): + """ + If we're using a custom Dockerfile, make sure that's not + excluded. + """ + assert self.exclude(['*'], dockerfile='Dockerfile.alt') == { + 'Dockerfile.alt', '.dockerignore' + } + + assert self.exclude( + ['*'], dockerfile='foo/Dockerfile3' + ) == convert_paths({'foo/Dockerfile3', '.dockerignore'}) + + # https://github.com/docker/docker-py/issues/1956 + assert self.exclude( + ['*'], dockerfile='./foo/Dockerfile3' + ) == convert_paths({'foo/Dockerfile3', '.dockerignore'}) + + def test_exclude_dockerfile_child(self): + includes = self.exclude(['foo/'], dockerfile='foo/Dockerfile3') + assert convert_path('foo/Dockerfile3') in includes + assert convert_path('foo/a.py') not in includes + + def test_single_filename(self): + assert self.exclude(['a.py']) == convert_paths( + self.all_paths - {'a.py'} + ) + + def test_single_filename_leading_dot_slash(self): + assert self.exclude(['./a.py']) == convert_paths( + self.all_paths - {'a.py'} + ) + + # As odd as it sounds, a filename pattern with a trailing slash on the + # end *will* result in that file being excluded. 
+ def test_single_filename_trailing_slash(self): + assert self.exclude(['a.py/']) == convert_paths( + self.all_paths - {'a.py'} + ) + + def test_wildcard_filename_start(self): + assert self.exclude(['*.py']) == convert_paths( + self.all_paths - {'a.py', 'b.py', 'cde.py'} + ) + + def test_wildcard_with_exception(self): + assert self.exclude(['*.py', '!b.py']) == convert_paths( + self.all_paths - {'a.py', 'cde.py'} + ) + + def test_wildcard_with_wildcard_exception(self): + assert self.exclude(['*.*', '!*.go']) == convert_paths( + self.all_paths - { + 'a.py', 'b.py', 'cde.py', 'Dockerfile.alt', + } + ) + + def test_wildcard_filename_end(self): + assert self.exclude(['a.*']) == convert_paths( + self.all_paths - {'a.py', 'a.go'} + ) + + def test_question_mark(self): + assert self.exclude(['?.py']) == convert_paths( + self.all_paths - {'a.py', 'b.py'} + ) + + def test_single_subdir_single_filename(self): + assert self.exclude(['foo/a.py']) == convert_paths( + self.all_paths - {'foo/a.py'} + ) + + def test_single_subdir_single_filename_leading_slash(self): + assert self.exclude(['/foo/a.py']) == convert_paths( + self.all_paths - {'foo/a.py'} + ) + + def test_exclude_include_absolute_path(self): + base = make_tree([], ['a.py', 'b.py']) + assert exclude_paths( + base, + ['/*', '!/*.py'] + ) == {'a.py', 'b.py'} + + def test_single_subdir_with_path_traversal(self): + assert self.exclude(['foo/whoops/../a.py']) == convert_paths( + self.all_paths - {'foo/a.py'} + ) + + def test_single_subdir_wildcard_filename(self): + assert self.exclude(['foo/*.py']) == convert_paths( + self.all_paths - {'foo/a.py', 'foo/b.py'} + ) + + def test_wildcard_subdir_single_filename(self): + assert self.exclude(['*/a.py']) == convert_paths( + self.all_paths - {'foo/a.py', 'bar/a.py'} + ) + + def test_wildcard_subdir_wildcard_filename(self): + assert self.exclude(['*/*.py']) == convert_paths( + self.all_paths - {'foo/a.py', 'foo/b.py', 'bar/a.py'} + ) + + def test_directory(self): + assert self.exclude(['foo']) == convert_paths( + self.all_paths - { + 'foo', 'foo/a.py', 'foo/b.py', 'foo/bar', 'foo/bar/a.py', + 'foo/Dockerfile3' + } + ) + + def test_directory_with_trailing_slash(self): + assert self.exclude(['foo']) == convert_paths( + self.all_paths - { + 'foo', 'foo/a.py', 'foo/b.py', + 'foo/bar', 'foo/bar/a.py', 'foo/Dockerfile3' + } + ) + + def test_directory_with_single_exception(self): + assert self.exclude(['foo', '!foo/bar/a.py']) == convert_paths( + self.all_paths - { + 'foo/a.py', 'foo/b.py', 'foo', 'foo/bar', + 'foo/Dockerfile3' + } + ) + + def test_directory_with_subdir_exception(self): + assert self.exclude(['foo', '!foo/bar']) == convert_paths( + self.all_paths - { + 'foo/a.py', 'foo/b.py', 'foo', 'foo/Dockerfile3' + } + ) + + @pytest.mark.skipif( + not IS_WINDOWS_PLATFORM, reason='Backslash patterns only on Windows' + ) + def test_directory_with_subdir_exception_win32_pathsep(self): + assert self.exclude(['foo', '!foo\\bar']) == convert_paths( + self.all_paths - { + 'foo/a.py', 'foo/b.py', 'foo', 'foo/Dockerfile3' + } + ) + + def test_directory_with_wildcard_exception(self): + assert self.exclude(['foo', '!foo/*.py']) == convert_paths( + self.all_paths - { + 'foo/bar', 'foo/bar/a.py', 'foo', 'foo/Dockerfile3' + } + ) + + def test_subdirectory(self): + assert self.exclude(['foo/bar']) == convert_paths( + self.all_paths - {'foo/bar', 'foo/bar/a.py'} + ) + + @pytest.mark.skipif( + not IS_WINDOWS_PLATFORM, reason='Backslash patterns only on Windows' + ) + def test_subdirectory_win32_pathsep(self): + assert 
self.exclude(['foo\\bar']) == convert_paths( + self.all_paths - {'foo/bar', 'foo/bar/a.py'} + ) + + def test_double_wildcard(self): + assert self.exclude(['**/a.py']) == convert_paths( + self.all_paths - { + 'a.py', 'foo/a.py', 'foo/bar/a.py', 'bar/a.py' + } + ) + + assert self.exclude(['foo/**/bar']) == convert_paths( + self.all_paths - {'foo/bar', 'foo/bar/a.py'} + ) + + def test_single_and_double_wildcard(self): + assert self.exclude(['**/target/*/*']) == convert_paths( + self.all_paths - { + 'target/subdir/file.txt', + 'subdir/target/subdir/file.txt', + 'subdir/subdir2/target/subdir/file.txt' + } + ) + + def test_trailing_double_wildcard(self): + assert self.exclude(['subdir/**']) == convert_paths( + self.all_paths - { + 'subdir/file.txt', + 'subdir/target/file.txt', + 'subdir/target/subdir/file.txt', + 'subdir/subdir2/file.txt', + 'subdir/subdir2/target/file.txt', + 'subdir/subdir2/target/subdir/file.txt', + 'subdir/target', + 'subdir/target/subdir', + 'subdir/subdir2', + 'subdir/subdir2/target', + 'subdir/subdir2/target/subdir' + } + ) + + def test_double_wildcard_with_exception(self): + assert self.exclude(['**', '!bar', '!foo/bar']) == convert_paths( + { + 'foo/bar', 'foo/bar/a.py', 'bar', 'bar/a.py', 'Dockerfile', + '.dockerignore', + } + ) + + def test_include_wildcard(self): + # This may be surprising but it matches the CLI's behavior + # (tested with 18.05.0-ce on linux) + base = make_tree(['a'], ['a/b.py']) + assert exclude_paths( + base, + ['*', '!*/b.py'] + ) == set() + + def test_last_line_precedence(self): + base = make_tree( + [], + ['garbage.md', + 'trash.md', + 'README.md', + 'README-bis.md', + 'README-secret.md']) + assert exclude_paths( + base, + ['*.md', '!README*.md', 'README-secret.md'] + ) == {'README.md', 'README-bis.md'} + + def test_parent_directory(self): + base = make_tree( + [], + ['a.py', + 'b.py', + 'c.py']) + # Dockerignore reference stipulates that absolute paths are + # equivalent to relative paths, hence /../foo should be + # equivalent to ../foo. It also stipulates that paths are run + # through Go's filepath.Clean, which explicitly "replace + # "/.." by "/" at the beginning of a path". 
+ assert exclude_paths( + base, + ['../a.py', '/../b.py'] + ) == {'c.py'} + + +class TarTest(unittest.TestCase): + def test_tar_with_excludes(self): + dirs = [ + 'foo', + 'foo/bar', + 'bar', + ] + + files = [ + 'Dockerfile', + 'Dockerfile.alt', + '.dockerignore', + 'a.py', + 'a.go', + 'b.py', + 'cde.py', + 'foo/a.py', + 'foo/b.py', + 'foo/bar/a.py', + 'bar/a.py', + ] + + exclude = [ + '*.py', + '!b.py', + '!a.go', + 'foo', + 'Dockerfile*', + '.dockerignore', + ] + + expected_names = { + 'Dockerfile', + '.dockerignore', + 'a.go', + 'b.py', + 'bar', + 'bar/a.py', + } + + base = make_tree(dirs, files) + self.addCleanup(shutil.rmtree, base) + + with tar(base, exclude=exclude) as archive: + tar_data = tarfile.open(fileobj=archive) + assert sorted(tar_data.getnames()) == sorted(expected_names) + + def test_tar_with_empty_directory(self): + base = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, base) + for d in ['foo', 'bar']: + os.makedirs(os.path.join(base, d)) + with tar(base) as archive: + tar_data = tarfile.open(fileobj=archive) + assert sorted(tar_data.getnames()) == ['bar', 'foo'] + + @pytest.mark.skipif( + IS_WINDOWS_PLATFORM or os.geteuid() == 0, + reason='root user always has access ; no chmod on Windows' + ) + def test_tar_with_inaccessible_file(self): + base = tempfile.mkdtemp() + full_path = os.path.join(base, 'foo') + self.addCleanup(shutil.rmtree, base) + with open(full_path, 'w') as f: + f.write('content') + os.chmod(full_path, 0o222) + with pytest.raises(IOError) as ei: + tar(base) + + assert f'Can not read file in context: {full_path}' in ( + ei.exconly() + ) + + @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='No symlinks on Windows') + def test_tar_with_file_symlinks(self): + base = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, base) + with open(os.path.join(base, 'foo'), 'w') as f: + f.write("content") + os.makedirs(os.path.join(base, 'bar')) + os.symlink('../foo', os.path.join(base, 'bar/foo')) + with tar(base) as archive: + tar_data = tarfile.open(fileobj=archive) + assert sorted(tar_data.getnames()) == ['bar', 'bar/foo', 'foo'] + + @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='No symlinks on Windows') + def test_tar_with_directory_symlinks(self): + base = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, base) + for d in ['foo', 'bar']: + os.makedirs(os.path.join(base, d)) + os.symlink('../foo', os.path.join(base, 'bar/foo')) + with tar(base) as archive: + tar_data = tarfile.open(fileobj=archive) + assert sorted(tar_data.getnames()) == ['bar', 'bar/foo', 'foo'] + + @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='No symlinks on Windows') + def test_tar_with_broken_symlinks(self): + base = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, base) + for d in ['foo', 'bar']: + os.makedirs(os.path.join(base, d)) + + os.symlink('../baz', os.path.join(base, 'bar/foo')) + with tar(base) as archive: + tar_data = tarfile.open(fileobj=archive) + assert sorted(tar_data.getnames()) == ['bar', 'bar/foo', 'foo'] + + @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='No UNIX sockets on Win32') + def test_tar_socket_file(self): + base = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, base) + for d in ['foo', 'bar']: + os.makedirs(os.path.join(base, d)) + sock = socket.socket(socket.AF_UNIX) + self.addCleanup(sock.close) + sock.bind(os.path.join(base, 'test.sock')) + with tar(base) as archive: + tar_data = tarfile.open(fileobj=archive) + assert sorted(tar_data.getnames()) == ['bar', 'foo'] + + def test_tar_negative_mtime_bug(self): + base = tempfile.mkdtemp() + filename =
os.path.join(base, 'th.txt') + self.addCleanup(shutil.rmtree, base) + with open(filename, 'w') as f: + f.write('Invisible Full Moon') + os.utime(filename, (12345, -3600.0)) + with tar(base) as archive: + tar_data = tarfile.open(fileobj=archive) + assert tar_data.getnames() == ['th.txt'] + assert tar_data.getmember('th.txt').mtime == -3600 + + @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='No symlinks on Windows') + def test_tar_directory_link(self): + dirs = ['a', 'b', 'a/c'] + files = ['a/hello.py', 'b/utils.py', 'a/c/descend.py'] + base = make_tree(dirs, files) + self.addCleanup(shutil.rmtree, base) + os.symlink(os.path.join(base, 'b'), os.path.join(base, 'a/c/b')) + with tar(base) as archive: + tar_data = tarfile.open(fileobj=archive) + names = tar_data.getnames() + for member in dirs + files: + assert member in names + assert 'a/c/b' in names + assert 'a/c/b/utils.py' not in names + + +# selected test cases from https://github.com/distribution/reference/blob/8507c7fcf0da9f570540c958ea7b972c30eeaeca/reference_test.go#L13-L328 +@pytest.mark.parametrize("tag,expected", [ + ("test_com", True), + ("test.com:tag", True), + # N.B. this implicitly means "docker.io/library/test.com:5000" + # i.e. the `5000` is a tag, not a port here! + ("test.com:5000", True), + ("test.com/repo:tag", True), + ("test:5000/repo", True), + ("test:5000/repo:tag", True), + ("test:5000/repo", True), + ("", False), + (":justtag", False), + ("Uppercase:tag", False), + ("test:5000/Uppercase/lowercase:tag", False), + ("lowercase:Uppercase", True), + # length limits not enforced + pytest.param("a/"*128 + "a:tag", False, marks=pytest.mark.xfail), + ("a/"*127 + "a:tag-puts-this-over-max", True), + ("aa/asdf$$^/aa", False), + ("sub-dom1.foo.com/bar/baz/quux", True), + ("sub-dom1.foo.com/bar/baz/quux:some-long-tag", True), + ("b.gcr.io/test.example.com/my-app:test.example.com", True), + ("xn--n3h.com/myimage:xn--n3h.com", True), + ("foo_bar.com:8080", True), + ("foo/foo_bar.com:8080", True), + ("192.168.1.1", True), + ("192.168.1.1:tag", True), + ("192.168.1.1:5000", True), + ("192.168.1.1/repo", True), + ("192.168.1.1:5000/repo", True), + ("192.168.1.1:5000/repo:5050", True), + # regex does not properly handle ipv6 + pytest.param("[2001:db8::1]", False, marks=pytest.mark.xfail), + ("[2001:db8::1]:5000", False), + pytest.param("[2001:db8::1]/repo", True, marks=pytest.mark.xfail), + pytest.param("[2001:db8:1:2:3:4:5:6]/repo:tag", True, marks=pytest.mark.xfail), + pytest.param("[2001:db8::1]:5000/repo", True, marks=pytest.mark.xfail), + pytest.param("[2001:db8::1]:5000/repo:tag", True, marks=pytest.mark.xfail), + pytest.param("[2001:db8::]:5000/repo", True, marks=pytest.mark.xfail), + pytest.param("[::1]:5000/repo", True, marks=pytest.mark.xfail), + ("[fe80::1%eth0]:5000/repo", False), + ("[fe80::1%@invalidzone]:5000/repo", False), +]) +def test_match_tag(tag: str, expected: bool): + assert match_tag(tag) == expected diff --git a/tests/unit/utils_config_test.py b/tests/unit/utils_config_test.py new file mode 100644 index 0000000000..c87231a99f --- /dev/null +++ b/tests/unit/utils_config_test.py @@ -0,0 +1,119 @@ +import json +import os +import shutil +import tempfile +import unittest +from unittest import mock + +from pytest import fixture, mark + +from docker.utils import config + + +class FindConfigFileTest(unittest.TestCase): + + @fixture(autouse=True) + def tmpdir(self, tmpdir): + self.mkdir = tmpdir.mkdir + + def test_find_config_fallback(self): + tmpdir = self.mkdir('test_find_config_fallback') + + with 
mock.patch.dict(os.environ, {'HOME': str(tmpdir)}): + assert config.find_config_file() is None + + def test_find_config_from_explicit_path(self): + tmpdir = self.mkdir('test_find_config_from_explicit_path') + config_path = tmpdir.ensure('my-config-file.json') + + assert config.find_config_file(str(config_path)) == str(config_path) + + def test_find_config_from_environment(self): + tmpdir = self.mkdir('test_find_config_from_environment') + config_path = tmpdir.ensure('config.json') + + with mock.patch.dict(os.environ, {'DOCKER_CONFIG': str(tmpdir)}): + assert config.find_config_file() == str(config_path) + + @mark.skipif("sys.platform == 'win32'") + def test_find_config_from_home_posix(self): + tmpdir = self.mkdir('test_find_config_from_home_posix') + config_path = tmpdir.ensure('.docker', 'config.json') + + with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}): + assert config.find_config_file() == str(config_path) + + @mark.skipif("sys.platform == 'win32'") + def test_find_config_from_home_legacy_name(self): + tmpdir = self.mkdir('test_find_config_from_home_legacy_name') + config_path = tmpdir.ensure('.dockercfg') + + with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}): + assert config.find_config_file() == str(config_path) + + @mark.skipif("sys.platform != 'win32'") + def test_find_config_from_home_windows(self): + tmpdir = self.mkdir('test_find_config_from_home_windows') + config_path = tmpdir.ensure('.docker', 'config.json') + + with mock.patch.dict(os.environ, {'USERPROFILE': str(tmpdir)}): + assert config.find_config_file() == str(config_path) + + +class LoadConfigTest(unittest.TestCase): + def test_load_config_no_file(self): + folder = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, folder) + cfg = config.load_general_config(folder) + assert cfg is not None + assert isinstance(cfg, dict) + assert not cfg + + def test_load_config_custom_headers(self): + folder = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, folder) + + dockercfg_path = os.path.join(folder, 'config.json') + config_data = { + 'HttpHeaders': { + 'Name': 'Spike', + 'Surname': 'Spiegel' + }, + } + + with open(dockercfg_path, 'w') as f: + json.dump(config_data, f) + + cfg = config.load_general_config(dockercfg_path) + assert 'HttpHeaders' in cfg + assert cfg['HttpHeaders'] == { + 'Name': 'Spike', + 'Surname': 'Spiegel' + } + + def test_load_config_detach_keys(self): + folder = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, folder) + dockercfg_path = os.path.join(folder, 'config.json') + config_data = { + 'detachKeys': 'ctrl-q, ctrl-u, ctrl-i' + } + with open(dockercfg_path, 'w') as f: + json.dump(config_data, f) + + cfg = config.load_general_config(dockercfg_path) + assert cfg == config_data + + def test_load_config_from_env(self): + folder = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, folder) + dockercfg_path = os.path.join(folder, 'config.json') + config_data = { + 'detachKeys': 'ctrl-q, ctrl-u, ctrl-i' + } + with open(dockercfg_path, 'w') as f: + json.dump(config_data, f) + + with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}): + cfg = config.load_general_config(None) + assert cfg == config_data diff --git a/tests/unit/utils_json_stream_test.py b/tests/unit/utils_json_stream_test.py new file mode 100644 index 0000000000..5a8310cb52 --- /dev/null +++ b/tests/unit/utils_json_stream_test.py @@ -0,0 +1,58 @@ +from docker.utils.json_stream import json_splitter, json_stream, stream_as_text + + +class TestJsonSplitter: + + def test_json_splitter_no_object(self): + data = '{"foo": "bar' + 
assert json_splitter(data) is None + + def test_json_splitter_with_object(self): + data = '{"foo": "bar"}\n \n{"next": "obj"}' + assert json_splitter(data) == ({'foo': 'bar'}, '{"next": "obj"}') + + def test_json_splitter_leading_whitespace(self): + data = '\n \r{"foo": "bar"}\n\n {"next": "obj"}' + assert json_splitter(data) == ({'foo': 'bar'}, '{"next": "obj"}') + + +class TestStreamAsText: + + def test_stream_with_non_utf_unicode_character(self): + stream = [b'\xed\xf3\xf3'] + output, = stream_as_text(stream) + assert output == '���' + + def test_stream_with_utf_character(self): + stream = ['ěĝ'.encode()] + output, = stream_as_text(stream) + assert output == 'ěĝ' + + +class TestJsonStream: + + def test_with_falsy_entries(self): + stream = [ + '{"one": "two"}\n{}\n', + "[1, 2, 3]\n[]\n", + ] + output = list(json_stream(stream)) + assert output == [ + {'one': 'two'}, + {}, + [1, 2, 3], + [], + ] + + def test_with_leading_whitespace(self): + stream = [ + '\n \r\n {"one": "two"}{"x": 1}', + ' {"three": "four"}\t\t{"x": 2}' + ] + output = list(json_stream(stream)) + assert output == [ + {'one': 'two'}, + {'x': 1}, + {'three': 'four'}, + {'x': 2} + ] diff --git a/tests/unit/utils_proxy_test.py b/tests/unit/utils_proxy_test.py new file mode 100644 index 0000000000..2da60401d6 --- /dev/null +++ b/tests/unit/utils_proxy_test.py @@ -0,0 +1,81 @@ +import unittest + +from docker.utils.proxy import ProxyConfig + +HTTP = 'http://test:80' +HTTPS = 'https://test:443' +FTP = 'ftp://user:password@host:23' +NO_PROXY = 'localhost,.localdomain' +CONFIG = ProxyConfig(http=HTTP, https=HTTPS, ftp=FTP, no_proxy=NO_PROXY) +ENV = { + 'http_proxy': HTTP, + 'HTTP_PROXY': HTTP, + 'https_proxy': HTTPS, + 'HTTPS_PROXY': HTTPS, + 'ftp_proxy': FTP, + 'FTP_PROXY': FTP, + 'no_proxy': NO_PROXY, + 'NO_PROXY': NO_PROXY, +} + + +class ProxyConfigTest(unittest.TestCase): + + def test_from_dict(self): + config = ProxyConfig.from_dict({ + 'httpProxy': HTTP, + 'httpsProxy': HTTPS, + 'ftpProxy': FTP, + 'noProxy': NO_PROXY + }) + self.assertEqual(CONFIG.http, config.http) + self.assertEqual(CONFIG.https, config.https) + self.assertEqual(CONFIG.ftp, config.ftp) + self.assertEqual(CONFIG.no_proxy, config.no_proxy) + + def test_new(self): + config = ProxyConfig() + self.assertIsNone(config.http) + self.assertIsNone(config.https) + self.assertIsNone(config.ftp) + self.assertIsNone(config.no_proxy) + + config = ProxyConfig(http='a', https='b', ftp='c', no_proxy='d') + self.assertEqual(config.http, 'a') + self.assertEqual(config.https, 'b') + self.assertEqual(config.ftp, 'c') + self.assertEqual(config.no_proxy, 'd') + + def test_truthiness(self): + assert not ProxyConfig() + assert ProxyConfig(http='non-zero') + assert ProxyConfig(https='non-zero') + assert ProxyConfig(ftp='non-zero') + assert ProxyConfig(no_proxy='non-zero') + + def test_environment(self): + self.assertDictEqual(CONFIG.get_environment(), ENV) + empty = ProxyConfig() + self.assertDictEqual(empty.get_environment(), {}) + + def test_inject_proxy_environment(self): + # Proxy config is non null, env is None. + self.assertSetEqual( + set(CONFIG.inject_proxy_environment(None)), + {f'{k}={v}' for k, v in ENV.items()}) + + # Proxy config is null, env is None. 
+ self.assertIsNone(ProxyConfig().inject_proxy_environment(None), None) + + env = ['FOO=BAR', 'BAR=BAZ'] + + # Proxy config is non null, env is non null + actual = CONFIG.inject_proxy_environment(env) + expected = [f'{k}={v}' for k, v in ENV.items()] + env + # It's important that the first 8 variables are the ones from the proxy + # config, and the last 2 are the ones from the input environment + self.assertSetEqual(set(actual[:8]), set(expected[:8])) + self.assertSetEqual(set(actual[-2:]), set(expected[-2:])) + + # Proxy is null, and is non null + self.assertListEqual(ProxyConfig().inject_proxy_environment(env), env) diff --git a/tests/unit/utils_test.py b/tests/unit/utils_test.py new file mode 100644 index 0000000000..21da0b58e8 --- /dev/null +++ b/tests/unit/utils_test.py @@ -0,0 +1,660 @@ +import base64 +import json +import os +import os.path +import shutil +import tempfile +import unittest + +import pytest + +from docker.api.client import APIClient +from docker.constants import DEFAULT_DOCKER_API_VERSION, IS_WINDOWS_PLATFORM +from docker.errors import DockerException +from docker.utils import ( + compare_version, + convert_filters, + convert_volume_binds, + decode_json_header, + format_environment, + kwargs_from_env, + parse_bytes, + parse_devices, + parse_env_file, + parse_host, + parse_repository_tag, + split_command, + update_headers, + version_gte, + version_lt, +) +from docker.utils.ports import build_port_bindings, split_port + +TEST_CERT_DIR = os.path.join( + os.path.dirname(__file__), + 'testdata/certs', +) + + +class DecoratorsTest(unittest.TestCase): + def test_update_headers(self): + sample_headers = { + 'X-Docker-Locale': 'en-US', + } + + def f(self, headers=None): + return headers + + client = APIClient(version=DEFAULT_DOCKER_API_VERSION) + client._general_configs = {} + + g = update_headers(f) + assert g(client, headers=None) is None + assert g(client, headers={}) == {} + assert g(client, headers={'Content-type': 'application/json'}) == { + 'Content-type': 'application/json', + } + + client._general_configs = { + 'HttpHeaders': sample_headers + } + + assert g(client, headers=None) == sample_headers + assert g(client, headers={}) == sample_headers + assert g(client, headers={'Content-type': 'application/json'}) == { + 'Content-type': 'application/json', + 'X-Docker-Locale': 'en-US', + } + + +class KwargsFromEnvTest(unittest.TestCase): + def setUp(self): + self.os_environ = os.environ.copy() + + def tearDown(self): + os.environ.clear() + os.environ.update(self.os_environ) + + def test_kwargs_from_env_empty(self): + os.environ.update(DOCKER_HOST='', + DOCKER_CERT_PATH='') + os.environ.pop('DOCKER_TLS_VERIFY', None) + + kwargs = kwargs_from_env() + assert kwargs.get('base_url') is None + assert kwargs.get('tls') is None + + def test_kwargs_from_env_tls(self): + os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376', + DOCKER_CERT_PATH=TEST_CERT_DIR, + DOCKER_TLS_VERIFY='1') + kwargs = kwargs_from_env() + assert 'tcp://192.168.59.103:2376' == kwargs['base_url'] + assert 'ca.pem' in kwargs['tls'].ca_cert + assert 'cert.pem' in kwargs['tls'].cert[0] + assert 'key.pem' in kwargs['tls'].cert[1] + assert kwargs['tls'].verify is True + + parsed_host = parse_host(kwargs['base_url'], IS_WINDOWS_PLATFORM, True) + kwargs['version'] = DEFAULT_DOCKER_API_VERSION + try: + client = APIClient(**kwargs) + assert parsed_host == client.base_url + assert kwargs['tls'].ca_cert == client.verify + assert kwargs['tls'].cert == client.cert + except TypeError as e: + self.fail(e) + + def 
test_kwargs_from_env_tls_verify_false(self): + os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376', + DOCKER_CERT_PATH=TEST_CERT_DIR, + DOCKER_TLS_VERIFY='') + kwargs = kwargs_from_env() + assert 'tcp://192.168.59.103:2376' == kwargs['base_url'] + assert 'ca.pem' in kwargs['tls'].ca_cert + assert 'cert.pem' in kwargs['tls'].cert[0] + assert 'key.pem' in kwargs['tls'].cert[1] + assert kwargs['tls'].verify is False + parsed_host = parse_host(kwargs['base_url'], IS_WINDOWS_PLATFORM, True) + kwargs['version'] = DEFAULT_DOCKER_API_VERSION + try: + client = APIClient(**kwargs) + assert parsed_host == client.base_url + assert kwargs['tls'].cert == client.cert + assert not kwargs['tls'].verify + except TypeError as e: + self.fail(e) + + def test_kwargs_from_env_tls_verify_false_no_cert(self): + temp_dir = tempfile.mkdtemp() + cert_dir = os.path.join(temp_dir, '.docker') + shutil.copytree(TEST_CERT_DIR, cert_dir) + + os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376', + HOME=temp_dir, + DOCKER_TLS_VERIFY='') + os.environ.pop('DOCKER_CERT_PATH', None) + kwargs = kwargs_from_env() + assert 'tcp://192.168.59.103:2376' == kwargs['base_url'] + + def test_kwargs_from_env_no_cert_path(self): + temp_dir = tempfile.mkdtemp() + try: + cert_dir = os.path.join(temp_dir, '.docker') + shutil.copytree(TEST_CERT_DIR, cert_dir) + + os.environ.update(HOME=temp_dir, + DOCKER_CERT_PATH='', + DOCKER_TLS_VERIFY='1') + + kwargs = kwargs_from_env() + assert kwargs['tls'].verify + assert cert_dir in kwargs['tls'].ca_cert + assert cert_dir in kwargs['tls'].cert[0] + assert cert_dir in kwargs['tls'].cert[1] + finally: + shutil.rmtree(temp_dir) + + def test_kwargs_from_env_alternate_env(self): + # Values in os.environ are entirely ignored if an alternate is + # provided + os.environ.update( + DOCKER_HOST='tcp://192.168.59.103:2376', + DOCKER_CERT_PATH=TEST_CERT_DIR, + DOCKER_TLS_VERIFY='' + ) + kwargs = kwargs_from_env(environment={ + 'DOCKER_HOST': 'http://docker.gensokyo.jp:2581', + }) + assert 'http://docker.gensokyo.jp:2581' == kwargs['base_url'] + assert 'tls' not in kwargs + + +class ConvertVolumeBindsTest(unittest.TestCase): + def test_convert_volume_binds_empty(self): + assert convert_volume_binds({}) == [] + assert convert_volume_binds([]) == [] + + def test_convert_volume_binds_list(self): + data = ['/a:/a:ro', '/b:/c:z'] + assert convert_volume_binds(data) == data + + def test_convert_volume_binds_complete(self): + data = { + '/mnt/vol1': { + 'bind': '/data', + 'mode': 'ro' + } + } + assert convert_volume_binds(data) == ['/mnt/vol1:/data:ro'] + + def test_convert_volume_binds_compact(self): + data = { + '/mnt/vol1': '/data' + } + assert convert_volume_binds(data) == ['/mnt/vol1:/data:rw'] + + def test_convert_volume_binds_no_mode(self): + data = { + '/mnt/vol1': { + 'bind': '/data' + } + } + assert convert_volume_binds(data) == ['/mnt/vol1:/data:rw'] + + def test_convert_volume_binds_unicode_bytes_input(self): + expected = ['/mnt/지연:/unicode/박:rw'] + + data = { + '/mnt/지연'.encode(): { + 'bind': '/unicode/박'.encode(), + 'mode': 'rw' + } + } + assert convert_volume_binds(data) == expected + + def test_convert_volume_binds_unicode_unicode_input(self): + expected = ['/mnt/지연:/unicode/박:rw'] + + data = { + '/mnt/지연': { + 'bind': '/unicode/박', + 'mode': 'rw' + } + } + assert convert_volume_binds(data) == expected + + +class ParseEnvFileTest(unittest.TestCase): + def generate_tempfile(self, file_content=None): + """ + Generates a temporary file for tests with the content + of 'file_content' and returns the
filename. + Don't forget to unlink the file with os.unlink() after. + """ + local_tempfile = tempfile.NamedTemporaryFile(delete=False) + local_tempfile.write(file_content.encode('UTF-8')) + local_tempfile.close() + return local_tempfile.name + + def test_parse_env_file_proper(self): + env_file = self.generate_tempfile( + file_content='USER=jdoe\nPASS=secret') + get_parse_env_file = parse_env_file(env_file) + assert get_parse_env_file == {'USER': 'jdoe', 'PASS': 'secret'} + os.unlink(env_file) + + def test_parse_env_file_with_equals_character(self): + env_file = self.generate_tempfile( + file_content='USER=jdoe\nPASS=sec==ret') + get_parse_env_file = parse_env_file(env_file) + assert get_parse_env_file == {'USER': 'jdoe', 'PASS': 'sec==ret'} + os.unlink(env_file) + + def test_parse_env_file_commented_line(self): + env_file = self.generate_tempfile( + file_content='USER=jdoe\n#PASS=secret') + get_parse_env_file = parse_env_file(env_file) + assert get_parse_env_file == {'USER': 'jdoe'} + os.unlink(env_file) + + def test_parse_env_file_newline(self): + env_file = self.generate_tempfile( + file_content='\nUSER=jdoe\n\n\nPASS=secret') + get_parse_env_file = parse_env_file(env_file) + assert get_parse_env_file == {'USER': 'jdoe', 'PASS': 'secret'} + os.unlink(env_file) + + def test_parse_env_file_invalid_line(self): + env_file = self.generate_tempfile( + file_content='USER jdoe') + with pytest.raises(DockerException): + parse_env_file(env_file) + os.unlink(env_file) + + +class ParseHostTest(unittest.TestCase): + def test_parse_host(self): + invalid_hosts = [ + '0.0.0.0', + 'tcp://', + 'udp://127.0.0.1', + 'udp://127.0.0.1:2375', + 'ssh://:22/path', + 'tcp://netloc:3333/path?q=1', + 'unix:///sock/path#fragment', + 'https://netloc:3333/path;params', + 'ssh://:clearpassword@host:22', + ] + + valid_hosts = { + '0.0.0.1:5555': 'http://0.0.0.1:5555', + ':6666': 'http://127.0.0.1:6666', + 'tcp://:7777': 'http://127.0.0.1:7777', + 'http://:7777': 'http://127.0.0.1:7777', + 'https://kokia.jp:2375': 'https://kokia.jp:2375', + 'unix:///var/run/docker.sock': 'http+unix:///var/run/docker.sock', + 'unix://': 'http+unix:///var/run/docker.sock', + '12.234.45.127:2375/docker/engine': ( + 'http://12.234.45.127:2375/docker/engine' + ), + 'somehost.net:80/service/swarm': ( + 'http://somehost.net:80/service/swarm' + ), + 'npipe:////./pipe/docker_engine': 'npipe:////./pipe/docker_engine', + '[fd12::82d1]:2375': 'http://[fd12::82d1]:2375', + 'https://[fd12:5672::12aa]:1090': 'https://[fd12:5672::12aa]:1090', + '[fd12::82d1]:2375/docker/engine': ( + 'http://[fd12::82d1]:2375/docker/engine' + ), + 'ssh://[fd12::82d1]': 'ssh://[fd12::82d1]:22', + 'ssh://user@[fd12::82d1]:8765': 'ssh://user@[fd12::82d1]:8765', + 'ssh://': 'ssh://127.0.0.1:22', + 'ssh://user@localhost:22': 'ssh://user@localhost:22', + 'ssh://user@remote': 'ssh://user@remote:22', + } + + for host in invalid_hosts: + msg = f'Should have failed to parse invalid host: {host}' + with self.assertRaises(DockerException, msg=msg): + parse_host(host, None) + + for host, expected in valid_hosts.items(): + self.assertEqual( + parse_host(host, None), + expected, + msg=f'Failed to parse valid host: {host}', + ) + + def test_parse_host_empty_value(self): + unix_socket = 'http+unix:///var/run/docker.sock' + npipe = 'npipe:////./pipe/docker_engine' + + for val in [None, '']: + assert parse_host(val, is_win32=False) == unix_socket + assert parse_host(val, is_win32=True) == npipe + + def test_parse_host_tls(self): + host_value = 'myhost.docker.net:3348' + expected_result = 
'https://myhost.docker.net:3348' + assert parse_host(host_value, tls=True) == expected_result + + def test_parse_host_tls_tcp_proto(self): + host_value = 'tcp://myhost.docker.net:3348' + expected_result = 'https://myhost.docker.net:3348' + assert parse_host(host_value, tls=True) == expected_result + + def test_parse_host_trailing_slash(self): + host_value = 'tcp://myhost.docker.net:2376/' + expected_result = 'http://myhost.docker.net:2376' + assert parse_host(host_value) == expected_result + + +class ParseRepositoryTagTest(unittest.TestCase): + sha = 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' + + def test_index_image_no_tag(self): + assert parse_repository_tag("root") == ("root", None) + + def test_index_image_tag(self): + assert parse_repository_tag("root:tag") == ("root", "tag") + + def test_index_user_image_no_tag(self): + assert parse_repository_tag("user/repo") == ("user/repo", None) + + def test_index_user_image_tag(self): + assert parse_repository_tag("user/repo:tag") == ("user/repo", "tag") + + def test_private_reg_image_no_tag(self): + assert parse_repository_tag("url:5000/repo") == ("url:5000/repo", None) + + def test_private_reg_image_tag(self): + assert parse_repository_tag("url:5000/repo:tag") == ( + "url:5000/repo", "tag" + ) + + def test_index_image_sha(self): + assert parse_repository_tag(f"root@sha256:{self.sha}") == ( + "root", f"sha256:{self.sha}" + ) + + def test_private_reg_image_sha(self): + assert parse_repository_tag( + f"url:5000/repo@sha256:{self.sha}" + ) == ("url:5000/repo", f"sha256:{self.sha}") + + +class ParseDeviceTest(unittest.TestCase): + def test_dict(self): + devices = parse_devices([{ + 'PathOnHost': '/dev/sda1', + 'PathInContainer': '/dev/mnt1', + 'CgroupPermissions': 'r' + }]) + assert devices[0] == { + 'PathOnHost': '/dev/sda1', + 'PathInContainer': '/dev/mnt1', + 'CgroupPermissions': 'r' + } + + def test_partial_string_definition(self): + devices = parse_devices(['/dev/sda1']) + assert devices[0] == { + 'PathOnHost': '/dev/sda1', + 'PathInContainer': '/dev/sda1', + 'CgroupPermissions': 'rwm' + } + + def test_permissionless_string_definition(self): + devices = parse_devices(['/dev/sda1:/dev/mnt1']) + assert devices[0] == { + 'PathOnHost': '/dev/sda1', + 'PathInContainer': '/dev/mnt1', + 'CgroupPermissions': 'rwm' + } + + def test_full_string_definition(self): + devices = parse_devices(['/dev/sda1:/dev/mnt1:r']) + assert devices[0] == { + 'PathOnHost': '/dev/sda1', + 'PathInContainer': '/dev/mnt1', + 'CgroupPermissions': 'r' + } + + def test_hybrid_list(self): + devices = parse_devices([ + '/dev/sda1:/dev/mnt1:rw', + { + 'PathOnHost': '/dev/sda2', + 'PathInContainer': '/dev/mnt2', + 'CgroupPermissions': 'r' + } + ]) + + assert devices[0] == { + 'PathOnHost': '/dev/sda1', + 'PathInContainer': '/dev/mnt1', + 'CgroupPermissions': 'rw' + } + assert devices[1] == { + 'PathOnHost': '/dev/sda2', + 'PathInContainer': '/dev/mnt2', + 'CgroupPermissions': 'r' + } + + +class ParseBytesTest(unittest.TestCase): + def test_parse_bytes_valid(self): + assert parse_bytes("512MB") == 536870912 + assert parse_bytes("512M") == 536870912 + assert parse_bytes("512m") == 536870912 + + def test_parse_bytes_invalid(self): + with pytest.raises(DockerException): + parse_bytes("512MK") + with pytest.raises(DockerException): + parse_bytes("512L") + with pytest.raises(DockerException): + parse_bytes("127.0.0.1K") + + def test_parse_bytes_float(self): + assert parse_bytes("1.5k") == 1536 + + +class UtilsTest(unittest.TestCase): + longMessage = True + + def 
test_convert_filters(self): + tests = [ + ({'dangling': True}, '{"dangling": ["true"]}'), + ({'dangling': "true"}, '{"dangling": ["true"]}'), + ({'exited': 0}, '{"exited": ["0"]}'), + ({'exited': [0, 1]}, '{"exited": ["0", "1"]}'), + ] + + for filters, expected in tests: + assert convert_filters(filters) == expected + + def test_decode_json_header(self): + obj = {'a': 'b', 'c': 1} + data = None + data = base64.urlsafe_b64encode(bytes(json.dumps(obj), 'utf-8')) + decoded_data = decode_json_header(data) + assert obj == decoded_data + + +class SplitCommandTest(unittest.TestCase): + def test_split_command_with_unicode(self): + assert split_command('echo μμ') == ['echo', 'μμ'] + + +class PortsTest(unittest.TestCase): + def test_split_port_with_host_ip(self): + internal_port, external_port = split_port("127.0.0.1:1000:2000") + assert internal_port == ["2000"] + assert external_port == [("127.0.0.1", "1000")] + + def test_split_port_with_protocol(self): + for protocol in ['tcp', 'udp', 'sctp']: + internal_port, external_port = split_port( + f"127.0.0.1:1000:2000/{protocol}" + ) + assert internal_port == [f"2000/{protocol}"] + assert external_port == [("127.0.0.1", "1000")] + + def test_split_port_with_host_ip_no_port(self): + internal_port, external_port = split_port("127.0.0.1::2000") + assert internal_port == ["2000"] + assert external_port == [("127.0.0.1", None)] + + def test_split_port_range_with_host_ip_no_port(self): + internal_port, external_port = split_port("127.0.0.1::2000-2001") + assert internal_port == ["2000", "2001"] + assert external_port == [("127.0.0.1", None), ("127.0.0.1", None)] + + def test_split_port_with_host_port(self): + internal_port, external_port = split_port("1000:2000") + assert internal_port == ["2000"] + assert external_port == ["1000"] + + def test_split_port_range_with_host_port(self): + internal_port, external_port = split_port("1000-1001:2000-2001") + assert internal_port == ["2000", "2001"] + assert external_port == ["1000", "1001"] + + def test_split_port_random_port_range_with_host_port(self): + internal_port, external_port = split_port("1000-1001:2000") + assert internal_port == ["2000"] + assert external_port == ["1000-1001"] + + def test_split_port_no_host_port(self): + internal_port, external_port = split_port("2000") + assert internal_port == ["2000"] + assert external_port is None + + def test_split_port_range_no_host_port(self): + internal_port, external_port = split_port("2000-2001") + assert internal_port == ["2000", "2001"] + assert external_port is None + + def test_split_port_range_with_protocol(self): + internal_port, external_port = split_port( + "127.0.0.1:1000-1001:2000-2001/udp") + assert internal_port == ["2000/udp", "2001/udp"] + assert external_port == [("127.0.0.1", "1000"), ("127.0.0.1", "1001")] + + def test_split_port_with_ipv6_address(self): + internal_port, external_port = split_port( + "2001:abcd:ef00::2:1000:2000") + assert internal_port == ["2000"] + assert external_port == [("2001:abcd:ef00::2", "1000")] + + def test_split_port_with_ipv6_square_brackets_address(self): + internal_port, external_port = split_port( + "[2001:abcd:ef00::2]:1000:2000") + assert internal_port == ["2000"] + assert external_port == [("2001:abcd:ef00::2", "1000")] + + def test_split_port_invalid(self): + with pytest.raises(ValueError): + split_port("0.0.0.0:1000:2000:tcp") + + def test_split_port_invalid_protocol(self): + with pytest.raises(ValueError): + split_port("0.0.0.0:1000:2000/ftp") + + def test_non_matching_length_port_ranges(self): + with 
pytest.raises(ValueError): + split_port("0.0.0.0:1000-1010:2000-2002/tcp") + + def test_port_and_range_invalid(self): + with pytest.raises(ValueError): + split_port("0.0.0.0:1000:2000-2002/tcp") + + def test_port_only_with_colon(self): + with pytest.raises(ValueError): + split_port(":80") + + def test_host_only_with_colon(self): + with pytest.raises(ValueError): + split_port("localhost:") + + def test_with_no_container_port(self): + with pytest.raises(ValueError): + split_port("localhost:80:") + + def test_split_port_empty_string(self): + with pytest.raises(ValueError): + split_port("") + + def test_split_port_non_string(self): + assert split_port(1243) == (['1243'], None) + + def test_build_port_bindings_with_one_port(self): + port_bindings = build_port_bindings(["127.0.0.1:1000:1000"]) + assert port_bindings["1000"] == [("127.0.0.1", "1000")] + + def test_build_port_bindings_with_matching_internal_ports(self): + port_bindings = build_port_bindings( + ["127.0.0.1:1000:1000", "127.0.0.1:2000:1000"]) + assert port_bindings["1000"] == [ + ("127.0.0.1", "1000"), ("127.0.0.1", "2000") + ] + + def test_build_port_bindings_with_nonmatching_internal_ports(self): + port_bindings = build_port_bindings( + ["127.0.0.1:1000:1000", "127.0.0.1:2000:2000"]) + assert port_bindings["1000"] == [("127.0.0.1", "1000")] + assert port_bindings["2000"] == [("127.0.0.1", "2000")] + + def test_build_port_bindings_with_port_range(self): + port_bindings = build_port_bindings(["127.0.0.1:1000-1001:1000-1001"]) + assert port_bindings["1000"] == [("127.0.0.1", "1000")] + assert port_bindings["1001"] == [("127.0.0.1", "1001")] + + def test_build_port_bindings_with_matching_internal_port_ranges(self): + port_bindings = build_port_bindings( + ["127.0.0.1:1000-1001:1000-1001", "127.0.0.1:2000-2001:1000-1001"]) + assert port_bindings["1000"] == [ + ("127.0.0.1", "1000"), ("127.0.0.1", "2000") + ] + assert port_bindings["1001"] == [ + ("127.0.0.1", "1001"), ("127.0.0.1", "2001") + ] + + def test_build_port_bindings_with_nonmatching_internal_port_ranges(self): + port_bindings = build_port_bindings( + ["127.0.0.1:1000:1000", "127.0.0.1:2000:2000"]) + assert port_bindings["1000"] == [("127.0.0.1", "1000")] + assert port_bindings["2000"] == [("127.0.0.1", "2000")] + + +class FormatEnvironmentTest(unittest.TestCase): + def test_format_env_binary_unicode_value(self): + env_dict = { + 'ARTIST_NAME': b'\xec\x86\xa1\xec\xa7\x80\xec\x9d\x80' + } + assert format_environment(env_dict) == ['ARTIST_NAME=송지은'] + + def test_format_env_no_value(self): + env_dict = { + 'FOO': None, + 'BAR': '', + } + assert sorted(format_environment(env_dict)) == ['BAR=', 'FOO'] + + +def test_compare_versions(): + assert compare_version('1.0', '1.1') == 1 + assert compare_version('1.10', '1.1') == -1 + assert compare_version('1.10', '1.10') == 0 + assert compare_version('1.10.0', '1.10.1') == 1 + assert compare_version('1.9', '1.10') == 1 + assert compare_version('1.9.1', '1.10') == 1 + # Test comparison helpers + assert version_lt('1.0', '1.27') + assert version_gte('1.27', '1.20') + # Test zero-padding + assert compare_version('1', '1.0') == 0 + assert compare_version('1.10', '1.10.1') == 1 + assert compare_version('1.10.0', '1.10') == 0 diff --git a/tests/utils_test.py b/tests/utils_test.py deleted file mode 100644 index 716cde5578..0000000000 --- a/tests/utils_test.py +++ /dev/null @@ -1,332 +0,0 @@ -import os -import os.path -import unittest - -from docker.client import Client -from docker.errors import DockerException -from docker.utils import ( - 
parse_repository_tag, parse_host, convert_filters, kwargs_from_env, - create_host_config, Ulimit, LogConfig, parse_bytes -) -from docker.utils.ports import build_port_bindings, split_port -from docker.auth import resolve_authconfig - -import base - - -class UtilsTest(base.BaseTestCase): - longMessage = True - - def setUp(self): - self.os_environ = os.environ.copy() - - def tearDown(self): - os.environ = self.os_environ - - def test_parse_repository_tag(self): - self.assertEqual(parse_repository_tag("root"), - ("root", None)) - self.assertEqual(parse_repository_tag("root:tag"), - ("root", "tag")) - self.assertEqual(parse_repository_tag("user/repo"), - ("user/repo", None)) - self.assertEqual(parse_repository_tag("user/repo:tag"), - ("user/repo", "tag")) - self.assertEqual(parse_repository_tag("url:5000/repo"), - ("url:5000/repo", None)) - self.assertEqual(parse_repository_tag("url:5000/repo:tag"), - ("url:5000/repo", "tag")) - - def test_parse_bytes(self): - self.assertEqual(parse_bytes("512MB"), (536870912)) - self.assertEqual(parse_bytes("512M"), (536870912)) - self.assertRaises(DockerException, parse_bytes, "512MK") - self.assertRaises(DockerException, parse_bytes, "512L") - - def test_parse_host(self): - invalid_hosts = [ - '0.0.0.0', - 'tcp://', - 'udp://127.0.0.1', - 'udp://127.0.0.1:2375', - ] - - valid_hosts = { - '0.0.0.1:5555': 'http://0.0.0.1:5555', - ':6666': 'http://127.0.0.1:6666', - 'tcp://:7777': 'http://127.0.0.1:7777', - 'http://:7777': 'http://127.0.0.1:7777', - 'https://kokia.jp:2375': 'https://kokia.jp:2375', - '': 'http+unix://var/run/docker.sock', - None: 'http+unix://var/run/docker.sock', - 'unix:///var/run/docker.sock': 'http+unix:///var/run/docker.sock', - 'unix://': 'http+unix://var/run/docker.sock' - } - - for host in invalid_hosts: - try: - parsed = parse_host(host) - self.fail('Expected to fail but success: %s -> %s' % ( - host, parsed - )) - except DockerException: - pass - - for host, expected in valid_hosts.items(): - self.assertEqual(parse_host(host), expected, msg=host) - - def test_kwargs_from_env(self): - os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376', - DOCKER_CERT_PATH=os.path.join( - os.path.dirname(__file__), - 'testdata/certs'), - DOCKER_TLS_VERIFY='1') - kwargs = kwargs_from_env(assert_hostname=False) - self.assertEqual('https://192.168.59.103:2376', kwargs['base_url']) - self.assertTrue('ca.pem' in kwargs['tls'].verify) - self.assertTrue('cert.pem' in kwargs['tls'].cert[0]) - self.assertTrue('key.pem' in kwargs['tls'].cert[1]) - self.assertEqual(False, kwargs['tls'].assert_hostname) - try: - client = Client(**kwargs) - self.assertEqual(kwargs['base_url'], client.base_url) - self.assertEqual(kwargs['tls'].verify, client.verify) - self.assertEqual(kwargs['tls'].cert, client.cert) - except TypeError as e: - self.fail(e) - - def test_convert_filters(self): - tests = [ - ({'dangling': True}, '{"dangling": ["true"]}'), - ({'dangling': "true"}, '{"dangling": ["true"]}'), - ({'exited': 0}, '{"exited": [0]}'), - ({'exited': [0, 1]}, '{"exited": [0, 1]}'), - ] - - for filters, expected in tests: - self.assertEqual(convert_filters(filters), expected) - - def test_create_empty_host_config(self): - empty_config = create_host_config() - self.assertEqual(empty_config, {}) - - def test_create_host_config_dict_ulimit(self): - ulimit_dct = {'name': 'nofile', 'soft': 8096} - config = create_host_config(ulimits=[ulimit_dct]) - self.assertIn('Ulimits', config) - self.assertEqual(len(config['Ulimits']), 1) - ulimit_obj = config['Ulimits'][0] - 
self.assertTrue(isinstance(ulimit_obj, Ulimit)) - self.assertEqual(ulimit_obj.name, ulimit_dct['name']) - self.assertEqual(ulimit_obj.soft, ulimit_dct['soft']) - self.assertEqual(ulimit_obj['Soft'], ulimit_obj.soft) - - def test_create_host_config_dict_ulimit_capitals(self): - ulimit_dct = {'Name': 'nofile', 'Soft': 8096, 'Hard': 8096 * 4} - config = create_host_config(ulimits=[ulimit_dct]) - self.assertIn('Ulimits', config) - self.assertEqual(len(config['Ulimits']), 1) - ulimit_obj = config['Ulimits'][0] - self.assertTrue(isinstance(ulimit_obj, Ulimit)) - self.assertEqual(ulimit_obj.name, ulimit_dct['Name']) - self.assertEqual(ulimit_obj.soft, ulimit_dct['Soft']) - self.assertEqual(ulimit_obj.hard, ulimit_dct['Hard']) - self.assertEqual(ulimit_obj['Soft'], ulimit_obj.soft) - - def test_create_host_config_obj_ulimit(self): - ulimit_dct = Ulimit(name='nofile', soft=8096) - config = create_host_config(ulimits=[ulimit_dct]) - self.assertIn('Ulimits', config) - self.assertEqual(len(config['Ulimits']), 1) - ulimit_obj = config['Ulimits'][0] - self.assertTrue(isinstance(ulimit_obj, Ulimit)) - self.assertEqual(ulimit_obj, ulimit_dct) - - def test_ulimit_invalid_type(self): - self.assertRaises(ValueError, lambda: Ulimit(name=None)) - self.assertRaises(ValueError, lambda: Ulimit(name='hello', soft='123')) - self.assertRaises(ValueError, lambda: Ulimit(name='hello', hard='456')) - - def test_create_host_config_dict_logconfig(self): - dct = {'type': LogConfig.types.SYSLOG, 'config': {'key1': 'val1'}} - config = create_host_config(log_config=dct) - self.assertIn('LogConfig', config) - self.assertTrue(isinstance(config['LogConfig'], LogConfig)) - self.assertEqual(dct['type'], config['LogConfig'].type) - - def test_create_host_config_obj_logconfig(self): - obj = LogConfig(type=LogConfig.types.SYSLOG, config={'key1': 'val1'}) - config = create_host_config(log_config=obj) - self.assertIn('LogConfig', config) - self.assertTrue(isinstance(config['LogConfig'], LogConfig)) - self.assertEqual(obj, config['LogConfig']) - - def test_logconfig_invalid_type(self): - self.assertRaises(ValueError, lambda: LogConfig(type='xxx', config={})) - self.assertRaises(ValueError, lambda: LogConfig( - type=LogConfig.types.JSON, config='helloworld' - )) - - def test_resolve_authconfig(self): - auth_config = { - 'https://index.docker.io/v1/': {'auth': 'indexuser'}, - 'my.registry.net': {'auth': 'privateuser'}, - 'http://legacy.registry.url/v1/': {'auth': 'legacyauth'} - } - # hostname only - self.assertEqual( - resolve_authconfig(auth_config, 'my.registry.net'), - {'auth': 'privateuser'} - ) - # no protocol - self.assertEqual( - resolve_authconfig(auth_config, 'my.registry.net/v1/'), - {'auth': 'privateuser'} - ) - # no path - self.assertEqual( - resolve_authconfig(auth_config, 'http://my.registry.net'), - {'auth': 'privateuser'} - ) - # no path, trailing slash - self.assertEqual( - resolve_authconfig(auth_config, 'http://my.registry.net/'), - {'auth': 'privateuser'} - ) - # no path, wrong secure protocol - self.assertEqual( - resolve_authconfig(auth_config, 'https://my.registry.net'), - {'auth': 'privateuser'} - ) - # no path, wrong insecure protocol - self.assertEqual( - resolve_authconfig(auth_config, 'http://index.docker.io'), - {'auth': 'indexuser'} - ) - # with path, wrong protocol - self.assertEqual( - resolve_authconfig(auth_config, 'https://my.registry.net/v1/'), - {'auth': 'privateuser'} - ) - # default registry - self.assertEqual( - resolve_authconfig(auth_config), {'auth': 'indexuser'} - ) - # default registry 
(explicit None) - self.assertEqual( - resolve_authconfig(auth_config, None), {'auth': 'indexuser'} - ) - # fully explicit - self.assertEqual( - resolve_authconfig(auth_config, 'http://my.registry.net/v1/'), - {'auth': 'privateuser'} - ) - # legacy entry in config - self.assertEqual( - resolve_authconfig(auth_config, 'legacy.registry.url'), - {'auth': 'legacyauth'} - ) - # no matching entry - self.assertTrue( - resolve_authconfig(auth_config, 'does.not.exist') is None - ) - - def test_split_port_with_host_ip(self): - internal_port, external_port = split_port("127.0.0.1:1000:2000") - self.assertEqual(internal_port, ["2000"]) - self.assertEqual(external_port, [("127.0.0.1", "1000")]) - - def test_split_port_with_protocol(self): - internal_port, external_port = split_port("127.0.0.1:1000:2000/udp") - self.assertEqual(internal_port, ["2000/udp"]) - self.assertEqual(external_port, [("127.0.0.1", "1000")]) - - def test_split_port_with_host_ip_no_port(self): - internal_port, external_port = split_port("127.0.0.1::2000") - self.assertEqual(internal_port, ["2000"]) - self.assertEqual(external_port, [("127.0.0.1", None)]) - - def test_split_port_range_with_host_ip_no_port(self): - internal_port, external_port = split_port("127.0.0.1::2000-2001") - self.assertEqual(internal_port, ["2000", "2001"]) - self.assertEqual(external_port, - [("127.0.0.1", None), ("127.0.0.1", None)]) - - def test_split_port_with_host_port(self): - internal_port, external_port = split_port("1000:2000") - self.assertEqual(internal_port, ["2000"]) - self.assertEqual(external_port, ["1000"]) - - def test_split_port_range_with_host_port(self): - internal_port, external_port = split_port("1000-1001:2000-2001") - self.assertEqual(internal_port, ["2000", "2001"]) - self.assertEqual(external_port, ["1000", "1001"]) - - def test_split_port_no_host_port(self): - internal_port, external_port = split_port("2000") - self.assertEqual(internal_port, ["2000"]) - self.assertEqual(external_port, None) - - def test_split_port_range_no_host_port(self): - internal_port, external_port = split_port("2000-2001") - self.assertEqual(internal_port, ["2000", "2001"]) - self.assertEqual(external_port, None) - - def test_split_port_range_with_protocol(self): - internal_port, external_port = split_port( - "127.0.0.1:1000-1001:2000-2001/udp") - self.assertEqual(internal_port, ["2000/udp", "2001/udp"]) - self.assertEqual(external_port, - [("127.0.0.1", "1000"), ("127.0.0.1", "1001")]) - - def test_split_port_invalid(self): - self.assertRaises(ValueError, - lambda: split_port("0.0.0.0:1000:2000:tcp")) - - def test_non_matching_length_port_ranges(self): - self.assertRaises( - ValueError, - lambda: split_port("0.0.0.0:1000-1010:2000-2002/tcp") - ) - - def test_port_and_range_invalid(self): - self.assertRaises(ValueError, - lambda: split_port("0.0.0.0:1000:2000-2002/tcp")) - - def test_build_port_bindings_with_one_port(self): - port_bindings = build_port_bindings(["127.0.0.1:1000:1000"]) - self.assertEqual(port_bindings["1000"], [("127.0.0.1", "1000")]) - - def test_build_port_bindings_with_matching_internal_ports(self): - port_bindings = build_port_bindings( - ["127.0.0.1:1000:1000", "127.0.0.1:2000:1000"]) - self.assertEqual(port_bindings["1000"], - [("127.0.0.1", "1000"), ("127.0.0.1", "2000")]) - - def test_build_port_bindings_with_nonmatching_internal_ports(self): - port_bindings = build_port_bindings( - ["127.0.0.1:1000:1000", "127.0.0.1:2000:2000"]) - self.assertEqual(port_bindings["1000"], [("127.0.0.1", "1000")]) - self.assertEqual(port_bindings["2000"], 
[("127.0.0.1", "2000")]) - - def test_build_port_bindings_with_port_range(self): - port_bindings = build_port_bindings(["127.0.0.1:1000-1001:1000-1001"]) - self.assertEqual(port_bindings["1000"], [("127.0.0.1", "1000")]) - self.assertEqual(port_bindings["1001"], [("127.0.0.1", "1001")]) - - def test_build_port_bindings_with_matching_internal_port_ranges(self): - port_bindings = build_port_bindings( - ["127.0.0.1:1000-1001:1000-1001", "127.0.0.1:2000-2001:1000-1001"]) - self.assertEqual(port_bindings["1000"], - [("127.0.0.1", "1000"), ("127.0.0.1", "2000")]) - self.assertEqual(port_bindings["1001"], - [("127.0.0.1", "1001"), ("127.0.0.1", "2001")]) - - def test_build_port_bindings_with_nonmatching_internal_port_ranges(self): - port_bindings = build_port_bindings( - ["127.0.0.1:1000:1000", "127.0.0.1:2000:2000"]) - self.assertEqual(port_bindings["1000"], [("127.0.0.1", "1000")]) - self.assertEqual(port_bindings["2000"], [("127.0.0.1", "2000")]) - -if __name__ == '__main__': - unittest.main() diff --git a/tox.ini b/tox.ini index 10b9df935e..19689b9645 100644 --- a/tox.ini +++ b/tox.ini @@ -1,19 +1,13 @@ [tox] -envlist = py26, py27, py32, py33, py34, flake8 +envlist = py{37,38,39,310,311,312}, ruff skipsdist=True [testenv] usedevelop=True commands = - {envbindir}/coverage run -p tests/test.py - {envbindir}/coverage run -p tests/utils_test.py - {envbindir}/coverage combine - {envbindir}/coverage report - {envbindir}/coverage html -deps = - -r{toxinidir}/test-requirements.txt - -r{toxinidir}/requirements.txt + py.test -v --cov=docker {posargs:tests/unit} +extras = dev -[testenv:flake8] -commands = flake8 docker tests -deps = flake8 +[testenv:ruff] +commands = ruff docker tests setup.py +extras = dev