diff --git a/.github/workflows/diff_shades.yml b/.github/workflows/diff_shades.yml index a126756f102..d685ef9456d 100644 --- a/.github/workflows/diff_shades.yml +++ b/.github/workflows/diff_shades.yml @@ -26,7 +26,7 @@ jobs: - name: Install diff-shades and support dependencies run: | - python -m pip install click packaging urllib3 + python -m pip install 'click==8.1.3' packaging urllib3 python -m pip install https://github.com/ichard26/diff-shades/archive/stable.zip - name: Calculate run configuration & metadata @@ -64,7 +64,7 @@ jobs: - name: Install diff-shades and support dependencies run: | python -m pip install https://github.com/ichard26/diff-shades/archive/stable.zip - python -m pip install click packaging urllib3 + python -m pip install 'click==8.1.3' packaging urllib3 # After checking out old revisions, this might not exist so we'll use a copy. cat scripts/diff_shades_gha_helper.py > helper.py git config user.name "diff-shades-gha" diff --git a/.github/workflows/diff_shades_comment.yml b/.github/workflows/diff_shades_comment.yml index bb81ca4f0d6..22c293f91d2 100644 --- a/.github/workflows/diff_shades_comment.yml +++ b/.github/workflows/diff_shades_comment.yml @@ -33,7 +33,7 @@ jobs: - name: Try to find pre-existing PR comment if: steps.metadata.outputs.needs-comment == 'true' id: find-comment - uses: peter-evans/find-comment@034abe94d3191f9c89d870519735beae326f2bdb + uses: peter-evans/find-comment@a54c31d7fa095754bfef525c0c8e5e5674c4b4b1 with: issue-number: ${{ steps.metadata.outputs.pr-number }} comment-author: "github-actions[bot]" @@ -41,7 +41,7 @@ jobs: - name: Create or update PR comment if: steps.metadata.outputs.needs-comment == 'true' - uses: peter-evans/create-or-update-comment@67dcc547d311b736a8e6c5c236542148a47adc3d + uses: peter-evans/create-or-update-comment@c6c9a1a66007646a28c153e2a8580a5bad27bcfa with: comment-id: ${{ steps.find-comment.outputs.comment-id }} issue-number: ${{ steps.metadata.outputs.pr-number }} 
diff --git a/.github/workflows/fuzz.yml b/.github/workflows/fuzz.yml index 373e1500ee9..4439148a1c7 100644 --- a/.github/workflows/fuzz.yml +++ b/.github/workflows/fuzz.yml @@ -22,7 +22,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] + python-version: ["3.8", "3.9", "3.10", "3.11"] steps: - uses: actions/checkout@v3 diff --git a/.github/workflows/pypi_upload.yml b/.github/workflows/pypi_upload.yml index d5797c7d230..06600fcbc45 100644 --- a/.github/workflows/pypi_upload.yml +++ b/.github/workflows/pypi_upload.yml @@ -58,7 +58,7 @@ jobs: - uses: actions/checkout@v3 - name: Build wheels via cibuildwheel - uses: pypa/cibuildwheel@v2.12.1 + uses: pypa/cibuildwheel@v2.13.1 env: CIBW_ARCHS_MACOS: "${{ matrix.macos_arch }}" diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 3ca2a469147..4bf687435b4 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -31,7 +31,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.7", "3.8", "3.9", "3.10", "3.11", "pypy-3.7", "pypy-3.8"] + python-version: ["3.8", "3.9", "3.10", "3.11", "pypy-3.8"] os: [ubuntu-latest, macOS-latest, windows-latest] steps: @@ -58,7 +58,9 @@ jobs: - name: Upload coverage to Coveralls # Upload coverage if we are on the main repository and # we're running on Linux (this action only supports Linux) - if: github.repository == 'psf/black' && matrix.os == 'ubuntu-latest' + if: + github.repository == 'psf/black' && matrix.os == 'ubuntu-latest' && + !startsWith(matrix.python-version, 'pypy') uses: AndreMiras/coveralls-python-action@v20201129 with: github-token: ${{ secrets.GITHUB_TOKEN }} @@ -103,4 +105,4 @@ jobs: python -m pip install -e ".[uvloop]" - name: Format ourselves - run: python -m black --check src/ + run: python -m black --check . 
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a69fb645238..c2f4b1684e6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -38,7 +38,7 @@ repos: - flake8-simplify - repo: https://github.com/pre-commit/mirrors-mypy - rev: v0.991 + rev: v1.4.1 hooks: - id: mypy exclude: ^docs/conf.py @@ -47,7 +47,7 @@ repos: - types-PyYAML - tomli >= 0.2.6, < 2.0.0 - types-typed-ast >= 1.4.1 - - click >= 8.1.0 + - click >= 8.1.0, != 8.1.4 - packaging >= 22.0 - platformdirs >= 2.1.0 - pytest diff --git a/CHANGES.md b/CHANGES.md index 7c76bca4f6a..c61ee698c5d 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -47,6 +47,78 @@ +## 23.7.0 + +### Highlights + +- Runtime support for Python 3.7 has been removed. Formatting 3.7 code will still be + supported until further notice (#3765) + +### Stable style + +- Fix a bug where an illegal trailing comma was added to return type annotations using + PEP 604 unions (#3735) +- Fix several bugs and crashes where comments in stub files were removed or mishandled + under some circumstances (#3745) +- Fix a crash with multi-line magic comments like `type: ignore` within parentheses + (#3740) +- Fix error in AST validation when _Black_ removes trailing whitespace in a type comment + (#3773) + +### Preview style + +- Implicitly concatenated strings used as function args are no longer wrapped inside + parentheses (#3640) +- Remove blank lines between a class definition and its docstring (#3692) + +### Configuration + +- The `--workers` argument to _Black_ can now be specified via the `BLACK_NUM_WORKERS` + environment variable (#3743) +- `.pytest_cache`, `.ruff_cache` and `.vscode` are now excluded by default (#3691) +- Fix _Black_ not honouring `pyproject.toml` settings when running `--stdin-filename` + and the `pyproject.toml` found isn't in the current working directory (#3719) +- _Black_ will now error if `exclude` and `extend-exclude` have invalid data types in + `pyproject.toml`, instead of silently 
doing the wrong thing (#3764) + +### Packaging + +- Upgrade mypyc from 0.991 to 1.3 (#3697) +- Remove patching of Click that mitigated errors on Python 3.6 with `LANG=C` (#3768) + +### Parser + +- Add support for the new PEP 695 syntax in Python 3.12 (#3703) + +### Performance + +- Speed up _Black_ significantly when the cache is full (#3751) +- Avoid importing `IPython` in a case where we wouldn't need it (#3748) + +### Output + +- Use aware UTC datetimes internally, avoids deprecation warning on Python 3.12 (#3728) +- Change verbose logging to exactly mirror _Black_'s logic for source discovery (#3749) + +### _Blackd_ + +- The `blackd` argument parser now shows the default values for options in their help + text (#3712) + +### Integrations + +- Black is now tested with + [`PYTHONWARNDEFAULTENCODING = 1`](https://docs.python.org/3/library/io.html#io-encoding-warning) + (#3763) +- Update GitHub Action to display black output in the job summary (#3688) + +### Documentation + +- Add a CITATION.cff file to the root of the repository, containing metadata on how to + cite this software (#3723) +- Update the _classes_ and _exceptions_ documentation in Developer reference to match + the latest code base (#3755) + ## 23.3.0 ### Highlights diff --git a/CITATION.cff b/CITATION.cff new file mode 100644 index 00000000000..ddf64f616ff --- /dev/null +++ b/CITATION.cff @@ -0,0 +1,22 @@ +cff-version: 1.2.0 +title: "Black: The uncompromising Python code formatter" +message: >- + If you use this software, please cite it using the metadata from this file. +type: software +authors: + - family-names: Langa + given-names: Łukasz + - name: "contributors to Black" +repository-code: "https://github.com/psf/black" +url: "https://black.readthedocs.io/en/stable/" +abstract: >- + Black is the uncompromising Python code formatter. By using it, you agree to cede + control over minutiae of hand-formatting.
In return, Black gives you speed, + determinism, and freedom from pycodestyle nagging about formatting. You will save time + and mental energy for more important matters. + + Blackened code looks the same regardless of the project you're reading. Formatting + becomes transparent after a while and you can focus on the content instead. + + Black makes code review faster by producing the smallest diffs possible. +license: MIT diff --git a/action.yml b/action.yml index 35705e99414..282fca43dea 100644 --- a/action.yml +++ b/action.yml @@ -33,11 +33,12 @@ branding: runs: using: composite steps: - - run: | + - name: black + run: | if [ "$RUNNER_OS" == "Windows" ]; then - python $GITHUB_ACTION_PATH/action/main.py + python $GITHUB_ACTION_PATH/action/main.py | tee -a $GITHUB_STEP_SUMMARY else - python3 $GITHUB_ACTION_PATH/action/main.py + python3 $GITHUB_ACTION_PATH/action/main.py | tee -a $GITHUB_STEP_SUMMARY fi env: # TODO: Remove once https://github.com/actions/runner/issues/665 is fixed. diff --git a/action/main.py b/action/main.py index 23c3a652194..1911cfd7a01 100644 --- a/action/main.py +++ b/action/main.py @@ -32,7 +32,7 @@ describe_name = line[len("describe-name: ") :].rstrip() break if not describe_name: - print("::error::Failed to detect action version.", flush=True) + print("::error::Failed to detect action version.", file=sys.stderr, flush=True) sys.exit(1) # expected format is one of: # - 23.1.0 @@ -53,15 +53,25 @@ ) if pip_proc.returncode: print(pip_proc.stdout) - print("::error::Failed to install Black.", flush=True) + print("::error::Failed to install Black.", file=sys.stderr, flush=True) sys.exit(pip_proc.returncode) base_cmd = [str(ENV_BIN / "black")] if BLACK_ARGS: # TODO: remove after a while since this is deprecated in favour of SRC + OPTIONS. 
- proc = run([*base_cmd, *shlex.split(BLACK_ARGS)]) + proc = run( + [*base_cmd, *shlex.split(BLACK_ARGS)], + stdout=PIPE, + stderr=STDOUT, + encoding="utf-8", + ) else: - proc = run([*base_cmd, *shlex.split(OPTIONS), *shlex.split(SRC)]) - + proc = run( + [*base_cmd, *shlex.split(OPTIONS), *shlex.split(SRC)], + stdout=PIPE, + stderr=STDOUT, + encoding="utf-8", + ) +print(proc.stdout) sys.exit(proc.returncode) diff --git a/docs/contributing/index.md b/docs/contributing/index.md index f56e57c9e90..3314c8eaa39 100644 --- a/docs/contributing/index.md +++ b/docs/contributing/index.md @@ -24,7 +24,8 @@ not very). This is deliberate. _Black_ aims to provide a consistent style and ta opportunities for arguing about style. Bug reports and fixes are always welcome! Please follow the -[issue template on GitHub](https://github.com/psf/black/issues/new) for best results. +[issue templates on GitHub](https://github.com/psf/black/issues/new/choose) for best +results. Before you suggest a new feature or configuration knob, ask yourself why you want it. If it enables better integration with some workflow, fixes an inconsistency, speeds things diff --git a/docs/contributing/reference/reference_classes.rst b/docs/contributing/reference/reference_classes.rst index 3931e0e0072..29b25003af2 100644 --- a/docs/contributing/reference/reference_classes.rst +++ b/docs/contributing/reference/reference_classes.rst @@ -3,6 +3,9 @@ *Contents are subject to change.* +Black Classes +~~~~~~~~~~~~~~ + .. currentmodule:: black :class:`BracketTracker` @@ -18,6 +21,12 @@ :members: :special-members: __str__, __bool__ +:class:`RHSResult` +------------------------- + +.. autoclass:: black.lines.RHSResult + :members: + :class:`LinesBlock` ------------------------- @@ -43,6 +52,12 @@ .. autoclass:: black.comments.ProtoComment :members: +:class:`Mode` +--------------------- + +.. 
autoclass:: black.mode.Mode + :members: + :class:`Report` --------------- @@ -50,6 +65,20 @@ :members: :special-members: __str__ +:class:`Ok` +--------------- + +.. autoclass:: black.rusty.Ok + :show-inheritance: + :members: + +:class:`Err` +--------------- + +.. autoclass:: black.rusty.Err + :show-inheritance: + :members: + :class:`Visitor` ---------------- @@ -57,20 +86,115 @@ :show-inheritance: :members: -Enums -===== +:class:`StringTransformer` +---------------------------- -:class:`Changed` ----------------- +.. autoclass:: black.trans.StringTransformer + :show-inheritance: + :members: -.. autoclass:: black.Changed +:class:`CustomSplit` +---------------------------- + +.. autoclass:: black.trans.CustomSplit + :members: + +:class:`CustomSplitMapMixin` +----------------------------- + +.. autoclass:: black.trans.CustomSplitMapMixin :show-inheritance: :members: -:class:`Mode` ------------------ +:class:`StringMerger` +---------------------- -.. autoclass:: black.Mode +.. autoclass:: black.trans.StringMerger + :show-inheritance: + :members: + +:class:`StringParenStripper` +----------------------------- + +.. autoclass:: black.trans.StringParenStripper + :show-inheritance: + :members: + +:class:`BaseStringSplitter` +----------------------------- + +.. autoclass:: black.trans.BaseStringSplitter + :show-inheritance: + :members: + +:class:`StringSplitter` +----------------------------- + +.. autoclass:: black.trans.StringSplitter + :show-inheritance: + :members: + +:class:`StringParenWrapper` +----------------------------- + +.. autoclass:: black.trans.StringParenWrapper + :show-inheritance: + :members: + +:class:`StringParser` +----------------------------- + +.. autoclass:: black.trans.StringParser + :members: + +:class:`DebugVisitor` +------------------------ + +.. autoclass:: black.debug.DebugVisitor + :show-inheritance: + :members: + +:class:`Replacement` +------------------------ + +.. 
autoclass:: black.handle_ipynb_magics.Replacement + :members: + +:class:`CellMagic` +------------------------ + +.. autoclass:: black.handle_ipynb_magics.CellMagic + :members: + +:class:`CellMagicFinder` +------------------------ + +.. autoclass:: black.handle_ipynb_magics.CellMagicFinder + :show-inheritance: + :members: + +:class:`OffsetAndMagic` +------------------------ + +.. autoclass:: black.handle_ipynb_magics.OffsetAndMagic + :members: + +:class:`MagicFinder` +------------------------ + +.. autoclass:: black.handle_ipynb_magics.MagicFinder + :show-inheritance: + :members: + +Enum Classes +~~~~~~~~~~~~~ + +Classes inherited from Python `Enum `_ class. + +:class:`Changed` +---------------- + +.. autoclass:: black.report.Changed :show-inheritance: :members: @@ -80,3 +204,24 @@ Enums .. autoclass:: black.WriteBack :show-inheritance: :members: + +:class:`TargetVersion` +---------------------- + +.. autoclass:: black.mode.TargetVersion + :show-inheritance: + :members: + +:class:`Feature` +------------------ + +.. autoclass:: black.mode.Feature + :show-inheritance: + :members: + +:class:`Preview` +------------------ + +.. autoclass:: black.mode.Preview + :show-inheritance: + :members: diff --git a/docs/contributing/reference/reference_exceptions.rst b/docs/contributing/reference/reference_exceptions.rst index aafe61e5017..ab46ebdb628 100644 --- a/docs/contributing/reference/reference_exceptions.rst +++ b/docs/contributing/reference/reference_exceptions.rst @@ -5,8 +5,14 @@ .. currentmodule:: black +.. autoexception:: black.trans.CannotTransform + .. autoexception:: black.linegen.CannotSplit -.. autoexception:: black.NothingChanged +.. autoexception:: black.brackets.BracketMatchError + +.. autoexception:: black.report.NothingChanged + +.. autoexception:: black.parsing.InvalidInput -.. autoexception:: black.InvalidInput +.. 
autoexception:: black.mode.Deprecated diff --git a/docs/contributing/reference/reference_functions.rst b/docs/contributing/reference/reference_functions.rst index 3bda5de1774..09517f73961 100644 --- a/docs/contributing/reference/reference_functions.rst +++ b/docs/contributing/reference/reference_functions.rst @@ -165,8 +165,6 @@ Utilities .. autofunction:: black.linegen.normalize_invisible_parens -.. autofunction:: black.patch_click - .. autofunction:: black.nodes.preceding_leaf .. autofunction:: black.re_compile_maybe_verbose diff --git a/docs/contributing/reference/reference_summary.rst b/docs/contributing/reference/reference_summary.rst index f6ff4681557..c6163d897b6 100644 --- a/docs/contributing/reference/reference_summary.rst +++ b/docs/contributing/reference/reference_summary.rst @@ -3,8 +3,11 @@ Developer reference .. note:: - The documentation here is quite outdated and has been neglected. Many objects worthy - of inclusion aren't documented. Contributions are appreciated! + As of June 2023, the documentation of *Black classes* and *Black exceptions* + has been updated to the latest available version. + + The documentation of *Black functions* is quite outdated and has been neglected. Many + functions worthy of inclusion aren't documented. Contributions are appreciated! *Contents are subject to change.* diff --git a/docs/faq.md b/docs/faq.md index a6a422c2fec..8941ca3fe4d 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -57,8 +57,8 @@ _Black_ is timid about formatting Jupyter Notebooks. Cells containing any of the following will not be formatted: - automagics (e.g. `pip install black`) -- non-Python cell magics (e.g. `%%writeline`). These can be added with the flag - `--python-cell-magics`, e.g. `black --python-cell-magics writeline hello.ipynb`. +- non-Python cell magics (e.g. `%%writefile`). These can be added with the flag + `--python-cell-magics`, e.g. `black --python-cell-magics writefile hello.ipynb`. 
- multiline magics, e.g.: ```python diff --git a/docs/guides/introducing_black_to_your_project.md b/docs/guides/introducing_black_to_your_project.md index 9ae40a1928e..71a566fbda1 100644 --- a/docs/guides/introducing_black_to_your_project.md +++ b/docs/guides/introducing_black_to_your_project.md @@ -46,7 +46,6 @@ $ git config blame.ignoreRevsFile .git-blame-ignore-revs **The one caveat is that some online Git-repositories like GitLab do not yet support ignoring revisions using their native blame UI.** So blame information will be cluttered with a reformatting commit on those platforms. (If you'd like this feature, there's an -open issue for [GitLab](https://gitlab.com/gitlab-org/gitlab/-/issues/31423)). This is -however supported by -[GitHub](https://docs.github.com/en/repositories/working-with-files/using-files/viewing-a-file#ignore-commits-in-the-blame-view), -currently in beta. +open issue for [GitLab](https://gitlab.com/gitlab-org/gitlab/-/issues/31423)). +[GitHub supports `.git-blame-ignore-revs`](https://docs.github.com/en/repositories/working-with-files/using-files/viewing-a-file#ignore-commits-in-the-blame-view) +by default in blame views however. diff --git a/docs/integrations/editors.md b/docs/integrations/editors.md index 74c6a283ab8..ff563068e79 100644 --- a/docs/integrations/editors.md +++ b/docs/integrations/editors.md @@ -334,60 +334,6 @@ To run _Black_ on a key press (e.g. F9 below), add this: nnoremap :Black ``` -#### Troubleshooting - -**How to get Vim with Python 3.6?** On Ubuntu 17.10 Vim comes with Python 3.6 by -default. On macOS with Homebrew run: `brew install vim`. When building Vim from source, -use: `./configure --enable-python3interp=yes`. There's many guides online how to do -this. 
- -**I get an import error when using _Black_ from a virtual environment**: If you get an -error message like this: - -```text -Traceback (most recent call last): - File "", line 63, in - File "/home/gui/.vim/black/lib/python3.7/site-packages/black.py", line 45, in - from typed_ast import ast3, ast27 - File "/home/gui/.vim/black/lib/python3.7/site-packages/typed_ast/ast3.py", line 40, in - from typed_ast import _ast3 -ImportError: /home/gui/.vim/black/lib/python3.7/site-packages/typed_ast/_ast3.cpython-37m-x86_64-linux-gnu.so: undefined symbool: PyExc_KeyboardInterrupt -``` - -Then you need to install `typed_ast` directly from the source code. The error happens -because `pip` will download [Python wheels](https://pythonwheels.com/) if they are -available. Python wheels are a new standard of distributing Python packages and packages -that have Cython and extensions written in C are already compiled, so the installation -is much more faster. The problem here is that somehow the Python environment inside Vim -does not match with those already compiled C extensions and these kind of errors are the -result. Luckily there is an easy fix: installing the packages from the source code. - -The package that causes problems is: - -- [typed-ast](https://pypi.org/project/typed-ast/) - -Now remove those two packages: - -```console -$ pip uninstall typed-ast -y -``` - -And now you can install them with: - -```console -$ pip install --no-binary :all: typed-ast -``` - -The C extensions will be compiled and now Vim's Python environment will match. Note that -you need to have the GCC compiler and the Python development files installed (on -Ubuntu/Debian do `sudo apt-get install build-essential python3-dev`). - -If you later want to update _Black_, you should do it like this: - -```console -$ pip install -U black --no-binary typed-ast -``` - ### With ALE 1. 
Install [`ale`](https://github.com/dense-analysis/ale) diff --git a/docs/integrations/source_version_control.md b/docs/integrations/source_version_control.md index de521833609..a9d33d2d853 100644 --- a/docs/integrations/source_version_control.md +++ b/docs/integrations/source_version_control.md @@ -7,14 +7,14 @@ Use [pre-commit](https://pre-commit.com/). Once you ```yaml repos: - repo: https://github.com/psf/black - rev: 23.3.0 + rev: 23.7.0 hooks: - id: black # It is recommended to specify the latest version of Python # supported by your project here, or alternatively use # pre-commit's default_language_version, see # https://pre-commit.com/#top_level-default_language_version - language_version: python3.9 + language_version: python3.11 ``` Feel free to switch out the `rev` value to something else, like another @@ -22,11 +22,27 @@ Feel free to switch out the `rev` value to something else, like another branches or other mutable refs since the hook [won't auto update as you may expect][pre-commit-mutable-rev]. -If you want support for Jupyter Notebooks as well, then replace `id: black` with -`id: black-jupyter`. +## Jupyter Notebooks + +There is an alternate hook `black-jupyter` that expands the targets of `black` to +include Jupyter Notebooks. To use this hook, simply replace the hook's `id: black` with +`id: black-jupyter` in the `.pre-commit-config.yaml`: + +```yaml +repos: + - repo: https://github.com/psf/black + rev: 23.7.0 + hooks: + - id: black-jupyter + # It is recommended to specify the latest version of Python + # supported by your project here, or alternatively use + # pre-commit's default_language_version, see + # https://pre-commit.com/#top_level-default_language_version + language_version: python3.11 +``` ```{note} -The `black-jupyter` hook is only available from version 21.8b0 and onwards. +The `black-jupyter` hook became available in version 21.8b0. 
``` [black-tags]: https://github.com/psf/black/tags diff --git a/docs/requirements.txt b/docs/requirements.txt index 9d059341b14..f1b47c69413 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,9 +1,9 @@ # Used by ReadTheDocs; pinned requirements for stability. -myst-parser==1.0.0 +myst-parser==2.0.0 Sphinx==6.1.3 # Older versions break Sphinx even though they're declared to be supported. docutils==0.19 sphinxcontrib-programoutput==0.17 -sphinx_copybutton==0.5.1 -furo==2023.3.23 +sphinx_copybutton==0.5.2 +furo==2023.5.20 diff --git a/docs/the_black_code_style/current_style.md b/docs/the_black_code_style/current_style.md index 83f8785cc55..0fb59fe5aae 100644 --- a/docs/the_black_code_style/current_style.md +++ b/docs/the_black_code_style/current_style.md @@ -140,6 +140,8 @@ If you're reaching for backslashes, that's a clear signal that you can do better slightly refactor your code. I hope some of the examples above show you that there are many ways in which you can do it. +(labels/line-length)= + ### Line length You probably noticed the peculiar default line length. _Black_ defaults to 88 characters @@ -158,33 +160,35 @@ harder to work with line lengths exceeding 100 characters. It also adversely aff side-by-side diff review on typical screen resolutions. Long lines also make it harder to present code neatly in documentation or talk slides. -If you're using Flake8, you can bump `max-line-length` to 88 and mostly forget about it. -However, it's better if you use [Bugbear](https://github.com/PyCQA/flake8-bugbear)'s -B950 warning instead of E501, and bump the max line length to 88 (or the `--line-length` -you used for black), which will align more with black's _"try to respect -`--line-length`, but don't become crazy if you can't"_. You'd do it like this: - -```ini -[flake8] -max-line-length = 88 -... 
-select = C,E,F,W,B,B950 -extend-ignore = E203, E501 -``` +``` #### Flake8 -Explanation of why E203 is disabled can be found further in this documentation. And if -you're curious about the reasoning behind B950, -[Bugbear's documentation](https://github.com/PyCQA/flake8-bugbear#opinionated-warnings) -explains it. The tl;dr is "it's like highway speed limits, we won't bother you if you -overdo it by a few km/h". +If you use Flake8, you have a few options: -**If you're looking for a minimal, black-compatible flake8 configuration:** +1. Recommended is using [Bugbear](https://github.com/PyCQA/flake8-bugbear) and enabling + its B950 check instead of using Flake8's E501, because it aligns with Black's 10% + rule. Install Bugbear and use the following config: -```ini -[flake8] -max-line-length = 88 -extend-ignore = E203 -``` + ```ini + [flake8] + max-line-length = 80 + ... + select = C,E,F,W,B,B950 + extend-ignore = E203, E501 + ``` + + The rationale for B950 is explained in + [Bugbear's documentation](https://github.com/PyCQA/flake8-bugbear#opinionated-warnings). + +2. For a minimally compatible config: + + ```ini + [flake8] + max-line-length = 88 + extend-ignore = E203 + ``` + +An explanation of why E203 is disabled can be found in the [Slices section](#slices) of +this page. ### Empty lines @@ -273,6 +277,8 @@ A pre-existing trailing comma informs _Black_ to always explode contents of the bracket pair into one item per line. Read more about this in the [Pragmatism](#pragmatism) section below. +(labels/strings)= + ### Strings _Black_ prefers double quotes (`"` and `"""`) over single quotes (`'` and `'''`). It @@ -457,6 +463,8 @@ there were not many users anyway. Not many edge cases were reported. As a mature _Black_ does make some exceptions to rules it otherwise holds. This section documents what those exceptions are and why this is the case.
+(labels/magic-trailing-comma)= + ### The magic trailing comma _Black_ in general does not take existing formatting into account. @@ -493,6 +501,8 @@ default by (among others) GitHub and Visual Studio Code, differentiates between r-strings and R-strings. The former are syntax highlighted as regular expressions while the latter are treated as true raw strings with no special semantics. +(labels/ast-changes)= + ### AST before and after formatting When run with `--safe` (the default), _Black_ checks that the code before and after is diff --git a/docs/the_black_code_style/future_style.md b/docs/the_black_code_style/future_style.md index f5fc3644f18..861bb64bff4 100644 --- a/docs/the_black_code_style/future_style.md +++ b/docs/the_black_code_style/future_style.md @@ -47,6 +47,8 @@ with contextlib.ExitStack() as exit_stack: ... ``` +(labels/preview-style)= + ## Preview style Experimental, potentially disruptive style changes are gathered under the `--preview` @@ -93,7 +95,6 @@ parentheses are now removed. For example: ```python my_dict = { - my_dict = { "a key in my dict": a_very_long_variable * and_a_very_long_function_call() / 100000.0, diff --git a/docs/usage_and_configuration/the_basics.md b/docs/usage_and_configuration/the_basics.md index b101e179d0e..f5862edccaa 100644 --- a/docs/usage_and_configuration/the_basics.md +++ b/docs/usage_and_configuration/the_basics.md @@ -26,62 +26,106 @@ python -m black {source_file_or_directory} ### Command line options -The CLI options of _Black_ can be displayed by expanding the view below or by running -`black --help`. While _Black_ has quite a few knobs these days, it is still opinionated -so style options are deliberately limited and rarely added. +The CLI options of _Black_ can be displayed by running `black --help`. All options are +also covered in more detail below. -
+While _Black_ has quite a few knobs these days, it is still opinionated so style options +are deliberately limited and rarely added. -CLI reference +Note that all command-line options listed above can also be configured using a +`pyproject.toml` file (more on that below). -```{program-output} black --help +#### `-c`, `--code` +Format the code passed in as a string. + +```console +$ black --code "print ( 'hello, world' )" +print("hello, world") ``` -
+#### `-l`, `--line-length` -Note that all command-line options listed above can also be configured using a -`pyproject.toml` file (more on that below). +How many characters per line to allow. The default is 88. -### Code input alternatives +See also [the style documentation](labels/line-length). -#### Standard Input +#### `-t`, `--target-version` -_Black_ supports formatting code via stdin, with the result being printed to stdout. -Just let _Black_ know with `-` as the path. +Python versions that should be supported by Black's output. You should include all +versions that your code supports. If you support Python 3.7 through 3.10, you should +write: ```console -$ echo "print ( 'hello, world' )" | black - -print("hello, world") -reformatted - -All done! ✨ 🍰 ✨ -1 file reformatted. +$ black -t py37 -t py38 -t py39 -t py310 ``` -**Tip:** if you need _Black_ to treat stdin input as a file passed directly via the CLI, -use `--stdin-filename`. Useful to make sure _Black_ will respect the `--force-exclude` -option on some editors that rely on using stdin. +In a [configuration file](#configuration-via-a-file), you can write: -#### As a string +```toml +target-version = ["py37", "py38", "py39", "py310"] +``` -You can also pass code as a string using the `-c` / `--code` option. +_Black_ uses this option to decide what grammar to use to parse your code. In addition, +it may use it to decide what style to use.
For example, support for a trailing comma +after `*args` in a function call was added in Python 3.5, so _Black_ will add this comma +only if the target versions are all Python 3.5 or higher: ```console -$ black --code "print ( 'hello, world' )" -print("hello, world") +$ black --line-length=10 --target-version=py35 -c 'f(a, *args)' +f( + a, + *args, +) +$ black --line-length=10 --target-version=py34 -c 'f(a, *args)' +f( + a, + *args +) +$ black --line-length=10 --target-version=py34 --target-version=py35 -c 'f(a, *args)' +f( + a, + *args +) ``` -### Writeback and reporting +#### `--pyi` -By default _Black_ reformats the files given and/or found in place. Sometimes you need -_Black_ to just tell you what it _would_ do without actually rewriting the Python files. +Format all input files like typing stubs regardless of file extension. This is useful +when piping source on standard input. -There's two variations to this mode that are independently enabled by their respective -flags. Both variations can be enabled at once. +#### `--ipynb` + +Format all input files like Jupyter Notebooks regardless of file extension. This is +useful when piping source on standard input. + +#### `--python-cell-magics` + +When processing Jupyter Notebooks, add the given magic to the list of known python- +magics. Useful for formatting cells with custom python magics. + +#### `-S, --skip-string-normalization` + +By default, _Black_ uses double quotes for all strings and normalizes string prefixes, +as described in [the style documentation](labels/strings). If this option is given, +strings are left unchanged instead. + +#### `-C, --skip-magic-trailing-comma` + +By default, _Black_ uses existing trailing commas as an indication that short lines +should be left separate, as described in +[the style documentation](labels/magic-trailing-comma). If this option is given, the +magic trailing comma is ignored. 
+ +#### `--preview` + +Enable potentially disruptive style changes that may be added to Black's main +functionality in the next major release. Read more about +[our preview style](labels/preview-style). (labels/exit-code)= -#### Exit code +#### `--check` Passing `--check` will make _Black_ exit with: @@ -111,17 +155,17 @@ $ echo $? 123 ``` -#### Diffs +#### `--diff` Passing `--diff` will make _Black_ print out diffs that indicate what changes _Black_ would've made. They are printed to stdout so capturing them is simple. -If you'd like colored diffs, you can enable them with the `--color`. +If you'd like colored diffs, you can enable them with `--color`. ```console $ black test.py --diff ---- test.py 2021-03-08 22:23:40.848954 +0000 -+++ test.py 2021-03-08 22:23:47.126319 +0000 +--- test.py 2021-03-08 22:23:40.848954+00:00 ++++ test.py 2021-03-08 22:23:47.126319+00:00 @@ -1 +1 @@ -print ( 'hello, world' ) +print("hello, world") @@ -130,22 +174,93 @@ All done! ✨ 🍰 ✨ 1 file would be reformatted. ``` -### Output verbosity +#### `--color` / `--no-color` -_Black_ in general tries to produce the right amount of output, balancing between -usefulness and conciseness. By default, _Black_ emits files modified and error messages, -plus a short summary. +Show (or do not show) colored diff. Only applies when `--diff` is given. + +#### `--fast` / `--safe` + +By default, _Black_ performs [an AST safety check](labels/ast-changes) after formatting +your code. The `--fast` flag turns off this check and the `--safe` flag explicitly +enables it. + +#### `--required-version` + +Require a specific version of _Black_ to be running. This is useful for ensuring that +all contributors to your project are using the same version, because different versions +of _Black_ may format code a little differently. This option can be set in a +configuration file for consistent results across environments. 
```console -$ black src/ +$ black --version +black, 23.7.0 (compiled: yes) +$ black --required-version 23.7.0 -c "format = 'this'" +format = "this" +$ black --required-version 31.5b2 -c "still = 'beta?!'" +Oh no! 💥 💔 💥 The required version does not match the running version! +``` + +You can also pass just the major version: + +```console +$ black --required-version 22 -c "format = 'this'" +format = "this" +$ black --required-version 31 -c "still = 'beta?!'" +Oh no! 💥 💔 💥 The required version does not match the running version! +``` + +Because of our [stability policy](../the_black_code_style/index.md), this will guarantee +stable formatting, but still allow you to take advantage of improvements that do not +affect formatting. + +#### `--include` + +A regular expression that matches files and directories that should be included on +recursive searches. An empty value means all files are included regardless of the name. +Use forward slashes for directories on all platforms (Windows, too). Exclusions are +calculated first, inclusions later. + +#### `--exclude` + +A regular expression that matches files and directories that should be excluded on +recursive searches. An empty value means no paths are excluded. Use forward slashes for +directories on all platforms (Windows, too). Exclusions are calculated first, inclusions +later. + +#### `--extend-exclude` + +Like `--exclude`, but adds additional files and directories on top of the excluded ones. +Useful if you simply want to add to the default. + +#### `--force-exclude` + +Like `--exclude`, but files and directories matching this regex will be excluded even +when they are passed explicitly as arguments. This is useful when invoking _Black_ +programmatically on changed files, such as in a pre-commit hook or editor plugin. + +#### `--stdin-filename` + +The name of the file when passing it through stdin. Useful to make sure Black will +respect the `--force-exclude` option on some editors that rely on using stdin. 
+ +#### `-W`, `--workers` + +When _Black_ formats multiple files, it may use a process pool to speed up formatting. +This option controls the number of parallel workers. This can also be specified via the +`BLACK_NUM_WORKERS` environment variable. + +#### `-q`, `--quiet` + +Passing `-q` / `--quiet` will cause _Black_ to stop emitting all non-critical output. +Error messages will still be emitted (which can be silenced by `2>/dev/null`). + +```console +$ black src/ -q error: cannot format src/black_primer/cli.py: Cannot parse: 5:6: mport asyncio -reformatted src/black_primer/lib.py -reformatted src/blackd/__init__.py -reformatted src/black/__init__.py -Oh no! 💥 💔 💥 -3 files reformatted, 2 files left unchanged, 1 file failed to reformat. ``` +#### `-v`, `--verbose` + Passing `-v` / `--verbose` will cause _Black_ to also emit messages about files that were not changed or were ignored due to exclusion patterns. If _Black_ is using a configuration file, a blue message detailing which one it is using will be emitted. @@ -164,35 +279,86 @@ Oh no! 💥 💔 💥 3 files reformatted, 2 files left unchanged, 1 file failed to reformat ``` -Passing `-q` / `--quiet` will cause _Black_ to stop emitting all non-critial output. -Error messages will still be emitted (which can silenced by `2>/dev/null`). +#### `--version` + +You can check the version of _Black_ you have installed using the `--version` flag. ```console -$ black src/ -q -error: cannot format src/black_primer/cli.py: Cannot parse: 5:6: mport asyncio +$ black --version +black, 23.7.0 ``` -### Versions +#### `--config` -You can check the version of _Black_ you have installed using the `--version` flag. +Read configuration options from a configuration file. See +[below](#configuration-via-a-file) for more details on the configuration file. + +#### `-h`, `--help` + +Show available command-line options and exit. + +### Environment variable options + +_Black_ supports the following configuration via environment variables.
+ +#### `BLACK_CACHE_DIR` + +The directory where _Black_ should store its cache. + +#### `BLACK_NUM_WORKERS` + +The number of parallel workers _Black_ should use. The command line option `-W` / +`--workers` takes precedence over this environment variable. + +### Code input alternatives + +_Black_ supports formatting code via stdin, with the result being printed to stdout. +Just let _Black_ know with `-` as the path. ```console -$ black --version -black, version 23.3.0 +$ echo "print ( 'hello, world' )" | black - +print("hello, world") +reformatted - +All done! ✨ 🍰 ✨ +1 file reformatted. ``` -An option to require a specific version to be running is also provided. +**Tip:** if you need _Black_ to treat stdin input as a file passed directly via the CLI, +use `--stdin-filename`. Useful to make sure _Black_ will respect the `--force-exclude` +option on some editors that rely on using stdin. + +You can also pass code as a string using the `-c` / `--code` option. + +### Writeback and reporting + +By default _Black_ reformats the files given and/or found in place. Sometimes you need +_Black_ to just tell you what it _would_ do without actually rewriting the Python files. + +There's two variations to this mode that are independently enabled by their respective +flags: + +- `--check` (exit with code 1 if any file would be reformatted) +- `--diff` (print a diff instead of reformatting files) + +Both variations can be enabled at once. + +### Output verbosity + +_Black_ in general tries to produce the right amount of output, balancing between +usefulness and conciseness. By default, _Black_ emits files modified and error messages, +plus a short summary. ```console -$ black --required-version 21.9b0 -c "format = 'this'" -format = "this" -$ black --required-version 31.5b2 -c "still = 'beta?!'" -Oh no! 💥 💔 💥 The required version does not match the running version! 
+$ black src/ +error: cannot format src/black_primer/cli.py: Cannot parse: 5:6: mport asyncio +reformatted src/black_primer/lib.py +reformatted src/blackd/__init__.py +reformatted src/black/__init__.py +Oh no! 💥 💔 💥 +3 files reformatted, 2 files left unchanged, 1 file failed to reformat. ``` -This is useful for example when running _Black_ in multiple environments that haven't -necessarily installed the correct version. This option can be set in a configuration -file for consistent results across environments. +The `--quiet` and `--verbose` flags control output verbosity. ## Configuration via a file diff --git a/gallery/gallery.py b/gallery/gallery.py index 38e52e34795..ba5d6f65fbe 100755 --- a/gallery/gallery.py +++ b/gallery/gallery.py @@ -243,11 +243,9 @@ def format_repos(repos: Tuple[Path, ...], options: Namespace) -> None: def main() -> None: - parser = ArgumentParser( - description="""Black Gallery is a script that + parser = ArgumentParser(description="""Black Gallery is a script that automates the process of applying different Black versions to a selected - PyPI package and seeing the results between versions.""" - ) + PyPI package and seeing the results between versions.""") group = parser.add_mutually_exclusive_group(required=True) group.add_argument("-p", "--pypi-package", help="PyPI package to download.") diff --git a/mypy.ini b/mypy.ini index 58bb7536173..95ec22d65be 100644 --- a/mypy.ini +++ b/mypy.ini @@ -2,7 +2,7 @@ # Specify the target platform details in config, so your developers are # free to run mypy on Windows, Linux, or macOS and get consistent # results. -python_version=3.7 +python_version=3.8 mypy_path=src diff --git a/pyproject.toml b/pyproject.toml index 435626ac8f4..175f7851dee 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,7 +33,7 @@ build-backend = "hatchling.build" name = "black" description = "The uncompromising code formatter." 
license = { text = "MIT" } -requires-python = ">=3.7" +requires-python = ">=3.8" authors = [ { name = "Łukasz Langa", email = "lukasz@langa.pl" }, ] @@ -69,7 +69,6 @@ dependencies = [ "pathspec>=0.9.0", "platformdirs>=2", "tomli>=1.1.0; python_version < '3.11'", - "typed-ast>=1.4.2; python_version < '3.8' and implementation_name == 'cpython'", "typing_extensions>=3.10.0.0; python_version < '3.10'", ] dynamic = ["readme", "version"] @@ -119,10 +118,9 @@ sources = ["src"] [tool.hatch.build.targets.wheel.hooks.mypyc] enable-by-default = false dependencies = [ - "hatch-mypyc>=0.13.0", - "mypy==0.991", - # Required stubs to be removed when the packages support PEP 561 themselves - "types-typed-ast>=1.4.2", + "hatch-mypyc>=0.16.0", + "mypy==1.3", + "click==8.1.3", # avoid https://github.com/pallets/click/issues/2558 ] require-runtime-dependencies = true exclude = [ @@ -145,7 +143,7 @@ options = { debug_level = "0" } [tool.cibuildwheel] build-verbosity = 1 # So these are the environments we target: -# - Python: CPython 3.7+ only +# - Python: CPython 3.8+ only # - Architecture (64-bit only): amd64 / x86_64, universal2, and arm64 # - OS: Linux (no musl), Windows, and macOS build = "cp3*-*" @@ -208,10 +206,9 @@ filterwarnings = [ # this is mitigated by a try/catch in https://github.com/psf/black/pull/3198/ # this ignore can be removed when support for aiohttp 3.x is dropped. '''ignore:Middleware decorator is deprecated since 4\.0 and its behaviour is default, you can simply remove this decorator:DeprecationWarning''', - # this is mitigated by https://github.com/python/cpython/issues/79071 in python 3.8+ - # this ignore can be removed when support for 3.7 is dropped. 
- '''ignore:Bare functions are deprecated, use async ones:DeprecationWarning''', # aiohttp is using deprecated cgi modules - Safe to remove when fixed: # https://github.com/aio-libs/aiohttp/issues/6905 '''ignore:'cgi' is deprecated and slated for removal in Python 3.13:DeprecationWarning''', + # Work around https://github.com/pytest-dev/pytest/issues/10977 for Python 3.12 + '''ignore:(Attribute s|Attribute n|ast.Str|ast.Bytes|ast.NameConstant|ast.Num) is deprecated and will be removed in Python 3.14:DeprecationWarning''' ] diff --git a/scripts/check_version_in_basics_example.py b/scripts/check_version_in_basics_example.py index c62780d97ab..7f559b3aee1 100644 --- a/scripts/check_version_in_basics_example.py +++ b/scripts/check_version_in_basics_example.py @@ -20,20 +20,21 @@ def main(changes: str, the_basics: str) -> None: the_basics_html = commonmark.commonmark(the_basics) the_basics_soup = BeautifulSoup(the_basics_html, "html.parser") - (version_example,) = [ + version_examples = [ code_block.string for code_block in the_basics_soup.find_all(class_="language-console") if "$ black --version" in code_block.string ] for tag in tags: - if tag in version_example and tag != latest_tag: - print( - "Please set the version in the ``black --version`` " - "example from ``the_basics.md`` to be the latest one.\n" - f"Expected {latest_tag}, got {tag}.\n" - ) - sys.exit(1) + for version_example in version_examples: + if tag in version_example and tag != latest_tag: + print( + "Please set the version in the ``black --version`` " + "examples from ``the_basics.md`` to be the latest one.\n" + f"Expected {latest_tag}, got {tag}.\n" + ) + sys.exit(1) if __name__ == "__main__": diff --git a/scripts/diff_shades_gha_helper.py b/scripts/diff_shades_gha_helper.py index b5fea5a817d..994fbe05045 100644 --- a/scripts/diff_shades_gha_helper.py +++ b/scripts/diff_shades_gha_helper.py @@ -52,7 +52,13 @@ def set_output(name: str, value: str) -> None: print(f"[INFO]: 
setting '{name}' to '{value}'") else: print(f"[INFO]: setting '{name}' to [{len(value)} chars]") - print(f"::set-output name={name}::{value}") + + # Originally the `set-output` workflow command was used here, now replaced + # by setting variables through the `GITHUB_OUTPUT` environment variable + # to stay up to date with GitHub's update. + if "GITHUB_OUTPUT" in os.environ: + with open(os.environ["GITHUB_OUTPUT"], "a") as f: + print(f"{name}={value}", file=f) def http_get(url: str, *, is_json: bool = True, **kwargs: Any) -> Any: diff --git a/scripts/make_width_table.py b/scripts/make_width_table.py index 09aca9c34b5..30fd32c34b0 100644 --- a/scripts/make_width_table.py +++ b/scripts/make_width_table.py @@ -49,21 +49,13 @@ def make_width_table() -> Iterable[Tuple[int, int, int]]: def main() -> None: table_path = join(dirname(__file__), "..", "src", "black", "_width_table.py") with open(table_path, "w") as f: - f.write( - f"""# Generated by {basename(__file__)} + f.write(f"""# Generated by {basename(__file__)} # wcwidth {wcwidth.__version__} # Unicode {wcwidth.list_versions()[-1]} -import sys -from typing import List, Tuple - -if sys.version_info < (3, 8): - from typing_extensions import Final -else: - from typing import Final +from typing import Final, List, Tuple WIDTH_TABLE: Final[List[Tuple[int, int, int]]] = [ -""" - ) +""") for triple in make_width_table(): f.write(f" {triple!r},\n") f.write("]\n") diff --git a/src/black/__init__.py b/src/black/__init__.py index 4ebf28821c3..301c18f7338 100644 --- a/src/black/__init__.py +++ b/src/black/__init__.py @@ -7,7 +7,7 @@ import traceback from contextlib import contextmanager from dataclasses import replace -from datetime import datetime +from datetime import datetime, timezone from enum import Enum from json.decoder import JSONDecodeError from pathlib import Path @@ -127,7 +127,9 @@ def read_pyproject_toml( otherwise. 
""" if not value: - value = find_pyproject_toml(ctx.params.get("src", ())) + value = find_pyproject_toml( + ctx.params.get("src", ()), ctx.params.get("stdin_filename", None) + ) if value is None: return None @@ -155,6 +157,16 @@ def read_pyproject_toml( "target-version", "Config key target-version must be a list" ) + exclude = config.get("exclude") + if exclude is not None and not isinstance(exclude, str): + raise click.BadOptionUsage("exclude", "Config key exclude must be a string") + + extend_exclude = config.get("extend_exclude") + if extend_exclude is not None and not isinstance(extend_exclude, str): + raise click.BadOptionUsage( + "extend-exclude", "Config key extend-exclude must be a string" + ) + default_map: Dict[str, Any] = {} if ctx.default_map: default_map.update(ctx.default_map) @@ -362,6 +374,7 @@ def validate_regex( @click.option( "--stdin-filename", type=str, + is_eager=True, help=( "The name of the file when passing it through stdin. Useful to make " "sure Black will respect --force-exclude option on some " @@ -373,7 +386,10 @@ def validate_regex( "--workers", type=click.IntRange(min=1), default=None, - help="Number of parallel workers [default: number of CPUs in the system]", + help=( + "Number of parallel workers [default: BLACK_NUM_WORKERS environment variable " + "or number of CPUs in the system]" + ), ) @click.option( "-q", @@ -478,35 +494,13 @@ def main( # noqa: C901 fg="blue", ) - normalized = [ - ( - (source, source) - if source == "-" - else (normalize_path_maybe_ignore(Path(source), root), source) - ) - for source in src - ] - srcs_string = ", ".join( - [ - ( - f'"{_norm}"' - if _norm - else f'\033[31m"{source} (skipping - invalid)"\033[34m' - ) - for _norm, source in normalized - ] - ) - out(f"Sources to be formatted: {srcs_string}", fg="blue") - if config: config_source = ctx.get_parameter_source("config") user_level_config = str(find_user_pyproject_toml()) if config == user_level_config: out( - ( - "Using configuration from user-level 
config at " - f"'{user_level_config}'." - ), + "Using configuration from user-level config at " + f"'{user_level_config}'.", fg="blue", ) elif config_source in ( @@ -650,9 +644,15 @@ def get_sources( is_stdin = False if is_stdin or p.is_file(): - normalized_path = normalize_path_maybe_ignore(p, ctx.obj["root"], report) + normalized_path: Optional[str] = normalize_path_maybe_ignore( + p, ctx.obj["root"], report + ) if normalized_path is None: + if verbose: + out(f'Skipping invalid source: "{normalized_path}"', fg="red") continue + if verbose: + out(f'Found input source: "{normalized_path}"', fg="blue") normalized_path = "/" + normalized_path # Hard-exclude any files that matches the `--force-exclude` regex. @@ -675,6 +675,9 @@ def get_sources( sources.add(p) elif p.is_dir(): p = root / normalize_path_maybe_ignore(p, ctx.obj["root"], report) + if verbose: + out(f'Found input source directory: "{p}"', fg="blue") + if using_default_exclude: gitignore = { root: root_gitignore, @@ -695,9 +698,12 @@ def get_sources( ) ) elif s == "-": + if verbose: + out("Found input source stdin", fg="blue") sources.add(p) else: err(f"invalid path: {s}") + return sources @@ -809,7 +815,7 @@ def format_file_in_place( elif src.suffix == ".ipynb": mode = replace(mode, is_ipynb=True) - then = datetime.utcfromtimestamp(src.stat().st_mtime) + then = datetime.fromtimestamp(src.stat().st_mtime, timezone.utc) header = b"" with open(src, "rb") as buf: if mode.skip_source_first_line: @@ -830,9 +836,9 @@ def format_file_in_place( with open(src, "w", encoding=encoding, newline=newline) as f: f.write(dst_contents) elif write_back in (WriteBack.DIFF, WriteBack.COLOR_DIFF): - now = datetime.utcnow() - src_name = f"{src}\t{then} +0000" - dst_name = f"{src}\t{now} +0000" + now = datetime.now(timezone.utc) + src_name = f"{src}\t{then}" + dst_name = f"{src}\t{now}" if mode.is_ipynb: diff_contents = ipynb_diff(src_contents, dst_contents, src_name, dst_name) else: @@ -870,7 +876,7 @@ def 
format_stdin_to_stdout( write a diff to stdout. The `mode` argument is passed to :func:`format_file_contents`. """ - then = datetime.utcnow() + then = datetime.now(timezone.utc) if content is None: src, encoding, newline = decode_bytes(sys.stdin.buffer.read()) @@ -895,9 +901,9 @@ def format_stdin_to_stdout( dst += "\n" f.write(dst) elif write_back in (WriteBack.DIFF, WriteBack.COLOR_DIFF): - now = datetime.utcnow() - src_name = f"STDIN\t{then} +0000" - dst_name = f"STDOUT\t{now} +0000" + now = datetime.now(timezone.utc) + src_name = f"STDIN\t{then}" + dst_name = f"STDOUT\t{now}" d = diff(src, dst, src_name, dst_name) if write_back == WriteBack.COLOR_DIFF: d = color_diff(d) @@ -1277,6 +1283,9 @@ def get_features_used( # noqa: C901 ): features.add(Feature.VARIADIC_GENERICS) + elif n.type in (syms.type_stmt, syms.typeparams): + features.add(Feature.TYPE_PARAMS) + return features @@ -1401,40 +1410,6 @@ def nullcontext() -> Iterator[None]: yield -def patch_click() -> None: - """Make Click not crash on Python 3.6 with LANG=C. - - On certain misconfigured environments, Python 3 selects the ASCII encoding as the - default which restricts paths that it can access during the lifetime of the - application. Click refuses to work in this scenario by raising a RuntimeError. - - In case of Black the likelihood that non-ASCII characters are going to be used in - file paths is minimal since it's Python source code. Moreover, this crash was - spurious on Python 3.7 thanks to PEP 538 and PEP 540. - """ - modules: List[Any] = [] - try: - from click import core - except ImportError: - pass - else: - modules.append(core) - try: - # Removed in Click 8.1.0 and newer; we keep this around for users who have - # older versions installed. 
- from click import _unicodefun # type: ignore - except ImportError: - pass - else: - modules.append(_unicodefun) - - for module in modules: - if hasattr(module, "_verify_python3_env"): - module._verify_python3_env = lambda: None - if hasattr(module, "_verify_python_env"): - module._verify_python_env = lambda: None - - def patched_main() -> None: # PyInstaller patches multiprocessing to need freeze_support() even in non-Windows # environments so just assume we always need to call it if frozen. @@ -1443,7 +1418,6 @@ def patched_main() -> None: freeze_support() - patch_click() main() diff --git a/src/black/_width_table.py b/src/black/_width_table.py index 6923f597687..f3304e48ed0 100644 --- a/src/black/_width_table.py +++ b/src/black/_width_table.py @@ -1,13 +1,7 @@ # Generated by make_width_table.py # wcwidth 0.2.6 # Unicode 15.0.0 -import sys -from typing import List, Tuple - -if sys.version_info < (3, 8): - from typing_extensions import Final -else: - from typing import Final +from typing import Final, List, Tuple WIDTH_TABLE: Final[List[Tuple[int, int, int]]] = [ (0, 0, 0), diff --git a/src/black/brackets.py b/src/black/brackets.py index 343f0608d50..85dac6edd1e 100644 --- a/src/black/brackets.py +++ b/src/black/brackets.py @@ -1,13 +1,7 @@ """Builds on top of nodes.py to track brackets.""" -import sys from dataclasses import dataclass, field -from typing import Dict, Iterable, List, Optional, Sequence, Set, Tuple, Union - -if sys.version_info < (3, 8): - from typing_extensions import Final -else: - from typing import Final +from typing import Dict, Final, Iterable, List, Optional, Sequence, Set, Tuple, Union from black.nodes import ( BRACKET, diff --git a/src/black/comments.py b/src/black/comments.py index 619123ab4be..226968bff98 100644 --- a/src/black/comments.py +++ b/src/black/comments.py @@ -1,13 +1,7 @@ import re -import sys from dataclasses import dataclass from functools import lru_cache -from typing import Iterator, List, Optional, Union - -if 
sys.version_info >= (3, 8): - from typing import Final -else: - from typing_extensions import Final +from typing import Final, Iterator, List, Optional, Union from black.nodes import ( CLOSING_BRACKETS, diff --git a/src/black/concurrency.py b/src/black/concurrency.py index 1598f51e43f..893eba6675a 100644 --- a/src/black/concurrency.py +++ b/src/black/concurrency.py @@ -80,7 +80,8 @@ def reformat_many( executor: Executor if workers is None: - workers = os.cpu_count() or 1 + workers = int(os.environ.get("BLACK_NUM_WORKERS", 0)) + workers = workers or os.cpu_count() or 1 if sys.platform == "win32": # Work around https://bugs.python.org/issue26903 workers = min(workers, 60) diff --git a/src/black/const.py b/src/black/const.py index 0e13f31517d..ee466679c70 100644 --- a/src/black/const.py +++ b/src/black/const.py @@ -1,4 +1,4 @@ DEFAULT_LINE_LENGTH = 88 -DEFAULT_EXCLUDES = r"/(\.direnv|\.eggs|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|venv|\.svn|\.ipynb_checkpoints|_build|buck-out|build|dist|__pypackages__)/" # noqa: B950 +DEFAULT_EXCLUDES = r"/(\.direnv|\.eggs|\.git|\.hg|\.ipynb_checkpoints|\.mypy_cache|\.nox|\.pytest_cache|\.ruff_cache|\.tox|\.svn|\.venv|\.vscode|__pypackages__|_build|buck-out|build|dist|venv)/" # noqa: B950 DEFAULT_INCLUDES = r"(\.pyi?|\.ipynb)$" STDIN_PLACEHOLDER = "__BLACK_STDIN_FILENAME__" diff --git a/src/black/files.py b/src/black/files.py index 8c0131126b7..ef6895ee3af 100644 --- a/src/black/files.py +++ b/src/black/files.py @@ -42,7 +42,7 @@ import colorama # noqa: F401 -@lru_cache() +@lru_cache def find_project_root( srcs: Sequence[str], stdin_filename: Optional[str] = None ) -> Tuple[Path, str]: @@ -89,9 +89,11 @@ def find_project_root( return directory, "file system root" -def find_pyproject_toml(path_search_start: Tuple[str, ...]) -> Optional[str]: +def find_pyproject_toml( + path_search_start: Tuple[str, ...], stdin_filename: Optional[str] = None +) -> Optional[str]: """Find the absolute filepath to a pyproject.toml if it exists""" - 
path_project_root, _ = find_project_root(path_search_start) + path_project_root, _ = find_project_root(path_search_start, stdin_filename) path_pyproject_toml = path_project_root / "pyproject.toml" if path_pyproject_toml.is_file(): return str(path_pyproject_toml) @@ -210,7 +212,7 @@ def strip_specifier_set(specifier_set: SpecifierSet) -> SpecifierSet: return SpecifierSet(",".join(str(s) for s in specifiers)) -@lru_cache() +@lru_cache def find_user_pyproject_toml() -> Path: r"""Return the path to the top-level user configuration for black. @@ -230,7 +232,7 @@ def find_user_pyproject_toml() -> Path: return user_config_path.resolve() -@lru_cache() +@lru_cache def get_gitignore(root: Path) -> PathSpec: """Return a PathSpec matching gitignore content if present.""" gitignore = root / ".gitignore" @@ -274,15 +276,24 @@ def normalize_path_maybe_ignore( return root_relative_path -def path_is_ignored( - path: Path, gitignore_dict: Dict[Path, PathSpec], report: Report +def _path_is_ignored( + root_relative_path: str, + root: Path, + gitignore_dict: Dict[Path, PathSpec], + report: Report, ) -> bool: + path = root / root_relative_path + # Note that this logic is sensitive to the ordering of gitignore_dict. Callers must + # ensure that gitignore_dict is ordered from least specific to most specific. 
for gitignore_path, pattern in gitignore_dict.items(): - relative_path = normalize_path_maybe_ignore(path, gitignore_path, report) - if relative_path is None: + try: + relative_path = path.relative_to(gitignore_path).as_posix() + except ValueError: break if pattern.match_file(relative_path): - report.path_ignored(path, "matches a .gitignore file content") + report.path_ignored( + path.relative_to(root), "matches a .gitignore file content" + ) return True return False @@ -324,7 +335,9 @@ def gen_python_files( continue # First ignore files matching .gitignore, if passed - if gitignore_dict and path_is_ignored(child, gitignore_dict, report): + if gitignore_dict and _path_is_ignored( + normalized_path, root, gitignore_dict, report + ): continue # Then ignore with `--exclude` `--extend-exclude` and `--force-exclude` options. diff --git a/src/black/handle_ipynb_magics.py b/src/black/handle_ipynb_magics.py index 9e1af757c32..2a2d62220e2 100644 --- a/src/black/handle_ipynb_magics.py +++ b/src/black/handle_ipynb_magics.py @@ -55,11 +55,16 @@ class Replacement: src: str -@lru_cache() +@lru_cache def jupyter_dependencies_are_installed(*, verbose: bool, quiet: bool) -> bool: try: - import IPython # noqa:F401 + # isort: off + # tokenize_rt is less commonly installed than IPython + # and IPython is expensive to import import tokenize_rt # noqa:F401 + import IPython # noqa:F401 + + # isort: on except ModuleNotFoundError: if verbose or not quiet: msg = ( @@ -330,7 +335,8 @@ class CellMagicFinder(ast.NodeVisitor): For example, - %%time\nfoo() + %%time\n + foo() would have been transformed to diff --git a/src/black/linegen.py b/src/black/linegen.py index b6b83da26f7..5ef3bbd1705 100644 --- a/src/black/linegen.py +++ b/src/black/linegen.py @@ -49,6 +49,7 @@ is_stub_body, is_stub_suite, is_tuple_containing_walrus, + is_type_ignore_comment_string, is_vararg, is_walrus_assignment, is_yield, @@ -215,6 +216,18 @@ def visit_stmt( yield from self.visit(child) + def visit_typeparams(self, 
node: Node) -> Iterator[Line]: + yield from self.visit_default(node) + node.children[0].prefix = "" + + def visit_typevartuple(self, node: Node) -> Iterator[Line]: + yield from self.visit_default(node) + node.children[1].prefix = "" + + def visit_paramspec(self, node: Node) -> Iterator[Line]: + yield from self.visit_default(node) + node.children[1].prefix = "" + def visit_dictsetmaker(self, node: Node) -> Iterator[Line]: if Preview.wrap_long_dict_values_in_parens in self.mode: for i, child in enumerate(node.children): @@ -906,6 +919,13 @@ def bracket_split_build_line( ) if isinstance(node, Node) and isinstance(node.prev_sibling, Leaf) ) + # Except the false negatives above for PEP 604 unions where we + # can't add the comma. + and not ( + leaves[0].parent + and leaves[0].parent.next_sibling + and leaves[0].parent.next_sibling.type == token.VBAR + ) ) if original.is_import or no_commas: @@ -1380,8 +1400,13 @@ def maybe_make_parens_invisible_in_atom( if is_lpar_token(first) and is_rpar_token(last): middle = node.children[1] # make parentheses invisible - first.value = "" - last.value = "" + if ( + # If the prefix of `middle` includes a type comment with + # ignore annotation, then we do not remove the parentheses + not is_type_ignore_comment_string(middle.prefix.strip()) + ): + first.value = "" + last.value = "" maybe_make_parens_invisible_in_atom( middle, parent=parent, diff --git a/src/black/lines.py b/src/black/lines.py index bf4c12cb684..ea8fe520756 100644 --- a/src/black/lines.py +++ b/src/black/lines.py @@ -28,6 +28,7 @@ is_multiline_string, is_one_sequence_between, is_type_comment, + is_type_ignore_comment, is_with_or_async_with_stmt, replace_child, syms, @@ -251,7 +252,7 @@ def contains_uncollapsable_type_comments(self) -> bool: for comment in comments: if is_type_comment(comment): if comment_seen or ( - not is_type_comment(comment, " ignore") + not is_type_ignore_comment(comment) and leaf_id not in ignored_ids ): return True @@ -288,7 +289,7 @@ def 
contains_unsplittable_type_ignore(self) -> bool: # line. for node in self.leaves[-2:]: for comment in self.comments.get(id(node), []): - if is_type_comment(comment, " ignore"): + if is_type_ignore_comment(comment): return True return False @@ -634,6 +635,8 @@ def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]: and self.previous_line.is_class and current_line.is_triple_quoted_string ): + if Preview.no_blank_line_before_class_docstring in current_line.mode: + return 0, 1 return before, 1 if self.previous_line and self.previous_line.opens_block: @@ -790,7 +793,7 @@ def is_line_short_enough( # noqa: C901 # store the leaves that contain parts of the MLS multiline_string_contexts: List[LN] = [] - max_level_to_update = math.inf # track the depth of the MLS + max_level_to_update: Union[int, float] = math.inf # track the depth of the MLS for i, leaf in enumerate(line.leaves): if max_level_to_update == math.inf: had_comma: Optional[int] = None diff --git a/src/black/mode.py b/src/black/mode.py index 0511676ce53..4d979afd84d 100644 --- a/src/black/mode.py +++ b/src/black/mode.py @@ -4,19 +4,13 @@ chosen by the user. 
""" -import sys from dataclasses import dataclass, field from enum import Enum, auto from hashlib import sha256 from operator import attrgetter -from typing import Dict, Set +from typing import Dict, Final, Set from warnings import warn -if sys.version_info < (3, 8): - from typing_extensions import Final -else: - from typing import Final - from black.const import DEFAULT_LINE_LENGTH @@ -30,6 +24,7 @@ class TargetVersion(Enum): PY39 = 9 PY310 = 10 PY311 = 11 + PY312 = 12 class Feature(Enum): @@ -51,6 +46,7 @@ class Feature(Enum): VARIADIC_GENERICS = 15 DEBUG_F_STRINGS = 16 PARENTHESIZED_CONTEXT_MANAGERS = 17 + TYPE_PARAMS = 18 FORCE_OPTIONAL_PARENTHESES = 50 # __future__ flags @@ -143,6 +139,25 @@ class Feature(Enum): Feature.EXCEPT_STAR, Feature.VARIADIC_GENERICS, }, + TargetVersion.PY312: { + Feature.F_STRINGS, + Feature.DEBUG_F_STRINGS, + Feature.NUMERIC_UNDERSCORES, + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + Feature.ASYNC_KEYWORDS, + Feature.FUTURE_ANNOTATIONS, + Feature.ASSIGNMENT_EXPRESSIONS, + Feature.RELAXED_DECORATORS, + Feature.POS_ONLY_ARGUMENTS, + Feature.UNPACKING_ON_FLOW, + Feature.ANN_ASSIGN_EXTENDED_RHS, + Feature.PARENTHESIZED_CONTEXT_MANAGERS, + Feature.PATTERN_MATCHING, + Feature.EXCEPT_STAR, + Feature.VARIADIC_GENERICS, + Feature.TYPE_PARAMS, + }, } @@ -158,6 +173,7 @@ class Preview(Enum): hex_codes_in_unicode_sequences = auto() improved_async_statements_handling = auto() multiline_string_handling = auto() + no_blank_line_before_class_docstring = auto() prefer_splitting_right_hand_side_of_assignments = auto() # NOTE: string_processing requires wrap_long_dict_values_in_parens # for https://github.com/psf/black/issues/3117 to be fixed. @@ -188,10 +204,8 @@ class Mode: def __post_init__(self) -> None: if self.experimental_string_processing: warn( - ( - "`experimental string processing` has been included in `preview`" - " and deprecated. Use `preview` instead." 
- ), + "`experimental string processing` has been included in `preview`" + " and deprecated. Use `preview` instead.", Deprecated, ) diff --git a/src/black/nodes.py b/src/black/nodes.py index 4e9411b1b79..45423b2596b 100644 --- a/src/black/nodes.py +++ b/src/black/nodes.py @@ -3,12 +3,8 @@ """ import sys -from typing import Generic, Iterator, List, Optional, Set, Tuple, TypeVar, Union +from typing import Final, Generic, Iterator, List, Optional, Set, Tuple, TypeVar, Union -if sys.version_info >= (3, 8): - from typing import Final -else: - from typing_extensions import Final if sys.version_info >= (3, 10): from typing import TypeGuard else: @@ -181,9 +177,9 @@ def whitespace(leaf: Leaf, *, complex_subscript: bool) -> str: # noqa: C901 `complex_subscript` signals whether the given leaf is part of a subscription which has non-trivial arguments, like arithmetic expressions or function calls. """ - NO: Final = "" - SPACE: Final = " " - DOUBLESPACE: Final = " " + NO: Final[str] = "" + SPACE: Final[str] = " " + DOUBLESPACE: Final[str] = " " t = leaf.type p = leaf.parent v = leaf.value @@ -718,6 +714,11 @@ def is_multiline_string(leaf: Leaf) -> bool: def is_stub_suite(node: Node) -> bool: """Return True if `node` is a suite with a stub body.""" + + # If there is a comment, we want to keep it. 
+ if node.prefix.strip(): + return False + if ( len(node.children) != 4 or node.children[0].type != token.NEWLINE @@ -726,6 +727,9 @@ def is_stub_suite(node: Node) -> bool: ): return False + if node.children[3].prefix.strip(): + return False + return is_stub_body(node.children[2]) @@ -739,7 +743,8 @@ def is_stub_body(node: LN) -> bool: child = node.children[0] return ( - child.type == syms.atom + not child.prefix.strip() + and child.type == syms.atom and len(child.children) == 3 and all(leaf == Leaf(token.DOT, ".") for leaf in child.children) ) @@ -816,12 +821,27 @@ def is_async_stmt_or_funcdef(leaf: Leaf) -> bool: ) -def is_type_comment(leaf: Leaf, suffix: str = "") -> bool: - """Return True if the given leaf is a special comment. - Only returns true for type comments for now.""" +def is_type_comment(leaf: Leaf) -> bool: + """Return True if the given leaf is a type comment. This function should only + be used for general type comments (excluding ignore annotations, which should + use `is_type_ignore_comment`). 
Note that general type comments are no longer + used in modern version of Python, this function may be deprecated in the future.""" t = leaf.type v = leaf.value - return t in {token.COMMENT, STANDALONE_COMMENT} and v.startswith("# type:" + suffix) + return t in {token.COMMENT, STANDALONE_COMMENT} and v.startswith("# type:") + + +def is_type_ignore_comment(leaf: Leaf) -> bool: + """Return True if the given leaf is a type comment with ignore annotation.""" + t = leaf.type + v = leaf.value + return t in {token.COMMENT, STANDALONE_COMMENT} and is_type_ignore_comment_string(v) + + +def is_type_ignore_comment_string(value: str) -> bool: + """Return True if the given string match with type comment with + ignore annotation.""" + return value.startswith("# type: ignore") def wrap_in_parentheses(parent: Node, child: LN, *, visible: bool = True) -> None: diff --git a/src/black/parsing.py b/src/black/parsing.py index eaa3c367e54..e98e019cac6 100644 --- a/src/black/parsing.py +++ b/src/black/parsing.py @@ -2,14 +2,8 @@ Parse Python code and perform AST validation. """ import ast -import platform import sys -from typing import Any, Iterable, Iterator, List, Set, Tuple, Type, Union - -if sys.version_info < (3, 8): - from typing_extensions import Final -else: - from typing import Final +from typing import Final, Iterable, Iterator, List, Set, Tuple from black.mode import VERSION_TO_FEATURES, Feature, TargetVersion, supports_feature from black.nodes import syms @@ -20,27 +14,6 @@ from blib2to3.pgen2.tokenize import TokenError from blib2to3.pytree import Leaf, Node -ast3: Any - -_IS_PYPY = platform.python_implementation() == "PyPy" - -try: - from typed_ast import ast3 -except ImportError: - if sys.version_info < (3, 8) and not _IS_PYPY: - print( - ( - "The typed_ast package is required but not installed.\n" - "You can upgrade to Python 3.8+ or install typed_ast with\n" - "`python3 -m pip install typed-ast`." 
- ), - file=sys.stderr, - ) - sys.exit(1) - else: - ast3 = ast - - PY2_HINT: Final = "Python 2 support was removed in version 22.0." @@ -149,31 +122,14 @@ def lib2to3_unparse(node: Node) -> str: def parse_single_version( src: str, version: Tuple[int, int], *, type_comments: bool -) -> Union[ast.AST, ast3.AST]: +) -> ast.AST: filename = "" - # typed-ast is needed because of feature version limitations in the builtin ast 3.8> - if sys.version_info >= (3, 8) and version >= (3,): - return ast.parse( - src, filename, feature_version=version, type_comments=type_comments - ) - - if _IS_PYPY: - # PyPy 3.7 doesn't support type comment tracking which is not ideal, but there's - # not much we can do as typed-ast won't work either. - if sys.version_info >= (3, 8): - return ast3.parse(src, filename, type_comments=type_comments) - else: - return ast3.parse(src, filename) - else: - if type_comments: - # Typed-ast is guaranteed to be used here and automatically tracks type - # comments separately. - return ast3.parse(src, filename, feature_version=version[1]) - else: - return ast.parse(src, filename) + return ast.parse( + src, filename, feature_version=version, type_comments=type_comments + ) -def parse_ast(src: str) -> Union[ast.AST, ast3.AST]: +def parse_ast(src: str) -> ast.AST: # TODO: support Python 4+ ;) versions = [(3, minor) for minor in range(3, sys.version_info[1] + 1)] @@ -195,9 +151,6 @@ def parse_ast(src: str) -> Union[ast.AST, ast3.AST]: raise SyntaxError(first_error) -ast3_AST: Final[Type[ast3.AST]] = ast3.AST - - def _normalize(lineend: str, value: str) -> str: # To normalize, we strip any leading and trailing space from # each line... 
@@ -208,23 +161,25 @@ def _normalize(lineend: str, value: str) -> str: return normalized.strip() -def stringify_ast(node: Union[ast.AST, ast3.AST], depth: int = 0) -> Iterator[str]: +def stringify_ast(node: ast.AST, depth: int = 0) -> Iterator[str]: """Simple visitor generating strings to compare ASTs by content.""" - node = fixup_ast_constants(node) + if ( + isinstance(node, ast.Constant) + and isinstance(node.value, str) + and node.kind == "u" + ): + # It's a quirk of history that we strip the u prefix over here. We used to + # rewrite the AST nodes for Python version compatibility and we never copied + # over the kind + node.kind = None yield f"{' ' * depth}{node.__class__.__name__}(" - type_ignore_classes: Tuple[Type[Any], ...] for field in sorted(node._fields): # noqa: F402 - # TypeIgnore will not be present using pypy < 3.8, so need for this - if not (_IS_PYPY and sys.version_info < (3, 8)): - # TypeIgnore has only one field 'lineno' which breaks this comparison - type_ignore_classes = (ast3.TypeIgnore,) - if sys.version_info >= (3, 8): - type_ignore_classes += (ast.TypeIgnore,) - if isinstance(node, type_ignore_classes): - break + # TypeIgnore has only one field 'lineno' which breaks this comparison + if isinstance(node, ast.TypeIgnore): + break try: value: object = getattr(node, field) @@ -239,51 +194,34 @@ def stringify_ast(node: Union[ast.AST, ast3.AST], depth: int = 0) -> Iterator[st # parentheses and they change the AST. if ( field == "targets" - and isinstance(node, (ast.Delete, ast3.Delete)) - and isinstance(item, (ast.Tuple, ast3.Tuple)) + and isinstance(node, ast.Delete) + and isinstance(item, ast.Tuple) ): for elt in item.elts: yield from stringify_ast(elt, depth + 2) - elif isinstance(item, (ast.AST, ast3.AST)): + elif isinstance(item, ast.AST): yield from stringify_ast(item, depth + 2) - # Note that we are referencing the typed-ast ASTs via global variables and not - # direct module attribute accesses because that breaks mypyc. 
It's probably - # something to do with the ast3 variables being marked as Any leading - # mypy to think this branch is always taken, leaving the rest of the code - # unanalyzed. Tighting up the types for the typed-ast AST types avoids the - # mypyc crash. - elif isinstance(value, (ast.AST, ast3_AST)): + elif isinstance(value, ast.AST): yield from stringify_ast(value, depth + 2) else: normalized: object - # Constant strings may be indented across newlines, if they are - # docstrings; fold spaces after newlines when comparing. Similarly, - # trailing and leading space may be removed. if ( isinstance(node, ast.Constant) and field == "value" and isinstance(value, str) ): + # Constant strings may be indented across newlines, if they are + # docstrings; fold spaces after newlines when comparing. Similarly, + # trailing and leading space may be removed. normalized = _normalize("\n", value) + elif field == "type_comment" and isinstance(value, str): + # Trailing whitespace in type comments is removed. 
+ normalized = value.rstrip() else: normalized = value yield f"{' ' * (depth+2)}{normalized!r}, # {value.__class__.__name__}" yield f"{' ' * depth}) # /{node.__class__.__name__}" - - -def fixup_ast_constants(node: Union[ast.AST, ast3.AST]) -> Union[ast.AST, ast3.AST]: - """Map ast nodes deprecated in 3.8 to Constant.""" - if isinstance(node, (ast.Str, ast3.Str, ast.Bytes, ast3.Bytes)): - return ast.Constant(value=node.s) - - if isinstance(node, (ast.Num, ast3.Num)): - return ast.Constant(value=node.n) - - if isinstance(node, (ast.NameConstant, ast3.NameConstant)): - return ast.Constant(value=node.value) - - return node diff --git a/src/black/strings.py b/src/black/strings.py index ac18aef51ed..0d30f09ed11 100644 --- a/src/black/strings.py +++ b/src/black/strings.py @@ -5,16 +5,10 @@ import re import sys from functools import lru_cache -from typing import List, Match, Pattern - -from blib2to3.pytree import Leaf - -if sys.version_info < (3, 8): - from typing_extensions import Final -else: - from typing import Final +from typing import Final, List, Match, Pattern from black._width_table import WIDTH_TABLE +from blib2to3.pytree import Leaf STRING_PREFIX_CHARS: Final = "furbFURB" # All possible string prefix characters. STRING_PREFIX_RE: Final = re.compile( diff --git a/src/black/trans.py b/src/black/trans.py index 95695f32b14..daed26427d7 100644 --- a/src/black/trans.py +++ b/src/black/trans.py @@ -2,7 +2,6 @@ String transformers that can split and merge strings. 
""" import re -import sys from abc import ABC, abstractmethod from collections import defaultdict from dataclasses import dataclass @@ -12,9 +11,11 @@ ClassVar, Collection, Dict, + Final, Iterable, Iterator, List, + Literal, Optional, Sequence, Set, @@ -23,11 +24,6 @@ Union, ) -if sys.version_info < (3, 8): - from typing_extensions import Final, Literal -else: - from typing import Literal, Final - from mypy_extensions import trait from black.comments import contains_pragma_comment @@ -205,11 +201,11 @@ def do_match(self, line: Line) -> TMatchResult: """ Returns: * Ok(string_indices) such that for each index, `line.leaves[index]` - is our target string if a match was able to be made. For - transformers that don't result in more lines (e.g. StringMerger, - StringParenStripper), multiple matches and transforms are done at - once to reduce the complexity. - OR + is our target string if a match was able to be made. For + transformers that don't result in more lines (e.g. StringMerger, + StringParenStripper), multiple matches and transforms are done at + once to reduce the complexity. + OR * Err(CannotTransform), if no match could be made. """ @@ -220,12 +216,12 @@ def do_transform( """ Yields: * Ok(new_line) where new_line is the new transformed line. - OR + OR * Err(CannotTransform) if the transformation failed for some reason. The - `do_match(...)` template method should usually be used to reject - the form of the given Line, but in some cases it is difficult to - know whether or not a Line meets the StringTransformer's - requirements until the transformation is already midway. + `do_match(...)` template method should usually be used to reject + the form of the given Line, but in some cases it is difficult to + know whether or not a Line meets the StringTransformer's + requirements until the transformation is already midway. 
Side Effects: This method should NOT mutate @line directly, but it MAY mutate the @@ -335,8 +331,8 @@ def pop_custom_splits(self, string: str) -> List[CustomSplit]: Returns: * A list of the custom splits that are mapped to @string, if any - exist. - OR + exist. + OR * [], otherwise. Side Effects: @@ -365,14 +361,14 @@ class StringMerger(StringTransformer, CustomSplitMapMixin): Requirements: (A) The line contains adjacent strings such that ALL of the validation checks listed in StringMerger._validate_msg(...)'s docstring pass. - OR + OR (B) The line contains a string which uses line continuation backslashes. Transformations: Depending on which of the two requirements above where met, either: (A) The string group associated with the target string is merged. - OR + OR (B) All line-continuation backslashes are removed from the target string. Collaborations: @@ -965,17 +961,20 @@ class BaseStringSplitter(StringTransformer): Requirements: * The target string value is responsible for the line going over the - line length limit. It follows that after all of black's other line - split methods have been exhausted, this line (or one of the resulting - lines after all line splits are performed) would still be over the - line_length limit unless we split this string. - AND + line length limit. It follows that after all of black's other line + split methods have been exhausted, this line (or one of the resulting + lines after all line splits are performed) would still be over the + line_length limit unless we split this string. + AND + * The target string is NOT a "pointless" string (i.e. a string that has - no parent or siblings). - AND + no parent or siblings). + AND + * The target string is not followed by an inline comment that appears - to be a pragma. - AND + to be a pragma. + AND + * The target string is not a multiline (i.e. triple-quote) string. 
""" @@ -1027,7 +1026,7 @@ def _validate(self, line: Line, string_idx: int) -> TResult[None]: Returns: * Ok(None), if ALL of the requirements are met. - OR + OR * Err(CannotTransform), if ANY of the requirements are NOT met. """ LL = line.leaves @@ -1186,19 +1185,33 @@ def _prefer_paren_wrap_match(LL: List[Leaf]) -> Optional[int]: if LL[0].type != token.STRING: return None - # If the string is surrounded by commas (or is the first/last child)... - prev_sibling = LL[0].prev_sibling - next_sibling = LL[0].next_sibling - if not prev_sibling and not next_sibling and parent_type(LL[0]) == syms.atom: - # If it's an atom string, we need to check the parent atom's siblings. - parent = LL[0].parent - assert parent is not None # For type checkers. - prev_sibling = parent.prev_sibling - next_sibling = parent.next_sibling - if (not prev_sibling or prev_sibling.type == token.COMMA) and ( - not next_sibling or next_sibling.type == token.COMMA + matching_nodes = [ + syms.listmaker, + syms.dictsetmaker, + syms.testlist_gexp, + ] + # If the string is an immediate child of a list/set/tuple literal... + if ( + parent_type(LL[0]) in matching_nodes + or parent_type(LL[0].parent) in matching_nodes ): - return 0 + # And the string is surrounded by commas (or is the first/last child)... + prev_sibling = LL[0].prev_sibling + next_sibling = LL[0].next_sibling + if ( + not prev_sibling + and not next_sibling + and parent_type(LL[0]) == syms.atom + ): + # If it's an atom string, we need to check the parent atom's siblings. + parent = LL[0].parent + assert parent is not None # For type checkers. 
+ prev_sibling = parent.prev_sibling + next_sibling = parent.next_sibling + if (not prev_sibling or prev_sibling.type == token.COMMA) and ( + not next_sibling or next_sibling.type == token.COMMA + ): + return 0 return None @@ -1285,9 +1298,9 @@ class StringSplitter(BaseStringSplitter, CustomSplitMapMixin): Requirements: * The line consists ONLY of a single string (possibly prefixed by a - string operator [e.g. '+' or '==']), MAYBE a string trailer, and MAYBE - a trailing comma. - AND + string operator [e.g. '+' or '==']), MAYBE a string trailer, and MAYBE + a trailing comma. + AND * All of the requirements listed in BaseStringSplitter's docstring. Transformations: @@ -1794,25 +1807,26 @@ class StringParenWrapper(BaseStringSplitter, CustomSplitMapMixin): addition to the requirements listed below: * The line is a return/yield statement, which returns/yields a string. - OR + OR * The line is part of a ternary expression (e.g. `x = y if cond else - z`) such that the line starts with `else `, where is - some string. - OR + z`) such that the line starts with `else `, where is + some string. + OR * The line is an assert statement, which ends with a string. - OR + OR * The line is an assignment statement (e.g. `x = ` or `x += - `) such that the variable is being assigned the value of some - string. - OR + `) such that the variable is being assigned the value of some + string. + OR * The line is a dictionary key assignment where some valid key is being - assigned the value of some string. - OR + assigned the value of some string. + OR * The line is an lambda expression and the value is a string. - OR + OR * The line starts with an "atom" string that prefers to be wrapped in - parens. It's preferred to be wrapped when the string is surrounded by - commas (or is the first/last child). + parens. It's preferred to be wrapped when it's is an immediate child of + a list/set/tuple literal, AND the string is surrounded by commas (or is + the first/last child). 
Transformations: The chosen string is wrapped in parentheses and then split at the LPAR. @@ -2258,7 +2272,7 @@ def parse(self, leaves: List[Leaf], string_idx: int) -> int: Returns: The index directly after the last leaf which is apart of the string trailer, if a "trailer" exists. - OR + OR @string_idx + 1, if no string "trailer" exists. """ assert leaves[string_idx].type == token.STRING @@ -2272,11 +2286,11 @@ def _next_state(self, leaf: Leaf) -> bool: """ Pre-conditions: * On the first call to this function, @leaf MUST be the leaf that - was directly after the string leaf in question (e.g. if our target - string is `line.leaves[i]` then the first call to this method must - be `line.leaves[i + 1]`). + was directly after the string leaf in question (e.g. if our target + string is `line.leaves[i]` then the first call to this method must + be `line.leaves[i + 1]`). * On the next call to this function, the leaf parameter passed in - MUST be the leaf directly following @leaf. + MUST be the leaf directly following @leaf. Returns: True iff @leaf is apart of the string's trailer. 
diff --git a/src/blackd/__init__.py b/src/blackd/__init__.py index ba4750b8298..4f2d87d0fca 100644 --- a/src/blackd/__init__.py +++ b/src/blackd/__init__.py @@ -1,7 +1,7 @@ import asyncio import logging from concurrent.futures import Executor, ProcessPoolExecutor -from datetime import datetime +from datetime import datetime, timezone from functools import partial from multiprocessing import freeze_support from typing import Set, Tuple @@ -59,9 +59,15 @@ class InvalidVariantHeader(Exception): @click.command(context_settings={"help_option_names": ["-h", "--help"]}) @click.option( - "--bind-host", type=str, help="Address to bind the server to.", default="localhost" + "--bind-host", + type=str, + help="Address to bind the server to.", + default="localhost", + show_default=True, +) +@click.option( + "--bind-port", type=int, help="Port to listen on", default=45484, show_default=True ) -@click.option("--bind-port", type=int, help="Port to listen on", default=45484) @click.version_option(version=black.__version__) def main(bind_host: str, bind_port: int) -> None: logging.basicConfig(level=logging.INFO) @@ -132,7 +138,7 @@ async def handle(request: web.Request, executor: Executor) -> web.Response: req_bytes = await request.content.read() charset = request.charset if request.charset is not None else "utf8" req_str = req_bytes.decode(charset) - then = datetime.utcnow() + then = datetime.now(timezone.utc) header = "" if skip_source_first_line: @@ -159,9 +165,9 @@ async def handle(request: web.Request, executor: Executor) -> web.Response: # Only output the diff in the HTTP response only_diff = bool(request.headers.get(DIFF_HEADER, False)) if only_diff: - now = datetime.utcnow() - src_name = f"In\t{then} +0000" - dst_name = f"Out\t{now} +0000" + now = datetime.now(timezone.utc) + src_name = f"In\t{then}" + dst_name = f"Out\t{now}" loop = asyncio.get_event_loop() formatted_str = await loop.run_in_executor( executor, @@ -219,7 +225,6 @@ def parse_python_variant_header(value: str) 
-> Tuple[bool, Set[black.TargetVersi def patched_main() -> None: maybe_install_uvloop() freeze_support() - black.patch_click() main() diff --git a/src/blib2to3/Grammar.txt b/src/blib2to3/Grammar.txt index bd8a452a386..e48e66363fb 100644 --- a/src/blib2to3/Grammar.txt +++ b/src/blib2to3/Grammar.txt @@ -12,11 +12,17 @@ file_input: (NEWLINE | stmt)* ENDMARKER single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE eval_input: testlist NEWLINE* ENDMARKER +typevar: NAME [':' expr] +paramspec: '**' NAME +typevartuple: '*' NAME +typeparam: typevar | paramspec | typevartuple +typeparams: '[' typeparam (',' typeparam)* [','] ']' + decorator: '@' namedexpr_test NEWLINE decorators: decorator+ decorated: decorators (classdef | funcdef | async_funcdef) async_funcdef: ASYNC funcdef -funcdef: 'def' NAME parameters ['->' test] ':' suite +funcdef: 'def' NAME [typeparams] parameters ['->' test] ':' suite parameters: '(' [typedargslist] ')' # The following definition for typedarglist is equivalent to this set of rules: @@ -74,7 +80,7 @@ vfplist: vfpdef (',' vfpdef)* [','] stmt: simple_stmt | compound_stmt simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE -small_stmt: (expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt | +small_stmt: (type_stmt | expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt | import_stmt | global_stmt | exec_stmt | assert_stmt) expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) | ('=' (yield_expr|testlist_star_expr))*) @@ -105,6 +111,7 @@ dotted_name: NAME ('.' 
NAME)*
 global_stmt: ('global' | 'nonlocal') NAME (',' NAME)*
 exec_stmt: 'exec' expr ['in' test [',' test]]
 assert_stmt: 'assert' test [',' test]
+type_stmt: "type" NAME [typeparams] '=' expr
 
 compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt | match_stmt
 async_stmt: ASYNC (funcdef | with_stmt | for_stmt)
@@ -174,7 +181,7 @@ dictsetmaker: ( ((test ':' asexpr_test | '**' expr)
                 ((test [':=' test] | star_expr)
                  (comp_for | (',' (test [':=' test] | star_expr))* [','])) )
 
-classdef: 'class' NAME ['(' [arglist] ')'] ':' suite
+classdef: 'class' NAME [typeparams] ['(' [arglist] ')'] ':' suite
 
 arglist: argument (',' argument)* [',']
 
diff --git a/src/blib2to3/pgen2/conv.py b/src/blib2to3/pgen2/conv.py
index fa9825e54d6..04eccfa1d4b 100644
--- a/src/blib2to3/pgen2/conv.py
+++ b/src/blib2to3/pgen2/conv.py
@@ -63,7 +63,7 @@ def parse_graminit_h(self, filename):
         try:
             f = open(filename)
         except OSError as err:
-            print("Can't open %s: %s" % (filename, err))
+            print(f"Can't open {filename}: {err}")
             return False
         self.symbol2number = {}
         self.number2symbol = {}
@@ -72,7 +72,7 @@ def parse_graminit_h(self, filename):
             lineno += 1
             mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line)
             if not mo and line.strip():
-                print("%s(%s): can't parse %s" % (filename, lineno, line.strip()))
+                print(f"{filename}({lineno}): can't parse {line.strip()}")
             else:
                 symbol, number = mo.groups()
                 number = int(number)
@@ -113,7 +113,7 @@ def parse_graminit_c(self, filename):
         try:
             f = open(filename)
         except OSError as err:
-            print("Can't open %s: %s" % (filename, err))
+            print(f"Can't open {filename}: {err}")
             return False
         # The code below essentially uses f's iterator-ness!
lineno = 0 diff --git a/src/blib2to3/pgen2/driver.py b/src/blib2to3/pgen2/driver.py index 1741b33c510..bb73016a4c1 100644 --- a/src/blib2to3/pgen2/driver.py +++ b/src/blib2to3/pgen2/driver.py @@ -28,11 +28,8 @@ Iterable, List, Optional, - Text, Iterator, Tuple, - TypeVar, - Generic, Union, ) from contextlib import contextmanager @@ -116,7 +113,7 @@ def can_advance(self, to: int) -> bool: return True -class Driver(object): +class Driver: def __init__(self, grammar: Grammar, logger: Optional[Logger] = None) -> None: self.grammar = grammar if logger is None: @@ -189,30 +186,30 @@ def parse_tokens(self, tokens: Iterable[GoodTokenInfo], debug: bool = False) -> assert p.rootnode is not None return p.rootnode - def parse_stream_raw(self, stream: IO[Text], debug: bool = False) -> NL: + def parse_stream_raw(self, stream: IO[str], debug: bool = False) -> NL: """Parse a stream and return the syntax tree.""" tokens = tokenize.generate_tokens(stream.readline, grammar=self.grammar) return self.parse_tokens(tokens, debug) - def parse_stream(self, stream: IO[Text], debug: bool = False) -> NL: + def parse_stream(self, stream: IO[str], debug: bool = False) -> NL: """Parse a stream and return the syntax tree.""" return self.parse_stream_raw(stream, debug) def parse_file( - self, filename: Path, encoding: Optional[Text] = None, debug: bool = False + self, filename: Path, encoding: Optional[str] = None, debug: bool = False ) -> NL: """Parse a file and return the syntax tree.""" - with io.open(filename, "r", encoding=encoding) as stream: + with open(filename, encoding=encoding) as stream: return self.parse_stream(stream, debug) - def parse_string(self, text: Text, debug: bool = False) -> NL: + def parse_string(self, text: str, debug: bool = False) -> NL: """Parse a string and return the syntax tree.""" tokens = tokenize.generate_tokens( io.StringIO(text).readline, grammar=self.grammar ) return self.parse_tokens(tokens, debug) - def _partially_consume_prefix(self, prefix: Text, column: 
int) -> Tuple[Text, Text]: + def _partially_consume_prefix(self, prefix: str, column: int) -> Tuple[str, str]: lines: List[str] = [] current_line = "" current_column = 0 @@ -240,7 +237,7 @@ def _partially_consume_prefix(self, prefix: Text, column: int) -> Tuple[Text, Te return "".join(lines), current_line -def _generate_pickle_name(gt: Path, cache_dir: Optional[Path] = None) -> Text: +def _generate_pickle_name(gt: Path, cache_dir: Optional[Path] = None) -> str: head, tail = os.path.splitext(gt) if tail == ".txt": tail = "" @@ -252,8 +249,8 @@ def _generate_pickle_name(gt: Path, cache_dir: Optional[Path] = None) -> Text: def load_grammar( - gt: Text = "Grammar.txt", - gp: Optional[Text] = None, + gt: str = "Grammar.txt", + gp: Optional[str] = None, save: bool = True, force: bool = False, logger: Optional[Logger] = None, @@ -276,7 +273,7 @@ def load_grammar( return g -def _newer(a: Text, b: Text) -> bool: +def _newer(a: str, b: str) -> bool: """Inquire whether file a was written since file b.""" if not os.path.exists(a): return False @@ -286,7 +283,7 @@ def _newer(a: Text, b: Text) -> bool: def load_packaged_grammar( - package: str, grammar_source: Text, cache_dir: Optional[Path] = None + package: str, grammar_source: str, cache_dir: Optional[Path] = None ) -> grammar.Grammar: """Normally, loads a pickled grammar by doing pkgutil.get_data(package, pickled_grammar) @@ -309,7 +306,7 @@ def load_packaged_grammar( return g -def main(*args: Text) -> bool: +def main(*args: str) -> bool: """Main program, when run as a script: produce grammar pickle files. Calls load_grammar for each argument, a path to a grammar text file. 
diff --git a/src/blib2to3/pgen2/grammar.py b/src/blib2to3/pgen2/grammar.py index 337a64f1726..1f3fdc55b97 100644 --- a/src/blib2to3/pgen2/grammar.py +++ b/src/blib2to3/pgen2/grammar.py @@ -16,19 +16,19 @@ import os import pickle import tempfile -from typing import Any, Dict, List, Optional, Text, Tuple, TypeVar, Union +from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union # Local imports from . import token _P = TypeVar("_P", bound="Grammar") -Label = Tuple[int, Optional[Text]] +Label = Tuple[int, Optional[str]] DFA = List[List[Tuple[int, int]]] DFAS = Tuple[DFA, Dict[int, int]] Path = Union[str, "os.PathLike[str]"] -class Grammar(object): +class Grammar: """Pgen parsing tables conversion class. Once initialized, this class supplies the grammar tables for the diff --git a/src/blib2to3/pgen2/literals.py b/src/blib2to3/pgen2/literals.py index b5fe4285114..c67b91d0463 100644 --- a/src/blib2to3/pgen2/literals.py +++ b/src/blib2to3/pgen2/literals.py @@ -5,10 +5,10 @@ import re -from typing import Dict, Match, Text +from typing import Dict, Match -simple_escapes: Dict[Text, Text] = { +simple_escapes: Dict[str, str] = { "a": "\a", "b": "\b", "f": "\f", @@ -22,7 +22,7 @@ } -def escape(m: Match[Text]) -> Text: +def escape(m: Match[str]) -> str: all, tail = m.group(0, 1) assert all.startswith("\\") esc = simple_escapes.get(tail) @@ -44,7 +44,7 @@ def escape(m: Match[Text]) -> Text: return chr(i) -def evalString(s: Text) -> Text: +def evalString(s: str) -> str: assert s.startswith("'") or s.startswith('"'), repr(s[:1]) q = s[0] if s[:3] == q * 3: diff --git a/src/blib2to3/pgen2/parse.py b/src/blib2to3/pgen2/parse.py index c462f63ad2c..17bf118e9fc 100644 --- a/src/blib2to3/pgen2/parse.py +++ b/src/blib2to3/pgen2/parse.py @@ -9,7 +9,6 @@ how this parsing engine works. 
""" -import copy from contextlib import contextmanager # Local imports @@ -18,7 +17,6 @@ cast, Any, Optional, - Text, Union, Tuple, Dict, @@ -35,7 +33,7 @@ from blib2to3.pgen2.driver import TokenProxy -Results = Dict[Text, NL] +Results = Dict[str, NL] Convert = Callable[[Grammar, RawNode], Union[Node, Leaf]] DFA = List[List[Tuple[int, int]]] DFAS = Tuple[DFA, Dict[int, int]] @@ -100,7 +98,7 @@ def backtrack(self) -> Iterator[None]: finally: self.parser.is_backtracking = is_backtracking - def add_token(self, tok_type: int, tok_val: Text, raw: bool = False) -> None: + def add_token(self, tok_type: int, tok_val: str, raw: bool = False) -> None: func: Callable[..., Any] if raw: func = self.parser._addtoken @@ -114,7 +112,7 @@ def add_token(self, tok_type: int, tok_val: Text, raw: bool = False) -> None: args.insert(0, ilabel) func(*args) - def determine_route(self, value: Optional[Text] = None, force: bool = False) -> Optional[int]: + def determine_route(self, value: Optional[str] = None, force: bool = False) -> Optional[int]: alive_ilabels = self.ilabels if len(alive_ilabels) == 0: *_, most_successful_ilabel = self._dead_ilabels @@ -131,10 +129,10 @@ class ParseError(Exception): """Exception to signal the parser is stuck.""" def __init__( - self, msg: Text, type: Optional[int], value: Optional[Text], context: Context + self, msg: str, type: Optional[int], value: Optional[str], context: Context ) -> None: Exception.__init__( - self, "%s: type=%r, value=%r, context=%r" % (msg, type, value, context) + self, f"{msg}: type={type!r}, value={value!r}, context={context!r}" ) self.msg = msg self.type = type @@ -142,7 +140,7 @@ def __init__( self.context = context -class Parser(object): +class Parser: """Parser engine. 
The proper usage sequence is: @@ -236,7 +234,7 @@ def setup(self, proxy: "TokenProxy", start: Optional[int] = None) -> None: self.used_names: Set[str] = set() self.proxy = proxy - def addtoken(self, type: int, value: Text, context: Context) -> bool: + def addtoken(self, type: int, value: str, context: Context) -> bool: """Add a token; return True iff this is the end of the program.""" # Map from token to label ilabels = self.classify(type, value, context) @@ -284,7 +282,7 @@ def addtoken(self, type: int, value: Text, context: Context) -> bool: return self._addtoken(ilabel, type, value, context) - def _addtoken(self, ilabel: int, type: int, value: Text, context: Context) -> bool: + def _addtoken(self, ilabel: int, type: int, value: str, context: Context) -> bool: # Loop until the token is shifted; may raise exceptions while True: dfa, state, node = self.stack[-1] @@ -329,7 +327,7 @@ def _addtoken(self, ilabel: int, type: int, value: Text, context: Context) -> bo # No success finding a transition raise ParseError("bad input", type, value, context) - def classify(self, type: int, value: Text, context: Context) -> List[int]: + def classify(self, type: int, value: str, context: Context) -> List[int]: """Turn a token into a label. (Internal) Depending on whether the value is a soft-keyword or not, @@ -352,7 +350,7 @@ def classify(self, type: int, value: Text, context: Context) -> List[int]: raise ParseError("bad token", type, value, context) return [ilabel] - def shift(self, type: int, value: Text, newstate: int, context: Context) -> None: + def shift(self, type: int, value: str, newstate: int, context: Context) -> None: """Shift a token. 
(Internal)""" if self.is_backtracking: dfa, state, _ = self.stack[-1] diff --git a/src/blib2to3/pgen2/pgen.py b/src/blib2to3/pgen2/pgen.py index 631682a77c9..046efd09338 100644 --- a/src/blib2to3/pgen2/pgen.py +++ b/src/blib2to3/pgen2/pgen.py @@ -11,7 +11,6 @@ Iterator, List, Optional, - Text, Tuple, Union, Sequence, @@ -29,17 +28,16 @@ class PgenGrammar(grammar.Grammar): pass -class ParserGenerator(object): - +class ParserGenerator: filename: Path - stream: IO[Text] + stream: IO[str] generator: Iterator[GoodTokenInfo] - first: Dict[Text, Optional[Dict[Text, int]]] + first: Dict[str, Optional[Dict[str, int]]] - def __init__(self, filename: Path, stream: Optional[IO[Text]] = None) -> None: + def __init__(self, filename: Path, stream: Optional[IO[str]] = None) -> None: close_stream = None if stream is None: - stream = open(filename) + stream = open(filename, encoding="utf-8") close_stream = stream.close self.filename = filename self.stream = stream @@ -76,7 +74,7 @@ def make_grammar(self) -> PgenGrammar: c.start = c.symbol2number[self.startsymbol] return c - def make_first(self, c: PgenGrammar, name: Text) -> Dict[int, int]: + def make_first(self, c: PgenGrammar, name: str) -> Dict[int, int]: rawfirst = self.first[name] assert rawfirst is not None first = {} @@ -86,7 +84,7 @@ def make_first(self, c: PgenGrammar, name: Text) -> Dict[int, int]: first[ilabel] = 1 return first - def make_label(self, c: PgenGrammar, label: Text) -> int: + def make_label(self, c: PgenGrammar, label: str) -> int: # XXX Maybe this should be a method on a subclass of converter? 
ilabel = len(c.labels) if label[0].isalpha(): @@ -145,7 +143,7 @@ def addfirstsets(self) -> None: self.calcfirst(name) # print name, self.first[name].keys() - def calcfirst(self, name: Text) -> None: + def calcfirst(self, name: str) -> None: dfa = self.dfas[name] self.first[name] = None # dummy to detect left recursion state = dfa[0] @@ -177,7 +175,7 @@ def calcfirst(self, name: Text) -> None: inverse[symbol] = label self.first[name] = totalset - def parse(self) -> Tuple[Dict[Text, List["DFAState"]], Text]: + def parse(self) -> Tuple[Dict[str, List["DFAState"]], str]: dfas = {} startsymbol: Optional[str] = None # MSTART: (NEWLINE | RULE)* ENDMARKER @@ -241,7 +239,7 @@ def addclosure(state: NFAState, base: Dict[NFAState, int]) -> None: state.addarc(st, label) return states # List of DFAState instances; first one is start - def dump_nfa(self, name: Text, start: "NFAState", finish: "NFAState") -> None: + def dump_nfa(self, name: str, start: "NFAState", finish: "NFAState") -> None: print("Dump of NFA for", name) todo = [start] for i, state in enumerate(todo): @@ -257,7 +255,7 @@ def dump_nfa(self, name: Text, start: "NFAState", finish: "NFAState") -> None: else: print(" %s -> %d" % (label, j)) - def dump_dfa(self, name: Text, dfa: Sequence["DFAState"]) -> None: + def dump_dfa(self, name: str, dfa: Sequence["DFAState"]) -> None: print("Dump of DFA for", name) for i, state in enumerate(dfa): print(" State", i, state.isfinal and "(final)" or "") @@ -350,7 +348,7 @@ def parse_atom(self) -> Tuple["NFAState", "NFAState"]: ) assert False - def expect(self, type: int, value: Optional[Any] = None) -> Text: + def expect(self, type: int, value: Optional[Any] = None) -> str: if self.type != type or (value is not None and self.value != value): self.raise_error( "expected %s/%s, got %s/%s", type, value, self.type, self.value @@ -375,22 +373,22 @@ def raise_error(self, msg: str, *args: Any) -> NoReturn: raise SyntaxError(msg, (self.filename, self.end[0], self.end[1], self.line)) 
-class NFAState(object): - arcs: List[Tuple[Optional[Text], "NFAState"]] +class NFAState: + arcs: List[Tuple[Optional[str], "NFAState"]] def __init__(self) -> None: self.arcs = [] # list of (label, NFAState) pairs - def addarc(self, next: "NFAState", label: Optional[Text] = None) -> None: + def addarc(self, next: "NFAState", label: Optional[str] = None) -> None: assert label is None or isinstance(label, str) assert isinstance(next, NFAState) self.arcs.append((label, next)) -class DFAState(object): +class DFAState: nfaset: Dict[NFAState, Any] isfinal: bool - arcs: Dict[Text, "DFAState"] + arcs: Dict[str, "DFAState"] def __init__(self, nfaset: Dict[NFAState, Any], final: NFAState) -> None: assert isinstance(nfaset, dict) @@ -400,7 +398,7 @@ def __init__(self, nfaset: Dict[NFAState, Any], final: NFAState) -> None: self.isfinal = final in nfaset self.arcs = {} # map from label to DFAState - def addarc(self, next: "DFAState", label: Text) -> None: + def addarc(self, next: "DFAState", label: str) -> None: assert isinstance(label, str) assert label not in self.arcs assert isinstance(next, DFAState) diff --git a/src/blib2to3/pgen2/token.py b/src/blib2to3/pgen2/token.py index 1e0dec9c714..117cc09d4ce 100644 --- a/src/blib2to3/pgen2/token.py +++ b/src/blib2to3/pgen2/token.py @@ -1,12 +1,8 @@ """Token constants (from "token.h").""" -import sys from typing import Dict -if sys.version_info < (3, 8): - from typing_extensions import Final -else: - from typing import Final +from typing import Final # Taken from Python (r53757) and modified to include some tokens # originally monkeypatched in by pgen2.tokenize @@ -78,7 +74,7 @@ tok_name: Final[Dict[int, str]] = {} for _name, _value in list(globals().items()): - if type(_value) is type(0): + if type(_value) is int: tok_name[_value] = _name diff --git a/src/blib2to3/pgen2/tokenize.py b/src/blib2to3/pgen2/tokenize.py index 257dbef4a19..1dea89d7bb8 100644 --- a/src/blib2to3/pgen2/tokenize.py +++ b/src/blib2to3/pgen2/tokenize.py @@ 
-34,17 +34,14 @@ Iterator, List, Optional, - Text, + Set, Tuple, Pattern, Union, cast, ) -if sys.version_info >= (3, 8): - from typing import Final -else: - from typing_extensions import Final +from typing import Final from blib2to3.pgen2.token import * from blib2to3.pgen2.grammar import Grammar @@ -66,20 +63,20 @@ del token -def group(*choices): +def group(*choices: str) -> str: return "(" + "|".join(choices) + ")" -def any(*choices): +def any(*choices: str) -> str: return group(*choices) + "*" -def maybe(*choices): +def maybe(*choices: str) -> str: return group(*choices) + "?" -def _combinations(*l): - return set(x + y for x in l for y in l + ("",) if x.casefold() != y.casefold()) +def _combinations(*l: str) -> Set[str]: + return {x + y for x in l for y in l + ("",) if x.casefold() != y.casefold()} Whitespace = r"[ \f\t]*" @@ -163,7 +160,6 @@ def _combinations(*l): '"""': double3prog, **{f"{prefix}'''": single3prog for prefix in _strprefixes}, **{f'{prefix}"""': double3prog for prefix in _strprefixes}, - **{prefix: None for prefix in _strprefixes}, } triple_quoted: Final = ( @@ -188,19 +184,23 @@ class StopTokenizing(Exception): pass -def printtoken(type, token, xxx_todo_changeme, xxx_todo_changeme1, line): # for testing - (srow, scol) = xxx_todo_changeme - (erow, ecol) = xxx_todo_changeme1 +Coord = Tuple[int, int] + + +def printtoken( + type: int, token: str, srow_col: Coord, erow_col: Coord, line: str +) -> None: # for testing + (srow, scol) = srow_col + (erow, ecol) = erow_col print( "%d,%d-%d,%d:\t%s\t%s" % (srow, scol, erow, ecol, tok_name[type], repr(token)) ) -Coord = Tuple[int, int] -TokenEater = Callable[[int, Text, Coord, Coord, Text], None] +TokenEater = Callable[[int, str, Coord, Coord, str], None] -def tokenize(readline: Callable[[], Text], tokeneater: TokenEater = printtoken) -> None: +def tokenize(readline: Callable[[], str], tokeneater: TokenEater = printtoken) -> None: """ The tokenize() function accepts two parameters: one representing the input 
stream, and one providing an output mechanism for tokenize(). @@ -220,18 +220,17 @@ def tokenize(readline: Callable[[], Text], tokeneater: TokenEater = printtoken) # backwards compatible interface -def tokenize_loop(readline, tokeneater): +def tokenize_loop(readline: Callable[[], str], tokeneater: TokenEater) -> None: for token_info in generate_tokens(readline): tokeneater(*token_info) -GoodTokenInfo = Tuple[int, Text, Coord, Coord, Text] +GoodTokenInfo = Tuple[int, str, Coord, Coord, str] TokenInfo = Union[Tuple[int, str], GoodTokenInfo] class Untokenizer: - - tokens: List[Text] + tokens: List[str] prev_row: int prev_col: int @@ -247,13 +246,13 @@ def add_whitespace(self, start: Coord) -> None: if col_offset: self.tokens.append(" " * col_offset) - def untokenize(self, iterable: Iterable[TokenInfo]) -> Text: + def untokenize(self, iterable: Iterable[TokenInfo]) -> str: for t in iterable: if len(t) == 2: self.compat(cast(Tuple[int, str], t), iterable) break tok_type, token, start, end, line = cast( - Tuple[int, Text, Coord, Coord, Text], t + Tuple[int, str, Coord, Coord, str], t ) self.add_whitespace(start) self.tokens.append(token) @@ -263,7 +262,7 @@ def untokenize(self, iterable: Iterable[TokenInfo]) -> Text: self.prev_col = 0 return "".join(self.tokens) - def compat(self, token: Tuple[int, Text], iterable: Iterable[TokenInfo]) -> None: + def compat(self, token: Tuple[int, str], iterable: Iterable[TokenInfo]) -> None: startline = False indents = [] toks_append = self.tokens.append @@ -335,7 +334,7 @@ def read_or_stop() -> bytes: try: return readline() except StopIteration: - return bytes() + return b'' def find_cookie(line: bytes) -> Optional[str]: try: @@ -384,7 +383,7 @@ def find_cookie(line: bytes) -> Optional[str]: return default, [first, second] -def untokenize(iterable: Iterable[TokenInfo]) -> Text: +def untokenize(iterable: Iterable[TokenInfo]) -> str: """Transform tokens back into Python source code. 
Each element returned by the iterable must be a token sequence @@ -407,7 +406,7 @@ def untokenize(iterable: Iterable[TokenInfo]) -> Text: def generate_tokens( - readline: Callable[[], Text], grammar: Optional[Grammar] = None + readline: Callable[[], str], grammar: Optional[Grammar] = None ) -> Iterator[GoodTokenInfo]: """ The generate_tokens() generator requires one argument, readline, which @@ -425,7 +424,7 @@ def generate_tokens( logical line; continuation lines are included. """ lnum = parenlev = continued = 0 - numchars: Final = "0123456789" + numchars: Final[str] = "0123456789" contstr, needcont = "", 0 contline: Optional[str] = None indents = [0] @@ -599,11 +598,15 @@ def generate_tokens( ): if token[-1] == "\n": # continued string strstart = (lnum, start) - endprog = ( - endprogs[initial] - or endprogs[token[1]] - or endprogs[token[2]] + maybe_endprog = ( + endprogs.get(initial) + or endprogs.get(token[1]) + or endprogs.get(token[2]) ) + assert ( + maybe_endprog is not None + ), f"endprog not found for {token}" + endprog = maybe_endprog contstr, needcont = line[start:], 1 contline = line break @@ -631,7 +634,6 @@ def generate_tokens( if token in ("def", "for"): if stashed and stashed[0] == NAME and stashed[1] == "async": - if token == "def": async_def = True async_def_indent = indents[-1] diff --git a/src/blib2to3/pygram.py b/src/blib2to3/pygram.py index 99012cdd9cb..1b4832362bf 100644 --- a/src/blib2to3/pygram.py +++ b/src/blib2to3/pygram.py @@ -9,7 +9,6 @@ from typing import Union # Local imports -from .pgen2 import token from .pgen2 import driver from .pgen2.grammar import Grammar @@ -21,7 +20,7 @@ # "PatternGrammar.txt") -class Symbols(object): +class Symbols: def __init__(self, grammar: Grammar) -> None: """Initializer. 
@@ -95,6 +94,7 @@ class _python_symbols(Symbols): old_test: int or_test: int parameters: int + paramspec: int pass_stmt: int pattern: int patterns: int @@ -126,7 +126,12 @@ class _python_symbols(Symbols): tname_star: int trailer: int try_stmt: int + type_stmt: int typedargslist: int + typeparam: int + typeparams: int + typevar: int + typevartuple: int varargslist: int vfpdef: int vfplist: int diff --git a/src/blib2to3/pytree.py b/src/blib2to3/pytree.py index ea60c894e20..156322cab7e 100644 --- a/src/blib2to3/pytree.py +++ b/src/blib2to3/pytree.py @@ -18,7 +18,6 @@ Iterator, List, Optional, - Text, Tuple, TypeVar, Union, @@ -34,10 +33,10 @@ HUGE: int = 0x7FFFFFFF # maximum repeat count, default max -_type_reprs: Dict[int, Union[Text, int]] = {} +_type_reprs: Dict[int, Union[str, int]] = {} -def type_repr(type_num: int) -> Union[Text, int]: +def type_repr(type_num: int) -> Union[str, int]: global _type_reprs if not _type_reprs: from .pygram import python_symbols @@ -54,11 +53,11 @@ def type_repr(type_num: int) -> Union[Text, int]: _P = TypeVar("_P", bound="Base") NL = Union["Node", "Leaf"] -Context = Tuple[Text, Tuple[int, int]] -RawNode = Tuple[int, Optional[Text], Optional[Context], Optional[List[NL]]] +Context = Tuple[str, Tuple[int, int]] +RawNode = Tuple[int, Optional[str], Optional[Context], Optional[List[NL]]] -class Base(object): +class Base: """ Abstract base class for Node and Leaf. @@ -92,7 +91,7 @@ def __eq__(self, other: Any) -> bool: return self._eq(other) @property - def prefix(self) -> Text: + def prefix(self) -> str: raise NotImplementedError def _eq(self: _P, other: _P) -> bool: @@ -225,7 +224,7 @@ def depth(self) -> int: return 0 return 1 + self.parent.depth() - def get_suffix(self) -> Text: + def get_suffix(self) -> str: """ Return the string immediately following the invocant node. 
This is effectively equivalent to node.next_sibling.prefix @@ -242,14 +241,14 @@ class Node(Base): """Concrete implementation for interior nodes.""" fixers_applied: Optional[List[Any]] - used_names: Optional[Set[Text]] + used_names: Optional[Set[str]] def __init__( self, type: int, children: List[NL], context: Optional[Any] = None, - prefix: Optional[Text] = None, + prefix: Optional[str] = None, fixers_applied: Optional[List[Any]] = None, ) -> None: """ @@ -274,16 +273,16 @@ def __init__( else: self.fixers_applied = None - def __repr__(self) -> Text: + def __repr__(self) -> str: """Return a canonical string representation.""" assert self.type is not None - return "%s(%s, %r)" % ( + return "{}({}, {!r})".format( self.__class__.__name__, type_repr(self.type), self.children, ) - def __str__(self) -> Text: + def __str__(self) -> str: """ Return a pretty string representation. @@ -317,7 +316,7 @@ def pre_order(self) -> Iterator[NL]: yield from child.pre_order() @property - def prefix(self) -> Text: + def prefix(self) -> str: """ The whitespace and comments preceding this node in the input. 
""" @@ -326,7 +325,7 @@ def prefix(self) -> Text: return self.children[0].prefix @prefix.setter - def prefix(self, prefix: Text) -> None: + def prefix(self, prefix: str) -> None: if self.children: self.children[0].prefix = prefix @@ -383,12 +382,12 @@ class Leaf(Base): """Concrete implementation for leaf nodes.""" # Default values for instance variables - value: Text + value: str fixers_applied: List[Any] bracket_depth: int # Changed later in brackets.py opening_bracket: Optional["Leaf"] = None - used_names: Optional[Set[Text]] + used_names: Optional[Set[str]] _prefix = "" # Whitespace and comments preceding this token in the input lineno: int = 0 # Line where this token starts in the input column: int = 0 # Column where this token starts in the input @@ -400,9 +399,9 @@ class Leaf(Base): def __init__( self, type: int, - value: Text, + value: str, context: Optional[Context] = None, - prefix: Optional[Text] = None, + prefix: Optional[str] = None, fixers_applied: List[Any] = [], opening_bracket: Optional["Leaf"] = None, fmt_pass_converted_first_leaf: Optional["Leaf"] = None, @@ -431,13 +430,13 @@ def __repr__(self) -> str: from .pgen2.token import tok_name assert self.type is not None - return "%s(%s, %r)" % ( + return "{}({}, {!r})".format( self.__class__.__name__, tok_name.get(self.type, self.type), self.value, ) - def __str__(self) -> Text: + def __str__(self) -> str: """ Return a pretty string representation. @@ -471,14 +470,14 @@ def pre_order(self) -> Iterator["Leaf"]: yield self @property - def prefix(self) -> Text: + def prefix(self) -> str: """ The whitespace and comments preceding this token in the input. 
""" return self._prefix @prefix.setter - def prefix(self, prefix: Text) -> None: + def prefix(self, prefix: str) -> None: self.changed() self._prefix = prefix @@ -503,10 +502,10 @@ def convert(gr: Grammar, raw_node: RawNode) -> NL: return Leaf(type, value or "", context=context) -_Results = Dict[Text, NL] +_Results = Dict[str, NL] -class BasePattern(object): +class BasePattern: """ A pattern is a tree matching pattern. @@ -526,19 +525,19 @@ class BasePattern(object): type: Optional[int] type = None # Node type (token if < 256, symbol if >= 256) content: Any = None # Optional content matching pattern - name: Optional[Text] = None # Optional name used to store match in results dict + name: Optional[str] = None # Optional name used to store match in results dict def __new__(cls, *args, **kwds): """Constructor that prevents BasePattern from being instantiated.""" assert cls is not BasePattern, "Cannot instantiate BasePattern" return object.__new__(cls) - def __repr__(self) -> Text: + def __repr__(self) -> str: assert self.type is not None args = [type_repr(self.type), self.content, self.name] while args and args[-1] is None: del args[-1] - return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, args))) + return "{}({})".format(self.__class__.__name__, ", ".join(map(repr, args))) def _submatch(self, node, results=None) -> bool: raise NotImplementedError @@ -602,8 +601,8 @@ class LeafPattern(BasePattern): def __init__( self, type: Optional[int] = None, - content: Optional[Text] = None, - name: Optional[Text] = None, + content: Optional[str] = None, + name: Optional[str] = None, ) -> None: """ Initializer. Takes optional type, content, and name. @@ -653,8 +652,8 @@ class NodePattern(BasePattern): def __init__( self, type: Optional[int] = None, - content: Optional[Iterable[Text]] = None, - name: Optional[Text] = None, + content: Optional[Iterable[str]] = None, + name: Optional[str] = None, ) -> None: """ Initializer. Takes optional type, content, and name. 
@@ -734,10 +733,10 @@ class WildcardPattern(BasePattern): def __init__( self, - content: Optional[Text] = None, + content: Optional[str] = None, min: int = 0, max: int = HUGE, - name: Optional[Text] = None, + name: Optional[str] = None, ) -> None: """ Initializer. diff --git a/test_requirements.txt b/test_requirements.txt index ef61a1210ee..a3d262bc53d 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -1,6 +1,6 @@ coverage >= 5.3 pre-commit pytest >= 6.1.1 -pytest-xdist >= 2.2.1, < 3.0.2 -pytest-cov >= 2.11.1 +pytest-xdist >= 3.0.2 +pytest-cov >= 4.1.0 tox diff --git a/tests/data/preview/cantfit.py b/tests/data/preview/cantfit.py index cade382e30d..0849374f776 100644 --- a/tests/data/preview/cantfit.py +++ b/tests/data/preview/cantfit.py @@ -79,14 +79,10 @@ ) # long arguments normal_name = normal_function_name( - ( - "but with super long string arguments that on their own exceed the line limit" - " so there's no way it can ever fit" - ), - ( - "eggs with spam and eggs and spam with eggs with spam and eggs and spam with" - " eggs with spam and eggs and spam with eggs" - ), + "but with super long string arguments that on their own exceed the line limit so" + " there's no way it can ever fit", + "eggs with spam and eggs and spam with eggs with spam and eggs and spam with eggs" + " with spam and eggs and spam with eggs", this_is_a_ridiculously_long_name_and_nobody_in_their_right_mind_would_use_one_like_it=0, ) string_variable_name = "a string that is waaaaaaaayyyyyyyy too long, even in parens, there's nothing you can do" # noqa diff --git a/tests/data/preview/long_strings.py b/tests/data/preview/long_strings.py index c68da3a8632..059148729d5 100644 --- a/tests/data/preview/long_strings.py +++ b/tests/data/preview/long_strings.py @@ -323,10 +323,8 @@ def foo(): y = "Short string" print( - ( - "This is a really long string inside of a print statement with extra arguments" - " attached at the end of it." 
- ), + "This is a really long string inside of a print statement with extra arguments" + " attached at the end of it.", x, y, z, @@ -501,15 +499,13 @@ def foo(): ) bad_split_func1( - ( - "But what should happen when code has already " - "been formatted but in the wrong way? Like " - "with a space at the end instead of the " - "beginning. Or what about when it is split too " - "soon? In the case of a split that is too " - "short, black will try to honer the custom " - "split." - ), + "But what should happen when code has already " + "been formatted but in the wrong way? Like " + "with a space at the end instead of the " + "beginning. Or what about when it is split too " + "soon? In the case of a split that is too " + "short, black will try to honer the custom " + "split.", xxx, yyy, zzz, @@ -612,11 +608,9 @@ def foo(): ) arg_comment_string = print( - ( # This comment gets thrown to the top. - "Long lines with inline comments which are apart of (and not the only member" - " of) an argument list should have their comments appended to the reformatted" - " string's enclosing left parentheses." - ), + "Long lines with inline comments which are apart of (and not the only member of) an" + " argument list should have their comments appended to the reformatted string's" + " enclosing left parentheses.", # This comment gets thrown to the top. "Arg #2", "Arg #3", "Arg #4", @@ -676,31 +670,23 @@ def foo(): ) func_with_bad_comma( - ( - "This is a really long string argument to a function that has a trailing comma" - " which should NOT be there." - ), + "This is a really long string argument to a function that has a trailing comma" + " which should NOT be there.", ) func_with_bad_comma( - ( # comment after comma - "This is a really long string argument to a function that has a trailing comma" - " which should NOT be there." 
- ), + "This is a really long string argument to a function that has a trailing comma" + " which should NOT be there.", # comment after comma ) func_with_bad_comma( - ( - "This is a really long string argument to a function that has a trailing comma" - " which should NOT be there." - ), + "This is a really long string argument to a function that has a trailing comma" + " which should NOT be there.", ) func_with_bad_comma( - ( # comment after comma - "This is a really long string argument to a function that has a trailing comma" - " which should NOT be there." - ), + "This is a really long string argument to a function that has a trailing comma" + " which should NOT be there.", # comment after comma ) func_with_bad_parens_that_wont_fit_in_one_line( diff --git a/tests/data/preview/long_strings__regression.py b/tests/data/preview/long_strings__regression.py index eead8c204a9..5f0646e6029 100644 --- a/tests/data/preview/long_strings__regression.py +++ b/tests/data/preview/long_strings__regression.py @@ -715,11 +715,9 @@ class A: def foo(): some_func_call( "xxxxxxxxxx", - ( - "xx {xxxxxxxxxxx}/xxxxxxxxxxx.xxx xxxx.xxx && xxxxxx -x " - '"xxxx xxxxxxx xxxxxx xxxx; xxxx xxxxxx_xxxxx xxxxxx xxxx; ' - "xxxx.xxxx_xxxxxx(['xxxx.xxx'], xxxx.xxxxxxx().xxxxxxxxxx)\" " - ), + "xx {xxxxxxxxxxx}/xxxxxxxxxxx.xxx xxxx.xxx && xxxxxx -x " + '"xxxx xxxxxxx xxxxxx xxxx; xxxx xxxxxx_xxxxx xxxxxx xxxx; ' + "xxxx.xxxx_xxxxxx(['xxxx.xxx'], xxxx.xxxxxxx().xxxxxxxxxx)\" ", None, ("xxxxxxxxxxx",), ), @@ -728,11 +726,9 @@ def foo(): class A: def foo(): some_func_call( - ( - "xx {xxxxxxxxxxx}/xxxxxxxxxxx.xxx xxxx.xxx && xxxxxx -x " - "xxxx, ('xxxxxxx xxxxxx xxxx, xxxx') xxxxxx_xxxxx xxxxxx xxxx; " - "xxxx.xxxx_xxxxxx(['xxxx.xxx'], xxxx.xxxxxxx().xxxxxxxxxx)\" " - ), + "xx {xxxxxxxxxxx}/xxxxxxxxxxx.xxx xxxx.xxx && xxxxxx -x " + "xxxx, ('xxxxxxx xxxxxx xxxx, xxxx') xxxxxx_xxxxx xxxxxx xxxx; " + "xxxx.xxxx_xxxxxx(['xxxx.xxx'], xxxx.xxxxxxx().xxxxxxxxxx)\" ", None, ("xxxxxxxxxxx",), ), @@ -850,10 
+846,8 @@ def foo(): ) lpar_and_rpar_have_comments = func_call( # LPAR Comment - ( # Comma Comment - "Long really ridiculous type of string that shouldn't really even exist at all." - " I mean commmme onnn!!!" - ), + "Long really ridiculous type of string that shouldn't really even exist at all. I" + " mean commmme onnn!!!", # Comma Comment ) # RPAR Comment cmd_fstring = ( diff --git a/tests/data/preview/no_blank_line_before_docstring.py b/tests/data/preview/no_blank_line_before_docstring.py new file mode 100644 index 00000000000..a37362de100 --- /dev/null +++ b/tests/data/preview/no_blank_line_before_docstring.py @@ -0,0 +1,58 @@ +def line_before_docstring(): + + """Please move me up""" + + +class LineBeforeDocstring: + + """Please move me up""" + + +class EvenIfThereIsAMethodAfter: + + """I'm the docstring""" + def method(self): + pass + + +class TwoLinesBeforeDocstring: + + + """I want to be treated the same as if I were closer""" + + +class MultilineDocstringsAsWell: + + """I'm so far + + and on so many lines... + """ + + +# output + + +def line_before_docstring(): + """Please move me up""" + + +class LineBeforeDocstring: + """Please move me up""" + + +class EvenIfThereIsAMethodAfter: + """I'm the docstring""" + + def method(self): + pass + + +class TwoLinesBeforeDocstring: + """I want to be treated the same as if I were closer""" + + +class MultilineDocstringsAsWell: + """I'm so far + + and on so many lines... 
+ """ diff --git a/tests/data/py_312/type_aliases.py b/tests/data/py_312/type_aliases.py new file mode 100644 index 00000000000..84e07e50fe2 --- /dev/null +++ b/tests/data/py_312/type_aliases.py @@ -0,0 +1,13 @@ +type A=int +type Gen[T]=list[T] + +type = aliased +print(type(42)) + +# output + +type A = int +type Gen[T] = list[T] + +type = aliased +print(type(42)) diff --git a/tests/data/py_312/type_params.py b/tests/data/py_312/type_params.py new file mode 100644 index 00000000000..5f8ec43267c --- /dev/null +++ b/tests/data/py_312/type_params.py @@ -0,0 +1,57 @@ +def func [T ](): pass +async def func [ T ] (): pass +class C[ T ] : pass + +def all_in[T : int,U : (bytes, str),* Ts,**P](): pass + +def really_long[WhatIsTheLongestTypeVarNameYouCanThinkOfEnoughToMakeBlackSplitThisLine](): pass + +def even_longer[WhatIsTheLongestTypeVarNameYouCanThinkOfEnoughToMakeBlackSplitThisLine: WhatIfItHadABound](): pass + +def it_gets_worse[WhatIsTheLongestTypeVarNameYouCanThinkOfEnoughToMakeBlackSplitThisLine, ItCouldBeGenericOverMultipleTypeVars](): pass + +def magic[Trailing, Comma,](): pass + +# output + + +def func[T](): + pass + + +async def func[T](): + pass + + +class C[T]: + pass + + +def all_in[T: int, U: (bytes, str), *Ts, **P](): + pass + + +def really_long[ + WhatIsTheLongestTypeVarNameYouCanThinkOfEnoughToMakeBlackSplitThisLine +](): + pass + + +def even_longer[ + WhatIsTheLongestTypeVarNameYouCanThinkOfEnoughToMakeBlackSplitThisLine: WhatIfItHadABound +](): + pass + + +def it_gets_worse[ + WhatIsTheLongestTypeVarNameYouCanThinkOfEnoughToMakeBlackSplitThisLine, + ItCouldBeGenericOverMultipleTypeVars, +](): + pass + + +def magic[ + Trailing, + Comma, +](): + pass diff --git a/tests/data/simple_cases/comments2.py b/tests/data/simple_cases/comments2.py index 37e185abf4f..1487dc4b6e2 100644 --- a/tests/data/simple_cases/comments2.py +++ b/tests/data/simple_cases/comments2.py @@ -154,6 +154,9 @@ def _init_host(self, parsed) -> None: not parsed.hostname.strip()): pass + +a 
= "type comment with trailing space" # type: str + ####################### ### SECTION COMMENT ### ####################### @@ -332,6 +335,8 @@ def _init_host(self, parsed) -> None: pass +a = "type comment with trailing space" # type: str + ####################### ### SECTION COMMENT ### ####################### diff --git a/tests/data/simple_cases/fstring.py b/tests/data/simple_cases/fstring.py index 4b33231c01c..60560309376 100644 --- a/tests/data/simple_cases/fstring.py +++ b/tests/data/simple_cases/fstring.py @@ -7,6 +7,8 @@ f"\"{f'{nested} inner'}\" outer" f"space between opening braces: { {a for a in (1, 2, 3)}}" f'Hello \'{tricky + "example"}\'' +f"Tried directories {str(rootdirs)} \ +but none started with prefix {parentdir_prefix}" # output @@ -19,3 +21,5 @@ f"\"{f'{nested} inner'}\" outer" f"space between opening braces: { {a for a in (1, 2, 3)}}" f'Hello \'{tricky + "example"}\'' +f"Tried directories {str(rootdirs)} \ +but none started with prefix {parentdir_prefix}" diff --git a/tests/data/simple_cases/ignore_pyi.py b/tests/data/simple_cases/ignore_pyi.py new file mode 100644 index 00000000000..3ef61079bfe --- /dev/null +++ b/tests/data/simple_cases/ignore_pyi.py @@ -0,0 +1,41 @@ +def f(): # type: ignore + ... + +class x: # some comment + ... + +class y: + ... # comment + +# whitespace doesn't matter (note the next line has a trailing space and tab) +class z: + ... + +def g(): + # hi + ... + +def h(): + ... + # bye + +# output + +def f(): # type: ignore + ... + +class x: # some comment + ... + +class y: ... # comment + +# whitespace doesn't matter (note the next line has a trailing space and tab) +class z: ... + +def g(): + # hi + ... + +def h(): + ... 
+ # bye diff --git a/tests/data/simple_cases/multiline_consecutive_open_parentheses_ignore.py b/tests/data/simple_cases/multiline_consecutive_open_parentheses_ignore.py new file mode 100644 index 00000000000..6ec8bb45408 --- /dev/null +++ b/tests/data/simple_cases/multiline_consecutive_open_parentheses_ignore.py @@ -0,0 +1,41 @@ +# This is a regression test. Issue #3737 + +a = ( # type: ignore + int( # type: ignore + int( # type: ignore + int( # type: ignore + 6 + ) + ) + ) +) + +b = ( + int( + 6 + ) +) + +print( "111") # type: ignore +print( "111" ) # type: ignore +print( "111" ) # type: ignore + + +# output + + +# This is a regression test. Issue #3737 + +a = ( # type: ignore + int( # type: ignore + int( # type: ignore + int(6) # type: ignore + ) + ) +) + +b = int(6) + +print("111") # type: ignore +print("111") # type: ignore +print("111") # type: ignore \ No newline at end of file diff --git a/tests/data/simple_cases/pep_604.py b/tests/data/simple_cases/pep_604.py new file mode 100644 index 00000000000..b68d59d6440 --- /dev/null +++ b/tests/data/simple_cases/pep_604.py @@ -0,0 +1,25 @@ +def some_very_long_name_function() -> my_module.Asdf | my_module.AnotherType | my_module.YetAnotherType | None: + pass + + +def some_very_long_name_function() -> my_module.Asdf | my_module.AnotherType | my_module.YetAnotherType | my_module.EvenMoreType | None: + pass + + +# output + + +def some_very_long_name_function() -> ( + my_module.Asdf | my_module.AnotherType | my_module.YetAnotherType | None +): + pass + + +def some_very_long_name_function() -> ( + my_module.Asdf + | my_module.AnotherType + | my_module.YetAnotherType + | my_module.EvenMoreType + | None +): + pass diff --git a/tests/test_black.py b/tests/test_black.py index e5e17777715..3b3ab721c5f 100644 --- a/tests/test_black.py +++ b/tests/test_black.py @@ -104,6 +104,7 @@ class FakeContext(click.Context): def __init__(self) -> None: self.default_map: Dict[str, Any] = {} + self.params: Dict[str, Any] = {} # Dummy root, 
since most of the tests don't care about it self.obj: Dict[str, Any] = {"root": PROJECT_ROOT} @@ -148,8 +149,7 @@ def test_empty_ff(self) -> None: tmp_file = Path(black.dump_to_file()) try: self.assertFalse(ff(tmp_file, write_back=black.WriteBack.YES)) - with open(tmp_file, encoding="utf8") as f: - actual = f.read() + actual = tmp_file.read_text(encoding="utf-8") finally: os.unlink(tmp_file) self.assertFormatEqual(expected, actual) @@ -177,7 +177,7 @@ def test_one_empty_line_ff(self) -> None: ff(tmp_file, mode=mode, write_back=black.WriteBack.YES) ) with open(tmp_file, "rb") as f: - actual = f.read().decode("utf8") + actual = f.read().decode("utf-8") finally: os.unlink(tmp_file) self.assertFormatEqual(expected, actual) @@ -197,7 +197,7 @@ def test_piping(self) -> None: f"--line-length={black.DEFAULT_LINE_LENGTH}", f"--config={EMPTY_CONFIG}", ], - input=BytesIO(source.encode("utf8")), + input=BytesIO(source.encode("utf-8")), ) self.assertEqual(result.exit_code, 0) self.assertFormatEqual(expected, result.output) @@ -207,8 +207,8 @@ def test_piping(self) -> None: def test_piping_diff(self) -> None: diff_header = re.compile( - r"(STDIN|STDOUT)\t\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d\.\d\d\d\d\d\d " - r"\+\d\d\d\d" + r"(STDIN|STDOUT)\t\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d\.\d\d\d\d\d\d" + r"\+\d\d:\d\d" ) source, _ = read_data("simple_cases", "expression.py") expected, _ = read_data("simple_cases", "expression.diff") @@ -220,7 +220,7 @@ def test_piping_diff(self) -> None: f"--config={EMPTY_CONFIG}", ] result = BlackRunner().invoke( - black.main, args, input=BytesIO(source.encode("utf8")) + black.main, args, input=BytesIO(source.encode("utf-8")) ) self.assertEqual(result.exit_code, 0) actual = diff_header.sub(DETERMINISTIC_HEADER, result.output) @@ -238,7 +238,7 @@ def test_piping_diff_with_color(self) -> None: f"--config={EMPTY_CONFIG}", ] result = BlackRunner().invoke( - black.main, args, input=BytesIO(source.encode("utf8")) + black.main, args, 
input=BytesIO(source.encode("utf-8")) ) actual = result.output # Again, the contents are checked in a different test, so only look for colors. @@ -271,13 +271,21 @@ def test_pep_572_version_detection(self) -> None: versions = black.detect_target_versions(root) self.assertIn(black.TargetVersion.PY38, versions) + def test_pep_695_version_detection(self) -> None: + for file in ("type_aliases", "type_params"): + source, _ = read_data("py_312", file) + root = black.lib2to3_parse(source) + features = black.get_features_used(root) + self.assertIn(black.Feature.TYPE_PARAMS, features) + versions = black.detect_target_versions(root) + self.assertIn(black.TargetVersion.PY312, versions) + def test_expression_ff(self) -> None: source, expected = read_data("simple_cases", "expression.py") tmp_file = Path(black.dump_to_file(source)) try: self.assertTrue(ff(tmp_file, write_back=black.WriteBack.YES)) - with open(tmp_file, encoding="utf8") as f: - actual = f.read() + actual = tmp_file.read_text(encoding="utf-8") finally: os.unlink(tmp_file) self.assertFormatEqual(expected, actual) @@ -291,7 +299,7 @@ def test_expression_diff(self) -> None: tmp_file = Path(black.dump_to_file(source)) diff_header = re.compile( rf"{re.escape(str(tmp_file))}\t\d\d\d\d-\d\d-\d\d " - r"\d\d:\d\d:\d\d\.\d\d\d\d\d\d \+\d\d\d\d" + r"\d\d:\d\d:\d\d\.\d\d\d\d\d\d\+\d\d:\d\d" ) try: result = BlackRunner().invoke( @@ -380,8 +388,7 @@ def test_skip_source_first_line(self) -> None: black.main, [str(tmp_file), "-x", f"--config={EMPTY_CONFIG}"] ) self.assertEqual(result.exit_code, 0) - with open(tmp_file, encoding="utf8") as f: - actual = f.read() + actual = tmp_file.read_text(encoding="utf-8") self.assertFormatEqual(source, actual) def test_skip_source_first_line_when_mixing_newlines(self) -> None: @@ -402,7 +409,7 @@ def test_skip_magic_trailing_comma(self) -> None: tmp_file = Path(black.dump_to_file(source)) diff_header = re.compile( rf"{re.escape(str(tmp_file))}\t\d\d\d\d-\d\d-\d\d " - 
r"\d\d:\d\d:\d\d\.\d\d\d\d\d\d \+\d\d\d\d" + r"\d\d:\d\d:\d\d\.\d\d\d\d\d\d\+\d\d:\d\d" ) try: result = BlackRunner().invoke( @@ -501,6 +508,8 @@ def _mocked_calls() -> bool: "pathlib.Path.cwd", return_value=working_directory ), patch("pathlib.Path.is_dir", side_effect=mock_n_calls([True])): ctx = FakeContext() + # Note that the root folder (project_root) isn't the folder + # named "root" (aka working_directory) ctx.obj["root"] = project_root report = MagicMock(verbose=True) black.get_sources( @@ -520,7 +529,7 @@ def _mocked_calls() -> bool: for _, mock_args, _ in report.path_ignored.mock_calls ), "A symbolic link was reported." report.path_ignored.assert_called_once_with( - Path("child", "b.py"), "matches a .gitignore file content" + Path("root", "child", "b.py"), "matches a .gitignore file content" ) def test_report_verbose(self) -> None: @@ -567,10 +576,8 @@ def err(msg: str, **kwargs: Any) -> None: self.assertEqual(err_lines[-1], "error: cannot format e1: boom") self.assertEqual( unstyle(str(report)), - ( - "1 file reformatted, 2 files left unchanged, 1 file failed to" - " reformat." - ), + "1 file reformatted, 2 files left unchanged, 1 file failed to" + " reformat.", ) self.assertEqual(report.return_code, 123) report.done(Path("f3"), black.Changed.YES) @@ -579,10 +586,8 @@ def err(msg: str, **kwargs: Any) -> None: self.assertEqual(out_lines[-1], "reformatted f3") self.assertEqual( unstyle(str(report)), - ( - "2 files reformatted, 2 files left unchanged, 1 file failed to" - " reformat." - ), + "2 files reformatted, 2 files left unchanged, 1 file failed to" + " reformat.", ) self.assertEqual(report.return_code, 123) report.failed(Path("e2"), "boom") @@ -591,10 +596,8 @@ def err(msg: str, **kwargs: Any) -> None: self.assertEqual(err_lines[-1], "error: cannot format e2: boom") self.assertEqual( unstyle(str(report)), - ( - "2 files reformatted, 2 files left unchanged, 2 files failed to" - " reformat." 
- ), + "2 files reformatted, 2 files left unchanged, 2 files failed to" + " reformat.", ) self.assertEqual(report.return_code, 123) report.path_ignored(Path("wat"), "no match") @@ -603,10 +606,8 @@ def err(msg: str, **kwargs: Any) -> None: self.assertEqual(out_lines[-1], "wat ignored: no match") self.assertEqual( unstyle(str(report)), - ( - "2 files reformatted, 2 files left unchanged, 2 files failed to" - " reformat." - ), + "2 files reformatted, 2 files left unchanged, 2 files failed to" + " reformat.", ) self.assertEqual(report.return_code, 123) report.done(Path("f4"), black.Changed.NO) @@ -615,28 +616,22 @@ def err(msg: str, **kwargs: Any) -> None: self.assertEqual(out_lines[-1], "f4 already well formatted, good job.") self.assertEqual( unstyle(str(report)), - ( - "2 files reformatted, 3 files left unchanged, 2 files failed to" - " reformat." - ), + "2 files reformatted, 3 files left unchanged, 2 files failed to" + " reformat.", ) self.assertEqual(report.return_code, 123) report.check = True self.assertEqual( unstyle(str(report)), - ( - "2 files would be reformatted, 3 files would be left unchanged, 2" - " files would fail to reformat." - ), + "2 files would be reformatted, 3 files would be left unchanged, 2" + " files would fail to reformat.", ) report.check = False report.diff = True self.assertEqual( unstyle(str(report)), - ( - "2 files would be reformatted, 3 files would be left unchanged, 2" - " files would fail to reformat." - ), + "2 files would be reformatted, 3 files would be left unchanged, 2" + " files would fail to reformat.", ) def test_report_quiet(self) -> None: @@ -678,10 +673,8 @@ def err(msg: str, **kwargs: Any) -> None: self.assertEqual(err_lines[-1], "error: cannot format e1: boom") self.assertEqual( unstyle(str(report)), - ( - "1 file reformatted, 2 files left unchanged, 1 file failed to" - " reformat." 
- ), + "1 file reformatted, 2 files left unchanged, 1 file failed to" + " reformat.", ) self.assertEqual(report.return_code, 123) report.done(Path("f3"), black.Changed.YES) @@ -689,10 +682,8 @@ def err(msg: str, **kwargs: Any) -> None: self.assertEqual(len(err_lines), 1) self.assertEqual( unstyle(str(report)), - ( - "2 files reformatted, 2 files left unchanged, 1 file failed to" - " reformat." - ), + "2 files reformatted, 2 files left unchanged, 1 file failed to" + " reformat.", ) self.assertEqual(report.return_code, 123) report.failed(Path("e2"), "boom") @@ -701,10 +692,8 @@ def err(msg: str, **kwargs: Any) -> None: self.assertEqual(err_lines[-1], "error: cannot format e2: boom") self.assertEqual( unstyle(str(report)), - ( - "2 files reformatted, 2 files left unchanged, 2 files failed to" - " reformat." - ), + "2 files reformatted, 2 files left unchanged, 2 files failed to" + " reformat.", ) self.assertEqual(report.return_code, 123) report.path_ignored(Path("wat"), "no match") @@ -712,10 +701,8 @@ def err(msg: str, **kwargs: Any) -> None: self.assertEqual(len(err_lines), 2) self.assertEqual( unstyle(str(report)), - ( - "2 files reformatted, 2 files left unchanged, 2 files failed to" - " reformat." - ), + "2 files reformatted, 2 files left unchanged, 2 files failed to" + " reformat.", ) self.assertEqual(report.return_code, 123) report.done(Path("f4"), black.Changed.NO) @@ -723,28 +710,22 @@ def err(msg: str, **kwargs: Any) -> None: self.assertEqual(len(err_lines), 2) self.assertEqual( unstyle(str(report)), - ( - "2 files reformatted, 3 files left unchanged, 2 files failed to" - " reformat." - ), + "2 files reformatted, 3 files left unchanged, 2 files failed to" + " reformat.", ) self.assertEqual(report.return_code, 123) report.check = True self.assertEqual( unstyle(str(report)), - ( - "2 files would be reformatted, 3 files would be left unchanged, 2" - " files would fail to reformat." 
- ), + "2 files would be reformatted, 3 files would be left unchanged, 2" + " files would fail to reformat.", ) report.check = False report.diff = True self.assertEqual( unstyle(str(report)), - ( - "2 files would be reformatted, 3 files would be left unchanged, 2" - " files would fail to reformat." - ), + "2 files would be reformatted, 3 files would be left unchanged, 2" + " files would fail to reformat.", ) def test_report_normal(self) -> None: @@ -788,10 +769,8 @@ def err(msg: str, **kwargs: Any) -> None: self.assertEqual(err_lines[-1], "error: cannot format e1: boom") self.assertEqual( unstyle(str(report)), - ( - "1 file reformatted, 2 files left unchanged, 1 file failed to" - " reformat." - ), + "1 file reformatted, 2 files left unchanged, 1 file failed to" + " reformat.", ) self.assertEqual(report.return_code, 123) report.done(Path("f3"), black.Changed.YES) @@ -800,10 +779,8 @@ def err(msg: str, **kwargs: Any) -> None: self.assertEqual(out_lines[-1], "reformatted f3") self.assertEqual( unstyle(str(report)), - ( - "2 files reformatted, 2 files left unchanged, 1 file failed to" - " reformat." - ), + "2 files reformatted, 2 files left unchanged, 1 file failed to" + " reformat.", ) self.assertEqual(report.return_code, 123) report.failed(Path("e2"), "boom") @@ -812,10 +789,8 @@ def err(msg: str, **kwargs: Any) -> None: self.assertEqual(err_lines[-1], "error: cannot format e2: boom") self.assertEqual( unstyle(str(report)), - ( - "2 files reformatted, 2 files left unchanged, 2 files failed to" - " reformat." - ), + "2 files reformatted, 2 files left unchanged, 2 files failed to" + " reformat.", ) self.assertEqual(report.return_code, 123) report.path_ignored(Path("wat"), "no match") @@ -823,10 +798,8 @@ def err(msg: str, **kwargs: Any) -> None: self.assertEqual(len(err_lines), 2) self.assertEqual( unstyle(str(report)), - ( - "2 files reformatted, 2 files left unchanged, 2 files failed to" - " reformat." 
- ), + "2 files reformatted, 2 files left unchanged, 2 files failed to" + " reformat.", ) self.assertEqual(report.return_code, 123) report.done(Path("f4"), black.Changed.NO) @@ -834,28 +807,22 @@ def err(msg: str, **kwargs: Any) -> None: self.assertEqual(len(err_lines), 2) self.assertEqual( unstyle(str(report)), - ( - "2 files reformatted, 3 files left unchanged, 2 files failed to" - " reformat." - ), + "2 files reformatted, 3 files left unchanged, 2 files failed to" + " reformat.", ) self.assertEqual(report.return_code, 123) report.check = True self.assertEqual( unstyle(str(report)), - ( - "2 files would be reformatted, 3 files would be left unchanged, 2" - " files would fail to reformat." - ), + "2 files would be reformatted, 3 files would be left unchanged, 2" + " files would fail to reformat.", ) report.check = False report.diff = True self.assertEqual( unstyle(str(report)), - ( - "2 files would be reformatted, 3 files would be left unchanged, 2" - " files would fail to reformat." - ), + "2 files would be reformatted, 3 files would be left unchanged, 2" + " files would fail to reformat.", ) def test_lib2to3_parse(self) -> None: @@ -1113,7 +1080,7 @@ def test_works_in_mono_process_only_environment(self) -> None: (workspace / "one.py").resolve(), (workspace / "two.py").resolve(), ]: - f.write_text('print("hello")\n') + f.write_text('print("hello")\n', encoding="utf-8") self.invokeBlack([str(workspace)]) @event_loop() @@ -1150,11 +1117,9 @@ def test_single_file_force_pyi(self) -> None: contents, expected = read_data("miscellaneous", "force_pyi") with cache_dir() as workspace: path = (workspace / "file.py").resolve() - with open(path, "w") as fh: - fh.write(contents) + path.write_text(contents, encoding="utf-8") self.invokeBlack([str(path), "--pyi"]) - with open(path, "r") as fh: - actual = fh.read() + actual = path.read_text(encoding="utf-8") # verify cache with --pyi is separate pyi_cache = black.read_cache(pyi_mode) self.assertIn(str(path), pyi_cache) @@ 
-1175,12 +1140,10 @@ def test_multi_file_force_pyi(self) -> None: (workspace / "file2.py").resolve(), ] for path in paths: - with open(path, "w") as fh: - fh.write(contents) + path.write_text(contents, encoding="utf-8") self.invokeBlack([str(p) for p in paths] + ["--pyi"]) for path in paths: - with open(path, "r") as fh: - actual = fh.read() + actual = path.read_text(encoding="utf-8") self.assertEqual(actual, expected) # verify cache with --pyi is separate pyi_cache = black.read_cache(pyi_mode) @@ -1192,7 +1155,7 @@ def test_multi_file_force_pyi(self) -> None: def test_pipe_force_pyi(self) -> None: source, expected = read_data("miscellaneous", "force_pyi") result = CliRunner().invoke( - black.main, ["-", "-q", "--pyi"], input=BytesIO(source.encode("utf8")) + black.main, ["-", "-q", "--pyi"], input=BytesIO(source.encode("utf-8")) ) self.assertEqual(result.exit_code, 0) actual = result.output @@ -1204,11 +1167,9 @@ def test_single_file_force_py36(self) -> None: source, expected = read_data("miscellaneous", "force_py36") with cache_dir() as workspace: path = (workspace / "file.py").resolve() - with open(path, "w") as fh: - fh.write(source) + path.write_text(source, encoding="utf-8") self.invokeBlack([str(path), *PY36_ARGS]) - with open(path, "r") as fh: - actual = fh.read() + actual = path.read_text(encoding="utf-8") # verify cache with --target-version is separate py36_cache = black.read_cache(py36_mode) self.assertIn(str(path), py36_cache) @@ -1227,12 +1188,10 @@ def test_multi_file_force_py36(self) -> None: (workspace / "file2.py").resolve(), ] for path in paths: - with open(path, "w") as fh: - fh.write(source) + path.write_text(source, encoding="utf-8") self.invokeBlack([str(p) for p in paths] + PY36_ARGS) for path in paths: - with open(path, "r") as fh: - actual = fh.read() + actual = path.read_text(encoding="utf-8") self.assertEqual(actual, expected) # verify cache with --target-version is separate pyi_cache = black.read_cache(py36_mode) @@ -1246,7 +1205,7 @@ 
def test_pipe_force_py36(self) -> None: result = CliRunner().invoke( black.main, ["-", "-q", "--target-version=py36"], - input=BytesIO(source.encode("utf8")), + input=BytesIO(source.encode("utf-8")), ) self.assertEqual(result.exit_code, 0) actual = result.output @@ -1475,11 +1434,11 @@ def test_preserves_line_endings_via_stdin(self) -> None: contents = nl.join(["def f( ):", " pass"]) runner = BlackRunner() result = runner.invoke( - black.main, ["-", "--fast"], input=BytesIO(contents.encode("utf8")) + black.main, ["-", "--fast"], input=BytesIO(contents.encode("utf-8")) ) self.assertEqual(result.exit_code, 0) output = result.stdout_bytes - self.assertIn(nl.encode("utf8"), output) + self.assertIn(nl.encode("utf-8"), output) if nl == "\n": self.assertNotIn(b"\r\n", output) @@ -1498,30 +1457,6 @@ def test_assert_equivalent_different_asts(self) -> None: with self.assertRaises(AssertionError): black.assert_equivalent("{}", "None") - def test_shhh_click(self) -> None: - try: - from click import _unicodefun # type: ignore - except ImportError: - self.skipTest("Incompatible Click version") - - if not hasattr(_unicodefun, "_verify_python_env"): - self.skipTest("Incompatible Click version") - - # First, let's see if Click is crashing with a preferred ASCII charset. - with patch("locale.getpreferredencoding") as gpe: - gpe.return_value = "ASCII" - with self.assertRaises(RuntimeError): - _unicodefun._verify_python_env() - # Now, let's silence Click... - black.patch_click() - # ...and confirm it's silent. 
- with patch("locale.getpreferredencoding") as gpe: - gpe.return_value = "ASCII" - try: - _unicodefun._verify_python_env() - except RuntimeError as re: - self.fail(f"`patch_click()` failed, exception still raised: {re}") - def test_root_logger_not_used_directly(self) -> None: def fail(*args: Any, **kwargs: Any) -> None: self.fail("Record created with root logger") @@ -1575,14 +1510,25 @@ def test_infer_target_version(self) -> None: for version, expected in [ ("3.6", [TargetVersion.PY36]), ("3.11.0rc1", [TargetVersion.PY311]), - (">=3.10", [TargetVersion.PY310, TargetVersion.PY311]), - (">=3.10.6", [TargetVersion.PY310, TargetVersion.PY311]), + (">=3.10", [TargetVersion.PY310, TargetVersion.PY311, TargetVersion.PY312]), + ( + ">=3.10.6", + [TargetVersion.PY310, TargetVersion.PY311, TargetVersion.PY312], + ), ("<3.6", [TargetVersion.PY33, TargetVersion.PY34, TargetVersion.PY35]), (">3.7,<3.10", [TargetVersion.PY38, TargetVersion.PY39]), - (">3.7,!=3.8,!=3.9", [TargetVersion.PY310, TargetVersion.PY311]), + ( + ">3.7,!=3.8,!=3.9", + [TargetVersion.PY310, TargetVersion.PY311, TargetVersion.PY312], + ), ( "> 3.9.4, != 3.10.3", - [TargetVersion.PY39, TargetVersion.PY310, TargetVersion.PY311], + [ + TargetVersion.PY39, + TargetVersion.PY310, + TargetVersion.PY311, + TargetVersion.PY312, + ], ), ( "!=3.3,!=3.4", @@ -1594,6 +1540,7 @@ def test_infer_target_version(self) -> None: TargetVersion.PY39, TargetVersion.PY310, TargetVersion.PY311, + TargetVersion.PY312, ], ), ( @@ -1608,6 +1555,7 @@ def test_infer_target_version(self) -> None: TargetVersion.PY39, TargetVersion.PY310, TargetVersion.PY311, + TargetVersion.PY312, ], ), ("==3.8.*", [TargetVersion.PY38]), @@ -1640,6 +1588,39 @@ def test_read_pyproject_toml(self) -> None: self.assertEqual(config["exclude"], r"\.pyi?$") self.assertEqual(config["include"], r"\.py?$") + def test_read_pyproject_toml_from_stdin(self) -> None: + with TemporaryDirectory() as workspace: + root = Path(workspace) + + src_dir = root / "src" + 
src_dir.mkdir() + + src_pyproject = src_dir / "pyproject.toml" + src_pyproject.touch() + + test_toml_content = (THIS_DIR / "test.toml").read_text(encoding="utf-8") + src_pyproject.write_text(test_toml_content, encoding="utf-8") + + src_python = src_dir / "foo.py" + src_python.touch() + + fake_ctx = FakeContext() + fake_ctx.params["src"] = ("-",) + fake_ctx.params["stdin_filename"] = str(src_python) + + with change_directory(root): + black.read_pyproject_toml(fake_ctx, FakeParameter(), None) + + config = fake_ctx.default_map + self.assertEqual(config["verbose"], "1") + self.assertEqual(config["check"], "no") + self.assertEqual(config["diff"], "y") + self.assertEqual(config["color"], "True") + self.assertEqual(config["line_length"], "79") + self.assertEqual(config["target_version"], ["py36", "py37", "py38"]) + self.assertEqual(config["exclude"], r"\.pyi?$") + self.assertEqual(config["include"], r"\.py?$") + @pytest.mark.incompatible_with_mypyc def test_find_project_root(self) -> None: with TemporaryDirectory() as workspace: @@ -1770,7 +1751,7 @@ def test_bpo_2142_workaround(self) -> None: tmp_file = Path(black.dump_to_file(source, ensure_final_newline=False)) diff_header = re.compile( rf"{re.escape(str(tmp_file))}\t\d\d\d\d-\d\d-\d\d " - r"\d\d:\d\d:\d\d\.\d\d\d\d\d\d \+\d\d\d\d" + r"\d\d:\d\d:\d\d\.\d\d\d\d\d\d\+\d\d:\d\d" ) try: result = BlackRunner().invoke(black.main, ["--diff", str(tmp_file)]) @@ -1971,10 +1952,10 @@ def test_cache_broken_file(self) -> None: mode = DEFAULT_MODE with cache_dir() as workspace: cache_file = get_cache_file(mode) - cache_file.write_text("this is not a pickle") + cache_file.write_text("this is not a pickle", encoding="utf-8") assert black.read_cache(mode) == {} src = (workspace / "test.py").resolve() - src.write_text("print('hello')") + src.write_text("print('hello')", encoding="utf-8") invokeBlack([str(src)]) cache = black.read_cache(mode) assert str(src) in cache @@ -1983,10 +1964,10 @@ def 
test_cache_single_file_already_cached(self) -> None: mode = DEFAULT_MODE with cache_dir() as workspace: src = (workspace / "test.py").resolve() - src.write_text("print('hello')") + src.write_text("print('hello')", encoding="utf-8") black.write_cache({}, [src], mode) invokeBlack([str(src)]) - assert src.read_text() == "print('hello')" + assert src.read_text(encoding="utf-8") == "print('hello')" @event_loop() def test_cache_multiple_files(self) -> None: @@ -1995,17 +1976,13 @@ def test_cache_multiple_files(self) -> None: "concurrent.futures.ProcessPoolExecutor", new=ThreadPoolExecutor ): one = (workspace / "one.py").resolve() - with one.open("w") as fobj: - fobj.write("print('hello')") + one.write_text("print('hello')", encoding="utf-8") two = (workspace / "two.py").resolve() - with two.open("w") as fobj: - fobj.write("print('hello')") + two.write_text("print('hello')", encoding="utf-8") black.write_cache({}, [one], mode) invokeBlack([str(workspace)]) - with one.open("r") as fobj: - assert fobj.read() == "print('hello')" - with two.open("r") as fobj: - assert fobj.read() == 'print("hello")\n' + assert one.read_text(encoding="utf-8") == "print('hello')" + assert two.read_text(encoding="utf-8") == 'print("hello")\n' cache = black.read_cache(mode) assert str(one) in cache assert str(two) in cache @@ -2015,8 +1992,7 @@ def test_no_cache_when_writeback_diff(self, color: bool) -> None: mode = DEFAULT_MODE with cache_dir() as workspace: src = (workspace / "test.py").resolve() - with src.open("w") as fobj: - fobj.write("print('hello')") + src.write_text("print('hello')", encoding="utf-8") with patch("black.read_cache") as read_cache, patch( "black.write_cache" ) as write_cache: @@ -2035,8 +2011,7 @@ def test_output_locking_when_writeback_diff(self, color: bool) -> None: with cache_dir() as workspace: for tag in range(0, 4): src = (workspace / f"test{tag}.py").resolve() - with src.open("w") as fobj: - fobj.write("print('hello')") + src.write_text("print('hello')", 
encoding="utf-8") with patch( "black.concurrency.Manager", wraps=multiprocessing.Manager ) as mgr: @@ -2106,11 +2081,9 @@ def test_failed_formatting_does_not_get_cached(self) -> None: "concurrent.futures.ProcessPoolExecutor", new=ThreadPoolExecutor ): failing = (workspace / "failing.py").resolve() - with failing.open("w") as fobj: - fobj.write("not actually python") + failing.write_text("not actually python", encoding="utf-8") clean = (workspace / "clean.py").resolve() - with clean.open("w") as fobj: - fobj.write('print("hello")\n') + clean.write_text('print("hello")\n', encoding="utf-8") invokeBlack([str(workspace)], exit_code=123) cache = black.read_cache(mode) assert str(failing) not in cache diff --git a/tests/test_blackd.py b/tests/test_blackd.py index 5b6461f7685..325bd7dd5aa 100644 --- a/tests/test_blackd.py +++ b/tests/test_blackd.py @@ -114,7 +114,7 @@ async def test_blackd_pyi(self) -> None: @unittest_run_loop async def test_blackd_diff(self) -> None: diff_header = re.compile( - r"(In|Out)\t\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d\.\d\d\d\d\d\d \+\d\d\d\d" + r"(In|Out)\t\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d\.\d\d\d\d\d\d\+\d\d:\d\d" ) source, _ = read_data("miscellaneous", "blackd_diff") diff --git a/tests/test_format.py b/tests/test_format.py index 5a7b3bb6762..fb4d8eb4346 100644 --- a/tests/test_format.py +++ b/tests/test_format.py @@ -33,9 +33,10 @@ def check_file( @pytest.mark.parametrize("filename", all_data_cases("simple_cases")) def test_simple_format(filename: str) -> None: magic_trailing_comma = filename != "skip_magic_trailing_comma" - check_file( - "simple_cases", filename, black.Mode(magic_trailing_comma=magic_trailing_comma) + mode = black.Mode( + magic_trailing_comma=magic_trailing_comma, is_pyi=filename.endswith("_pyi") ) + check_file("simple_cases", filename, mode) @pytest.mark.parametrize("filename", all_data_cases("preview")) @@ -134,6 +135,13 @@ def test_python_311(filename: str) -> None: assert_format(source, expected, mode, minimum_version=(3, 
11)) +@pytest.mark.parametrize("filename", all_data_cases("py_312")) +def test_python_312(filename: str) -> None: + source, expected = read_data("py_312", filename) + mode = black.Mode(target_versions={black.TargetVersion.PY312}) + assert_format(source, expected, mode, minimum_version=(3, 12)) + + @pytest.mark.parametrize("filename", all_data_cases("fast")) def test_fast_cases(filename: str) -> None: source, expected = read_data("fast", filename) diff --git a/tests/test_ipynb.py b/tests/test_ipynb.py index 7aa2e91dd00..91e7901125b 100644 --- a/tests/test_ipynb.py +++ b/tests/test_ipynb.py @@ -439,8 +439,7 @@ def test_cache_isnt_written_if_no_jupyter_deps_single( jupyter_dependencies_are_installed.cache_clear() nb = get_case_path("jupyter", "notebook_trailing_newline.ipynb") tmp_nb = tmp_path / "notebook.ipynb" - with open(nb) as src, open(tmp_nb, "w") as dst: - dst.write(src.read()) + tmp_nb.write_bytes(nb.read_bytes()) monkeypatch.setattr( "black.jupyter_dependencies_are_installed", lambda verbose, quiet: False ) @@ -465,8 +464,7 @@ def test_cache_isnt_written_if_no_jupyter_deps_dir( jupyter_dependencies_are_installed.cache_clear() nb = get_case_path("jupyter", "notebook_trailing_newline.ipynb") tmp_nb = tmp_path / "notebook.ipynb" - with open(nb) as src, open(tmp_nb, "w") as dst: - dst.write(src.read()) + tmp_nb.write_bytes(nb.read_bytes()) monkeypatch.setattr( "black.files.jupyter_dependencies_are_installed", lambda verbose, quiet: False ) @@ -483,8 +481,7 @@ def test_cache_isnt_written_if_no_jupyter_deps_dir( def test_ipynb_flag(tmp_path: pathlib.Path) -> None: nb = get_case_path("jupyter", "notebook_trailing_newline.ipynb") tmp_nb = tmp_path / "notebook.a_file_extension_which_is_definitely_not_ipynb" - with open(nb) as src, open(tmp_nb, "w") as dst: - dst.write(src.read()) + tmp_nb.write_bytes(nb.read_bytes()) result = runner.invoke( main, [ diff --git a/tests/test_no_ipynb.py b/tests/test_no_ipynb.py index b63ecde8896..12c820def39 100644 --- 
a/tests/test_no_ipynb.py +++ b/tests/test_no_ipynb.py @@ -27,8 +27,7 @@ def test_ipynb_diff_with_no_change_dir(tmp_path: pathlib.Path) -> None: runner = CliRunner() nb = get_case_path("jupyter", "notebook_trailing_newline.ipynb") tmp_nb = tmp_path / "notebook.ipynb" - with open(nb) as src, open(tmp_nb, "w") as dst: - dst.write(src.read()) + tmp_nb.write_bytes(nb.read_bytes()) result = runner.invoke(main, [str(tmp_path)]) expected_output = ( "Skipping .ipynb files as Jupyter dependencies are not installed.\n" diff --git a/tox.ini b/tox.ini index 4934514264b..d34dbbc71db 100644 --- a/tox.ini +++ b/tox.ini @@ -3,7 +3,9 @@ isolated_build = true envlist = {,ci-}py{37,38,39,310,311,py3},fuzz,run_self [testenv] -setenv = PYTHONPATH = {toxinidir}/src +setenv = + PYTHONPATH = {toxinidir}/src + PYTHONWARNDEFAULTENCODING = 1 skip_install = True # We use `recreate=True` because otherwise, on the second run of `tox -e py`, # the `no_jupyter` tests would run with the jupyter extra dependencies installed. @@ -37,19 +39,15 @@ deps = ; remove this when pypy releases the bugfix commands = pip install -e .[d] - coverage erase pytest tests \ --run-optional no_jupyter \ !ci: --numprocesses auto \ - ci: --numprocesses 1 \ - --cov {posargs} + ci: --numprocesses 1 pip install -e .[jupyter] pytest tests --run-optional jupyter \ -m jupyter \ !ci: --numprocesses auto \ - ci: --numprocesses 1 \ - --cov --cov-append {posargs} - coverage report + ci: --numprocesses 1 [testenv:{,ci-}311] setenv =