diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000000000..5b6455590f091 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,15 @@ + + +## Summary + + + +## Test Plan + + diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index e31f5a418efcc..d1c74da1bcbc3 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -183,18 +183,8 @@ jobs: - name: "Install cargo-udeps" uses: taiki-e/install-action@cargo-udeps - name: "Run cargo-udeps" - run: | - unused_dependencies=$(cargo +nightly-2023-03-30 udeps > unused.txt && cat unused.txt | cut -d $'\n' -f 2-) - if [ -z "$unused_dependencies" ]; then - echo "No unused dependencies found" > $GITHUB_STEP_SUMMARY - exit 0 - else - echo "Found unused dependencies" > $GITHUB_STEP_SUMMARY - echo '```console' >> $GITHUB_STEP_SUMMARY - echo "$unused_dependencies" >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - exit 1 - fi + run: cargo +nightly-2023-03-30 udeps + python-package: name: "python package" diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index 8722d251007cc..cb8f646df5126 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -1,9 +1,9 @@ name: mkdocs on: - release: - types: [published] workflow_dispatch: + release: + types: [ published ] jobs: mkdocs: diff --git a/.github/workflows/flake8-to-ruff.yaml b/.github/workflows/flake8-to-ruff.yaml index c207e1884ac48..28fa643788fce 100644 --- a/.github/workflows/flake8-to-ruff.yaml +++ b/.github/workflows/flake8-to-ruff.yaml @@ -52,7 +52,7 @@ jobs: - name: "Build wheels - universal2" uses: PyO3/maturin-action@v1 with: - args: --release --universal2 --out dist -m ./${{ env.CRATE_NAME }}/Cargo.toml + args: --release --target universal2-apple-darwin --out dist -m ./${{ env.CRATE_NAME }}/Cargo.toml - name: "Install built wheel - universal2" run: | pip install dist/${{ env.CRATE_NAME }}-*universal2.whl --force-reinstall diff --git a/.github/workflows/playground.yaml b/.github/workflows/playground.yaml index 40060bcbb7185..dcb582815ea7a 100644 --- a/.github/workflows/playground.yaml +++ b/.github/workflows/playground.yaml @@ -2,8 +2,8 @@ name: "[Playground] Release" on: workflow_dispatch: - push: - branches: [main] + release: + types: [ published ] env: CARGO_INCREMENTAL: 0 diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 0f726627a545c..e37b36374ec0f 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -3,7 +3,7 @@ name: "[ruff] Release" on: workflow_dispatch: release: - types: [published] + types: [ published ] concurrency: group: ${{ github.workflow }}-${{ github.ref }} @@ -94,7 +94,7 @@ jobs: - name: "Build wheels - universal2" uses: PyO3/maturin-action@v1 with: - args: --release --universal2 --out dist + args: --release --target universal2-apple-darwin --out dist - name: "Test wheel - universal2" run: | pip install dist/${{ env.PACKAGE_NAME }}-*universal2.whl --force-reinstall @@ -406,9 +406,6 @@ jobs: run: | pip install --upgrade twine twine upload --skip-existing * - - name: "Update pre-commit mirror" - run: | - curl -X POST -H "Accept: application/vnd.github+json" -H "Authorization: Bearer ${{ secrets.RUFF_PRE_COMMIT_PAT }}" -H "X-GitHub-Api-Version: 2022-11-28" https://api.github.com/repos/charliermarsh/ruff-pre-commit/dispatches --data '{"event_type": "pypi_release"}' - uses: actions/download-artifact@v3 with: name: binaries @@ -417,3 +414,22 @@ jobs: uses: 
softprops/action-gh-release@v1 with: files: binaries/* + + # After the release has been published, we update downstream repositories + # This is separate because if this fails the release is still fine, we just need to do some manual workflow triggers + update-dependents: + name: Release + runs-on: ubuntu-latest + needs: release + steps: + - name: "Update pre-commit mirror" + uses: actions/github-script@v6 + with: + github-token: ${{ secrets.RUFF_PRE_COMMIT_PAT }} + script: | + github.rest.actions.createWorkflowDispatch({ + owner: 'astral-sh', + repo: 'ruff-pre-commit', + workflow_id: 'main.yml', + ref: 'main', + }) diff --git a/.gitignore b/.gitignore index c501ca7e20f7a..de3c456e67881 100644 --- a/.gitignore +++ b/.gitignore @@ -1,10 +1,10 @@ -# Local cache -.ruff_cache crates/ruff/resources/test/cpython mkdocs.yml .overrides ruff-old github_search*.jsonl +schemastore +.venv* ### # Rust.gitignore diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7ee34d6d613f0..4283cdcfefa53 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -21,6 +21,7 @@ repos: - --disable - MD013 # line-length - MD033 # no-inline-html + - MD041 # first-line-h1 - -- - repo: https://github.com/crate-ci/typos diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7e2998f14803f..9679aa9d7afef 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -134,7 +134,7 @@ Run `cargo dev generate-all` to generate the code for your new fixture. Then run locally with (e.g.) `cargo run -p ruff_cli -- check crates/ruff/resources/test/fixtures/pycodestyle/E402.py --no-cache --select E402`. Once you're satisfied with the output, codify the behavior as a snapshot test by adding a new -`test_case` macro in the relevant `crates/ruff/src/[linter]/mod.rs` file. Then, run `cargo test`. +`test_case` macro in the relevant `crates/ruff/src/rules/[linter]/mod.rs` file. Then, run `cargo test`. Your test will fail, but you'll be prompted to follow-up with `cargo insta review`. Accept the generated snapshot, then commit the snapshot file alongside the rest of your changes. 
@@ -148,7 +148,7 @@ This implies that rule names: - should state the bad thing being checked for -- should not contain instructions on what you what you should use instead +- should not contain instructions on what you should use instead (these belong in the rule documentation and the `autofix_title` for rules that have autofix) When re-implementing rules from other linters, this convention is given more importance than diff --git a/Cargo.lock b/Cargo.lock index 253340ecea423..136f7d53529bf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -711,7 +711,7 @@ dependencies = [ [[package]] name = "flake8-to-ruff" -version = "0.0.269" +version = "0.0.270" dependencies = [ "anyhow", "clap 4.2.7", @@ -1723,7 +1723,7 @@ dependencies = [ [[package]] name = "ruff" -version = "0.0.269" +version = "0.0.270" dependencies = [ "annotate-snippets 0.9.1", "anyhow", @@ -1780,6 +1780,7 @@ dependencies = [ "toml", "typed-arena", "unicode-width", + "unicode_names2", ] [[package]] @@ -1812,7 +1813,7 @@ dependencies = [ [[package]] name = "ruff_cli" -version = "0.0.269" +version = "0.0.270" dependencies = [ "annotate-snippets 0.9.1", "anyhow", @@ -1899,10 +1900,19 @@ dependencies = [ "rustc-hash", "schemars", "serde", + "static_assertions", "tracing", "unicode-width", ] +[[package]] +name = "ruff_index" +version = "0.0.0" +dependencies = [ + "ruff_macros", + "static_assertions", +] + [[package]] name = "ruff_macros" version = "0.0.0" @@ -1927,9 +1937,9 @@ dependencies = [ "num-bigint", "num-traits", "once_cell", - "regex", "ruff_text_size", "rustc-hash", + "rustpython-ast", "rustpython-literal", "rustpython-parser", "serde", @@ -1964,6 +1974,7 @@ dependencies = [ "bitflags 2.3.1", "is-macro", "nohash-hasher", + "ruff_index", "ruff_python_ast", "ruff_python_stdlib", "ruff_text_size", @@ -2001,7 +2012,7 @@ dependencies = [ [[package]] name = "ruff_text_size" version = "0.0.0" -source = "git+https://github.com/RustPython/Parser.git?rev=3654cf0bdfc270df6b2b83e2df086843574ad082#3654cf0bdfc270df6b2b83e2df086843574ad082" +source = "git+https://github.com/astral-sh/RustPython-Parser.git?rev=335780aeeac1e6fcd85994ba001d7b8ce99fcf65#335780aeeac1e6fcd85994ba001d7b8ce99fcf65" dependencies = [ "schemars", "serde", @@ -2072,7 +2083,7 @@ dependencies = [ [[package]] name = "rustpython-ast" version = "0.2.0" -source = "git+https://github.com/RustPython/Parser.git?rev=3654cf0bdfc270df6b2b83e2df086843574ad082#3654cf0bdfc270df6b2b83e2df086843574ad082" +source = "git+https://github.com/astral-sh/RustPython-Parser.git?rev=335780aeeac1e6fcd85994ba001d7b8ce99fcf65#335780aeeac1e6fcd85994ba001d7b8ce99fcf65" dependencies = [ "is-macro", "num-bigint", @@ -2083,7 +2094,7 @@ dependencies = [ [[package]] name = "rustpython-format" version = "0.2.0" -source = "git+https://github.com/RustPython/Parser.git?rev=3654cf0bdfc270df6b2b83e2df086843574ad082#3654cf0bdfc270df6b2b83e2df086843574ad082" +source = "git+https://github.com/astral-sh/RustPython-Parser.git?rev=335780aeeac1e6fcd85994ba001d7b8ce99fcf65#335780aeeac1e6fcd85994ba001d7b8ce99fcf65" dependencies = [ "bitflags 2.3.1", "itertools", @@ -2095,7 +2106,7 @@ dependencies = [ [[package]] name = "rustpython-literal" version = "0.2.0" -source = "git+https://github.com/RustPython/Parser.git?rev=3654cf0bdfc270df6b2b83e2df086843574ad082#3654cf0bdfc270df6b2b83e2df086843574ad082" +source = 
"git+https://github.com/astral-sh/RustPython-Parser.git?rev=335780aeeac1e6fcd85994ba001d7b8ce99fcf65#335780aeeac1e6fcd85994ba001d7b8ce99fcf65" dependencies = [ "hexf-parse", "is-macro", @@ -2107,7 +2118,7 @@ dependencies = [ [[package]] name = "rustpython-parser" version = "0.2.0" -source = "git+https://github.com/RustPython/Parser.git?rev=3654cf0bdfc270df6b2b83e2df086843574ad082#3654cf0bdfc270df6b2b83e2df086843574ad082" +source = "git+https://github.com/astral-sh/RustPython-Parser.git?rev=335780aeeac1e6fcd85994ba001d7b8ce99fcf65#335780aeeac1e6fcd85994ba001d7b8ce99fcf65" dependencies = [ "anyhow", "is-macro", @@ -2130,7 +2141,7 @@ dependencies = [ [[package]] name = "rustpython-parser-core" version = "0.2.0" -source = "git+https://github.com/RustPython/Parser.git?rev=3654cf0bdfc270df6b2b83e2df086843574ad082#3654cf0bdfc270df6b2b83e2df086843574ad082" +source = "git+https://github.com/astral-sh/RustPython-Parser.git?rev=335780aeeac1e6fcd85994ba001d7b8ce99fcf65#335780aeeac1e6fcd85994ba001d7b8ce99fcf65" dependencies = [ "is-macro", "ruff_text_size", diff --git a/Cargo.toml b/Cargo.toml index f54c9bcd682b3..28993fdee2cfa 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -31,10 +31,11 @@ proc-macro2 = { version = "1.0.51" } quote = { version = "1.0.23" } regex = { version = "1.7.1" } rustc-hash = { version = "1.1.0" } -ruff_text_size = { git = "https://github.com/RustPython/Parser.git", rev = "3654cf0bdfc270df6b2b83e2df086843574ad082" } -rustpython-format = { git = "https://github.com/RustPython/Parser.git", rev = "3654cf0bdfc270df6b2b83e2df086843574ad082" } -rustpython-literal = { git = "https://github.com/RustPython/Parser.git", rev = "3654cf0bdfc270df6b2b83e2df086843574ad082" } -rustpython-parser = { git = "https://github.com/RustPython/Parser.git", rev = "3654cf0bdfc270df6b2b83e2df086843574ad082", default-features = false, features = ["full-lexer", "all-nodes-with-ranges"] } +ruff_text_size = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "335780aeeac1e6fcd85994ba001d7b8ce99fcf65" } +rustpython-ast = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "335780aeeac1e6fcd85994ba001d7b8ce99fcf65", default-features = false, features = ["all-nodes-with-ranges"]} +rustpython-format = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "335780aeeac1e6fcd85994ba001d7b8ce99fcf65" } +rustpython-literal = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "335780aeeac1e6fcd85994ba001d7b8ce99fcf65" } +rustpython-parser = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "335780aeeac1e6fcd85994ba001d7b8ce99fcf65", default-features = false, features = ["full-lexer", "all-nodes-with-ranges"] } schemars = { version = "0.8.12" } serde = { version = "1.0.152", features = ["derive"] } serde_json = { version = "1.0.93", features = ["preserve_order"] } diff --git a/README.md b/README.md index ed96bb4f47826..fc94c4fc12600 100644 --- a/README.md +++ b/README.md @@ -33,7 +33,7 @@ An extremely fast Python linter, written in Rust. 
- πŸ“ Over [500 built-in rules](https://beta.ruff.rs/docs/rules/) - βš–οΈ [Near-parity](https://beta.ruff.rs/docs/faq/#how-does-ruff-compare-to-flake8) with the built-in Flake8 rule set - πŸ”Œ Native re-implementations of dozens of Flake8 plugins, like flake8-bugbear -- ⌨️ First-party editor integrations for [VS Code](https://github.com/charliermarsh/ruff-vscode) and [more](https://github.com/charliermarsh/ruff-lsp) +- ⌨️ First-party editor integrations for [VS Code](https://github.com/astral-sh/ruff-vscode) and [more](https://github.com/astral-sh/ruff-lsp) - 🌎 Monorepo-friendly, with [hierarchical and cascading configuration](https://beta.ruff.rs/docs/configuration/#pyprojecttoml-discovery) Ruff aims to be orders of magnitude faster than alternative tools while integrating more @@ -135,15 +135,15 @@ ruff check path/to/code/to/file.py # Lint `file.py` Ruff can also be used as a [pre-commit](https://pre-commit.com) hook: ```yaml -- repo: https://github.com/charliermarsh/ruff-pre-commit +- repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. - rev: 'v0.0.269' + rev: v0.0.270 hooks: - id: ruff ``` -Ruff can also be used as a [VS Code extension](https://github.com/charliermarsh/ruff-vscode) or -alongside any other editor through the [Ruff LSP](https://github.com/charliermarsh/ruff-lsp). +Ruff can also be used as a [VS Code extension](https://github.com/astral-sh/ruff-vscode) or +alongside any other editor through the [Ruff LSP](https://github.com/astral-sh/ruff-lsp). Ruff can also be used as a [GitHub Action](https://github.com/features/actions) via [`ruff-action`](https://github.com/chartboost/ruff-action): @@ -388,7 +388,7 @@ Ruff is used by a number of major open-source projects and companies, including: - [SciPy](https://github.com/scipy/scipy) - [Sphinx](https://github.com/sphinx-doc/sphinx) - [Stable Baselines3](https://github.com/DLR-RM/stable-baselines3) -- [Starlite](https://github.com/starlite-api/starlite) +- [Litestar](https://litestar.dev/) - [The Algorithms](https://github.com/TheAlgorithms/Python) - [Vega-Altair](https://github.com/altair-viz/altair) - WordPress ([Openverse](https://github.com/WordPress/openverse)) diff --git a/crates/flake8_to_ruff/Cargo.toml b/crates/flake8_to_ruff/Cargo.toml index 9ae03e11fd163..52596b641d28a 100644 --- a/crates/flake8_to_ruff/Cargo.toml +++ b/crates/flake8_to_ruff/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "flake8-to-ruff" -version = "0.0.269" +version = "0.0.270" edition = { workspace = true } rust-version = { workspace = true } diff --git a/crates/flake8_to_ruff/pyproject.toml b/crates/flake8_to_ruff/pyproject.toml index 81462811290c3..b7278f549aeef 100644 --- a/crates/flake8_to_ruff/pyproject.toml +++ b/crates/flake8_to_ruff/pyproject.toml @@ -26,7 +26,7 @@ requires-python = ">=3.7" repository = "https://github.com/charliermarsh/ruff#subdirectory=crates/flake8_to_ruff" [build-system] -requires = ["maturin>=0.15.2,<0.16"] +requires = ["maturin>=1.0,<2.0"] build-backend = "maturin" [tool.maturin] diff --git a/crates/ruff/Cargo.toml b/crates/ruff/Cargo.toml index efec4f8007dde..a52b0aad4b7d5 100644 --- a/crates/ruff/Cargo.toml +++ b/crates/ruff/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ruff" -version = "0.0.269" +version = "0.0.270" 
authors.workspace = true edition.workspace = true rust-version.workspace = true @@ -70,6 +70,7 @@ thiserror = { version = "1.0.38" } toml = { workspace = true } typed-arena = { version = "2.0.2" } unicode-width = { version = "0.1.10" } +unicode_names2 = { version = "0.6.0", git = "https://github.com/youknowone/unicode_names2.git", rev = "4ce16aa85cbcdd9cc830410f1a72ef9a235f2fde" } [dev-dependencies] insta = { workspace = true } @@ -82,4 +83,3 @@ colored = { workspace = true, features = ["no-color"] } default = [] schemars = ["dep:schemars"] jupyter_notebook = [] -ecosystem_ci = [] diff --git a/crates/ruff/resources/test/fixtures/flake8_bandit/S601.py b/crates/ruff/resources/test/fixtures/flake8_bandit/S601.py new file mode 100644 index 0000000000000..1a76018616e35 --- /dev/null +++ b/crates/ruff/resources/test/fixtures/flake8_bandit/S601.py @@ -0,0 +1,3 @@ +import paramiko + +paramiko.exec_command('something; really; unsafe') diff --git a/crates/ruff/resources/test/fixtures/flake8_bugbear/B007.py b/crates/ruff/resources/test/fixtures/flake8_bugbear/B007.py index 6df22e0027c0d..6c855a7989c5c 100644 --- a/crates/ruff/resources/test/fixtures/flake8_bugbear/B007.py +++ b/crates/ruff/resources/test/fixtures/flake8_bugbear/B007.py @@ -73,7 +73,18 @@ def f(): def f(): - # Fixable. + # Unfixable. + for foo, bar, baz in (["1", "2", "3"],): + if foo or baz: + break + else: + bar = 1 + + print(bar) + + +def f(): + # Unfixable (false negative) due to usage of `bar` outside of loop. for foo, bar, baz in (["1", "2", "3"],): if foo or baz: break @@ -85,4 +96,4 @@ def f(): # Unfixable due to trailing underscore (`_line_` wouldn't be considered an ignorable # variable name). for line_ in range(self.header_lines): - fp.readline() + fp.readline() diff --git a/crates/ruff/resources/test/fixtures/flake8_pyi/PYI013.py b/crates/ruff/resources/test/fixtures/flake8_pyi/PYI013.py new file mode 100644 index 0000000000000..9b3635962e29c --- /dev/null +++ b/crates/ruff/resources/test/fixtures/flake8_pyi/PYI013.py @@ -0,0 +1,65 @@ +class OneAttributeClass: + value: int + ... + + +class OneAttributeClass2: + ... + value: int + + +class TwoEllipsesClass: + ... + ... + + +class DocstringClass: + """ + My body only contains an ellipsis. + """ + + ... + + +class NonEmptyChild(Exception): + value: int + ... + + +class NonEmptyChild2(Exception): + ... + value: int + + +class NonEmptyWithInit: + value: int + ... + + def __init__(): + pass + + +class EmptyClass: + ... + + +class EmptyEllipsis: + ... + + +class Dog: + eyes: int = 2 + + +class WithInit: + value: int = 0 + + def __init__(): + ... + + +def function(): + ... + + +... diff --git a/crates/ruff/resources/test/fixtures/flake8_pyi/PYI013.pyi b/crates/ruff/resources/test/fixtures/flake8_pyi/PYI013.pyi new file mode 100644 index 0000000000000..aaf2cb0f794f2 --- /dev/null +++ b/crates/ruff/resources/test/fixtures/flake8_pyi/PYI013.pyi @@ -0,0 +1,56 @@ +# Violations of PYI013 + +class OneAttributeClass: + value: int + ... # Error + +class OneAttributeClass2: + ... # Error + value: int + +class MyClass: + ... + value: int + +class TwoEllipsesClass: + ... + ... # Error + +class DocstringClass: + """ + My body only contains an ellipsis. + """ + + ... # Error + +class NonEmptyChild(Exception): + value: int + ... # Error + +class NonEmptyChild2(Exception): + ... # Error + value: int + +class NonEmptyWithInit: + value: int + ... # Error + + def __init__(): + pass + +# Not violations + +class EmptyClass: ... +class EmptyEllipsis: ... 
+ +class Dog: + eyes: int = 2 + +class WithInit: + value: int = 0 + + def __init__(): ... + +def function(): ... + +... diff --git a/crates/ruff/resources/test/fixtures/flake8_simplify/SIM102.py b/crates/ruff/resources/test/fixtures/flake8_simplify/SIM102.py index c1a9c03cf1053..3b47a3e33505f 100644 --- a/crates/ruff/resources/test/fixtures/flake8_simplify/SIM102.py +++ b/crates/ruff/resources/test/fixtures/flake8_simplify/SIM102.py @@ -86,9 +86,16 @@ ): print("Bad module!") -# SIM102 -if node.module: - if node.module == "multiprocessing" or node.module.startswith( +# SIM102 (auto-fixable) +if node.module012345678: + if node.module == "multiprocß9πŸ’£2ℝ" or node.module.startswith( + "multiprocessing." + ): + print("Bad module!") + +# SIM102 (not auto-fixable) +if node.module0123456789: + if node.module == "multiprocß9πŸ’£2ℝ" or node.module.startswith( "multiprocessing." ): print("Bad module!") diff --git a/crates/ruff/resources/test/fixtures/flake8_simplify/SIM108.py b/crates/ruff/resources/test/fixtures/flake8_simplify/SIM108.py index ab43e3f2b1d48..94b14f911a52a 100644 --- a/crates/ruff/resources/test/fixtures/flake8_simplify/SIM108.py +++ b/crates/ruff/resources/test/fixtures/flake8_simplify/SIM108.py @@ -80,17 +80,25 @@ # SIM108 if a: - b = cccccccccccccccccccccccccccccccccccc + b = "cccccccccccccccccccccccccccccccccß" else: - b = ddddddddddddddddddddddddddddddddddddd + b = "dddddddddddddddddddddddddddddddddπŸ’£" # OK (too long) if True: if a: - b = cccccccccccccccccccccccccccccccccccc + b = ccccccccccccccccccccccccccccccccccc else: - b = ddddddddddddddddddddddddddddddddddddd + b = ddddddddddddddddddddddddddddddddddd + + +# OK (too long with tabs) +if True: + if a: + b = ccccccccccccccccccccccccccccccccccc + else: + b = ddddddddddddddddddddddddddddddddddd # SIM108 (without fix due to trailing comment) diff --git a/crates/ruff/resources/test/fixtures/flake8_simplify/SIM110.py b/crates/ruff/resources/test/fixtures/flake8_simplify/SIM110.py index 30ce25bb0e550..b02ac7c28cb99 100644 --- a/crates/ruff/resources/test/fixtures/flake8_simplify/SIM110.py +++ b/crates/ruff/resources/test/fixtures/flake8_simplify/SIM110.py @@ -155,3 +155,19 @@ def f(): if check(x): return False return True + + +def f(): + # SIM110 + for x in "012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ": + if x.isdigit(): + return True + return False + + +def f(): + # OK (too long) + for x in "012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9": + if x.isdigit(): + return True + return False diff --git a/crates/ruff/resources/test/fixtures/flake8_simplify/SIM111.py b/crates/ruff/resources/test/fixtures/flake8_simplify/SIM111.py index f0afb793d4ca6..d6908461fcd8d 100644 --- a/crates/ruff/resources/test/fixtures/flake8_simplify/SIM111.py +++ b/crates/ruff/resources/test/fixtures/flake8_simplify/SIM111.py @@ -171,3 +171,19 @@ def f(): if x > y: return False return True + + +def f(): + # SIM111 + for x in "012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9": + if x.isdigit(): + return False + return True + + +def f(): + # OK (too long) + for x in "012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß90": + if x.isdigit(): + return False + return True diff --git a/crates/ruff/resources/test/fixtures/flake8_simplify/SIM117.py b/crates/ruff/resources/test/fixtures/flake8_simplify/SIM117.py index 34dd47e361ae2..3c99535e43088 100644 --- a/crates/ruff/resources/test/fixtures/flake8_simplify/SIM117.py +++ b/crates/ruff/resources/test/fixtures/flake8_simplify/SIM117.py @@ -90,3 +90,13 @@ D() as d, ): 
print("hello") + +# SIM117 (auto-fixable) +with A("01ß9πŸ’£2ℝ8901ß9πŸ’£2ℝ8901ß9πŸ’£2ℝ89") as a: + with B("01ß9πŸ’£2ℝ8901ß9πŸ’£2ℝ8901ß9πŸ’£2ℝ89") as b: + print("hello") + +# SIM117 (not auto-fixable too long) +with A("01ß9πŸ’£2ℝ8901ß9πŸ’£2ℝ8901ß9πŸ’£2ℝ890") as a: + with B("01ß9πŸ’£2ℝ8901ß9πŸ’£2ℝ8901ß9πŸ’£2ℝ89") as b: + print("hello") \ No newline at end of file diff --git a/crates/ruff/resources/test/fixtures/flake8_simplify/SIM401.py b/crates/ruff/resources/test/fixtures/flake8_simplify/SIM401.py index 0ef88effb7aa7..487f15a5f71e9 100644 --- a/crates/ruff/resources/test/fixtures/flake8_simplify/SIM401.py +++ b/crates/ruff/resources/test/fixtures/flake8_simplify/SIM401.py @@ -14,7 +14,7 @@ else: var = a_dict[key] -# SIM401 (default with a complex expression) +# OK (default contains effect) if key in a_dict: var = a_dict[key] else: @@ -36,12 +36,18 @@ if key in a_dict: vars[idx] = a_dict[key] else: - vars[idx] = "default" + vars[idx] = "defaultß9πŸ’£2ℝ6789ß9πŸ’£2ℝ6789ß9πŸ’£2ℝ6789ß9πŸ’£2ℝ6789ß9πŸ’£2ℝ6789" ### # Negative cases ### +# OK (too long) +if key in a_dict: + vars[idx] = a_dict[key] +else: + vars[idx] = "defaultß9πŸ’£2ℝ6789ß9πŸ’£2ℝ6789ß9πŸ’£2ℝ6789ß9πŸ’£2ℝ6789ß9πŸ’£2ℝ6789ß" + # OK (false negative) if not key in a_dict: var = "default" diff --git a/crates/ruff/resources/test/fixtures/flake8_type_checking/TCH002.py b/crates/ruff/resources/test/fixtures/flake8_type_checking/TCH002.py index 2bda6ff9838a6..7b082da74e1e6 100644 --- a/crates/ruff/resources/test/fixtures/flake8_type_checking/TCH002.py +++ b/crates/ruff/resources/test/fixtures/flake8_type_checking/TCH002.py @@ -146,3 +146,7 @@ def f(): import pandas as pd x = dict[pd.DataFrame, pd.DataFrame] + + +def f(): + import pandas as pd diff --git a/crates/ruff/resources/test/fixtures/isort/fit_line_length_comment.py b/crates/ruff/resources/test/fixtures/isort/fit_line_length_comment.py index 11f1ee1f6e6ca..94648e045ecb7 100644 --- a/crates/ruff/resources/test/fixtures/isort/fit_line_length_comment.py +++ b/crates/ruff/resources/test/fixtures/isort/fit_line_length_comment.py @@ -2,3 +2,7 @@ # Don't take this comment into account when determining whether the next import can fit on one line. from b import c from d import e # Do take this comment into account when determining whether the next import can fit on one line. +# The next import fits on one line. +from f import g # 012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ +# The next import doesn't fit on one line. 
+from h import i # 012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9 diff --git a/crates/ruff/resources/test/fixtures/isort/required_imports/off.py b/crates/ruff/resources/test/fixtures/isort/required_imports/off.py new file mode 100644 index 0000000000000..62590951c4f91 --- /dev/null +++ b/crates/ruff/resources/test/fixtures/isort/required_imports/off.py @@ -0,0 +1,4 @@ +# isort: off + +x = 1 +# isort: on diff --git a/crates/ruff/resources/test/fixtures/isort/split.py b/crates/ruff/resources/test/fixtures/isort/split.py index acdc032fe5203..e4beaec56334a 100644 --- a/crates/ruff/resources/test/fixtures/isort/split.py +++ b/crates/ruff/resources/test/fixtures/isort/split.py @@ -6,7 +6,16 @@ import c import d -# isort: split +# isort: split import a import b + +if True: + import C + import A + + # isort: split + + import D + import B diff --git a/crates/ruff/resources/test/fixtures/pycodestyle/E402.py b/crates/ruff/resources/test/fixtures/pycodestyle/E402.py index b95b2ae1e832a..81e6306c7af11 100644 --- a/crates/ruff/resources/test/fixtures/pycodestyle/E402.py +++ b/crates/ruff/resources/test/fixtures/pycodestyle/E402.py @@ -19,7 +19,7 @@ else: import e -y = x + 1 +__some__magic = 1 import f diff --git a/crates/ruff/resources/test/fixtures/pycodestyle/E501_2.py b/crates/ruff/resources/test/fixtures/pycodestyle/E501_2.py new file mode 100644 index 0000000000000..18ea839392ba1 --- /dev/null +++ b/crates/ruff/resources/test/fixtures/pycodestyle/E501_2.py @@ -0,0 +1,11 @@ +a = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" +a = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + +b = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" +b = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + +c = """2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" +c = """2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + +d = """πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" +d = """πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" diff --git a/crates/ruff/resources/test/fixtures/pycodestyle/W505_utf_8.py b/crates/ruff/resources/test/fixtures/pycodestyle/W505_utf_8.py new file mode 100644 index 0000000000000..6e177dad8f0ea --- /dev/null +++ b/crates/ruff/resources/test/fixtures/pycodestyle/W505_utf_8.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python3 +"""Here's a top-level ß9πŸ’£2ℝing that's over theß9πŸ’£2ℝ.""" + + +def f1(): + """Here's a ß9πŸ’£2ℝing that's also over theß9πŸ’£2ℝ.""" + + x = 1 # Here's a comment that's over theß9πŸ’£2ℝ, but it's not standalone. + + # Here's a standalone comment that's over theß9πŸ’£2ℝ. + + x = 2 + # Another standalone that is preceded by a newline and indent toke and is over theß9πŸ’£2ℝ. + + print("Here's a string that's over theß9πŸ’£2ℝ, but it's not a ß9πŸ’£2ℝing.") + + +"This is also considered a ß9πŸ’£2ℝing, and is over theß9πŸ’£2ℝ." + + +def f2(): + """Here's a multi-line ß9πŸ’£2ℝing. + + It's over theß9πŸ’£2ℝ on this line, which isn't the first line in the ß9πŸ’£2ℝing. + """ + + +def f3(): + """Here's a multi-line ß9πŸ’£2ℝing. 
+ + It's over theß9πŸ’£2ℝ on this line, which isn't the first line in the ß9πŸ’£2ℝing.""" diff --git a/crates/ruff/resources/test/fixtures/pyflakes/F401_12.py b/crates/ruff/resources/test/fixtures/pyflakes/F401_12.py new file mode 100644 index 0000000000000..f3ba7c360c777 --- /dev/null +++ b/crates/ruff/resources/test/fixtures/pyflakes/F401_12.py @@ -0,0 +1,10 @@ +"""Test: module bindings are preferred over local bindings, for deferred annotations.""" + +from __future__ import annotations + +import datetime +from typing import Optional + + +class Class: + datetime: Optional[datetime.datetime] diff --git a/crates/ruff/resources/test/fixtures/pyflakes/F401_13.py b/crates/ruff/resources/test/fixtures/pyflakes/F401_13.py new file mode 100644 index 0000000000000..49ba589ae7d75 --- /dev/null +++ b/crates/ruff/resources/test/fixtures/pyflakes/F401_13.py @@ -0,0 +1,12 @@ +"""Test: module bindings are preferred over local bindings, for deferred annotations.""" + +from __future__ import annotations + +from typing import TypeAlias, List + + +class Class: + List: TypeAlias = List + + def bar(self) -> List: + pass diff --git a/crates/ruff/resources/test/fixtures/pyflakes/F401_14.py b/crates/ruff/resources/test/fixtures/pyflakes/F401_14.py new file mode 100644 index 0000000000000..6e7bb3695cd84 --- /dev/null +++ b/crates/ruff/resources/test/fixtures/pyflakes/F401_14.py @@ -0,0 +1,8 @@ +"""Test: module bindings are preferred over local bindings, for deferred annotations.""" + +import datetime +from typing import Optional + + +class Class: + datetime: "Optional[datetime.datetime]" diff --git a/crates/ruff/resources/test/fixtures/pyflakes/F811_23.py b/crates/ruff/resources/test/fixtures/pyflakes/F811_23.py new file mode 100644 index 0000000000000..0332e48bc8401 --- /dev/null +++ b/crates/ruff/resources/test/fixtures/pyflakes/F811_23.py @@ -0,0 +1,4 @@ +"""Test that shadowing an explicit re-export produces a warning.""" + +import foo as foo +import bar as foo diff --git a/crates/ruff/resources/test/fixtures/pyflakes/F811_24.py b/crates/ruff/resources/test/fixtures/pyflakes/F811_24.py new file mode 100644 index 0000000000000..0207b9329dab0 --- /dev/null +++ b/crates/ruff/resources/test/fixtures/pyflakes/F811_24.py @@ -0,0 +1,5 @@ +"""Test that shadowing a `__future__` import does not produce a warning.""" + +from __future__ import annotations + +import annotations diff --git a/crates/ruff/resources/test/fixtures/pylint/duplicate_value.py b/crates/ruff/resources/test/fixtures/pylint/duplicate_value.py new file mode 100644 index 0000000000000..61f2dd355ba5c --- /dev/null +++ b/crates/ruff/resources/test/fixtures/pylint/duplicate_value.py @@ -0,0 +1,11 @@ +### +# Errors. +### +incorrect_set = {"value1", 23, 5, "value1"} +incorrect_set = {1, 1} + +### +# Non-errors. 
+### +correct_set = {"value1", 23, 5} +correct_set = {5, "5"} diff --git a/crates/ruff/resources/test/fixtures/pylint/named_expr_without_context.py b/crates/ruff/resources/test/fixtures/pylint/named_expr_without_context.py new file mode 100644 index 0000000000000..2dcc56cffd206 --- /dev/null +++ b/crates/ruff/resources/test/fixtures/pylint/named_expr_without_context.py @@ -0,0 +1,19 @@ +# Errors +(a := 42) +if True: + (b := 1) + + +class Foo: + (c := 1) + + +# OK +if a := 42: + print("Success") + +a = 0 +while (a := a + 1) < 10: + print("Correct") + +a = (b := 1) diff --git a/crates/ruff/resources/test/fixtures/pyupgrade/UP032_2.py b/crates/ruff/resources/test/fixtures/pyupgrade/UP032_2.py new file mode 100644 index 0000000000000..2987164454638 --- /dev/null +++ b/crates/ruff/resources/test/fixtures/pyupgrade/UP032_2.py @@ -0,0 +1,28 @@ +# Errors +"{.real}".format(1) +"{0.real}".format(1) +"{a.real}".format(a=1) + +"{.real}".format(1.0) +"{0.real}".format(1.0) +"{a.real}".format(a=1.0) + +"{.real}".format(1j) +"{0.real}".format(1j) +"{a.real}".format(a=1j) + +"{.real}".format(0b01) +"{0.real}".format(0b01) +"{a.real}".format(a=0b01) + +"{}".format(1 + 2) +"{}".format([1, 2]) +"{}".format({1, 2}) +"{}".format({1: 2, 3: 4}) +"{}".format((i for i in range(2))) + +"{.real}".format(1 + 2) +"{.real}".format([1, 2]) +"{.real}".format({1, 2}) +"{.real}".format({1: 2, 3: 4}) +"{}".format((i for i in range(2))) diff --git a/crates/ruff/resources/test/fixtures/ruff/RUF005.py b/crates/ruff/resources/test/fixtures/ruff/RUF005.py index e8ecc06b16e61..2007e47a2e5e8 100644 --- a/crates/ruff/resources/test/fixtures/ruff/RUF005.py +++ b/crates/ruff/resources/test/fixtures/ruff/RUF005.py @@ -1,9 +1,38 @@ +### +# Non-fixable Errors. +### +foo + [ # This will be preserved. +] +[*foo] + [ # This will be preserved. +] +first = [ + # The order + 1, # here + 2, # is + # extremely + 3, # critical + # to preserve +] +second = first + [ + # please + 4, + # don't + 5, + # touch + 6, +] + + +### +# Fixable errors. +### class Fun: words = ("how", "fun!") def yay(self): return self.words + yay = Fun().yay foo = [4, 5, 6] @@ -13,36 +42,27 @@ def yay(self): spam = quux + (10, 11, 12) spom = list(spam) eggs = spom + [13, 14, 15] -elatement = ("we all say", ) + yay() -excitement = ("we all think", ) + Fun().yay() -astonishment = ("we all feel", ) + Fun.words +elatement = ("we all say",) + yay() +excitement = ("we all think",) + Fun().yay() +astonishment = ("we all feel",) + Fun.words -chain = ['a', 'b', 'c'] + eggs + list(('yes', 'no', 'pants') + zoob) +chain = ["a", "b", "c"] + eggs + list(("yes", "no", "pants") + zoob) baz = () + zoob -first = [ - # The order - 1, # here - 2, # is - # extremely - 3, # critical - # to preserve -] -second = first + [ - # please - 4, - # don't - 5, - # touch - 6, -] - [] + foo + [ ] -[] + foo + [ # This will be preserved, but doesn't prevent the fix -] +pylint_call = [sys.executable, "-m", "pylint"] + args + [path] +pylint_call_tuple = (sys.executable, "-m", "pylint") + args + (path, path2) +b = a + [2, 3] + [4] # Uses the non-preferred quote style, which should be retained. -f"{[*a(), 'b']}" +f"{a() + ['b']}" + +### +# Non-errors. 
+### +a = (1,) + [2] +a = [1, 2] + (3, 4) +a = ([1, 2, 3] + b) + (4, 5, 6) diff --git a/crates/ruff/resources/test/fixtures/ruff/RUF010.py b/crates/ruff/resources/test/fixtures/ruff/RUF010.py index cc3e9c7831e09..2d2604f9dc554 100644 --- a/crates/ruff/resources/test/fixtures/ruff/RUF010.py +++ b/crates/ruff/resources/test/fixtures/ruff/RUF010.py @@ -10,6 +10,8 @@ def foo(one_arg): f"{str(d['a'])}, {repr(d['b'])}, {ascii(d['c'])}" # RUF010 +f"{(str(bla))}, {(repr(bla))}, {(ascii(bla))}" # RUF010 + f"{foo(bla)}" # OK f"{str(bla, 'ascii')}, {str(bla, encoding='cp1255')}" # OK diff --git a/crates/ruff/resources/test/fixtures/ruff/confusables.py b/crates/ruff/resources/test/fixtures/ruff/confusables.py index b642cebd6f396..3ae350887fa3f 100644 --- a/crates/ruff/resources/test/fixtures/ruff/confusables.py +++ b/crates/ruff/resources/test/fixtures/ruff/confusables.py @@ -8,7 +8,24 @@ def f(): ... -def g(): +def f(): """Here's a docstring with a greek rho: ρ""" # And here's a comment with a greek alpha: βˆ— ... + + +x = "𝐁ad string" +x = "βˆ’" + +# This should be ignored, since it contains an unambiguous unicode character, and no +# ASCII. +x = "Русский" + +# The first word should be ignored, while the second should be included, since it +# contains ASCII. +x = "Ξ²Ξ± BΞ±d" + +# The two characters should be flagged here. The first character is a "word" +# consisting of a single ambiguous character, while the second character is a "word +# boundary" (whitespace) that it itself ambiguous. +x = "Р усский" diff --git a/crates/ruff/resources/test/fixtures/ruff/noqa.py b/crates/ruff/resources/test/fixtures/ruff/noqa.py new file mode 100644 index 0000000000000..30e59400c6f2f --- /dev/null +++ b/crates/ruff/resources/test/fixtures/ruff/noqa.py @@ -0,0 +1,23 @@ +def f(): + # These should both be ignored by the `noqa`. + I = 1 # noqa: E741, F841 + + +def f(): + # These should both be ignored by the `noqa`. + I = 1 # noqa: E741,F841 + + +def f(): + # These should both be ignored by the `noqa`. + I = 1 # noqa: E741 F841 + + +def f(): + # These should both be ignored by the `noqa`. + I = 1 # noqa: E741 , F841 + + +def f(): + # Only `E741` should be ignored by the `noqa`. 
+ I = 1 # noqa: E741.F841 diff --git a/crates/ruff/resources/test/fixtures/tryceratops/TRY302.py b/crates/ruff/resources/test/fixtures/tryceratops/TRY302.py index 0020926e4d2f3..3691e5472afc5 100644 --- a/crates/ruff/resources/test/fixtures/tryceratops/TRY302.py +++ b/crates/ruff/resources/test/fixtures/tryceratops/TRY302.py @@ -68,6 +68,18 @@ def bad(): except Exception as e: raise e +def fine(): + try: + process() + except Exception as e: + raise e from None + +def fine(): + try: + process() + except Exception as e: + raise e from Exception + def fine(): try: process() diff --git a/crates/ruff/src/autofix/actions.rs b/crates/ruff/src/autofix/actions.rs index fc6231bda9cd1..afa61a274f6f7 100644 --- a/crates/ruff/src/autofix/actions.rs +++ b/crates/ruff/src/autofix/actions.rs @@ -10,14 +10,11 @@ use rustpython_parser::{lexer, Mode, Tok}; use ruff_diagnostics::Edit; use ruff_python_ast::helpers; -use ruff_python_ast::imports::{AnyImport, Import}; use ruff_python_ast::newlines::NewlineWithTrailingNewline; use ruff_python_ast::source_code::{Indexer, Locator, Stylist}; -use ruff_python_semantic::context::Context; use crate::cst::helpers::compose_module_path; -use crate::cst::matchers::match_module; -use crate::importer::Importer; +use crate::cst::matchers::match_statement; /// Determine if a body contains only a single statement, taking into account /// deleted. @@ -215,9 +212,9 @@ pub(crate) fn remove_unused_imports<'a>( stylist: &Stylist, ) -> Result { let module_text = locator.slice(stmt.range()); - let mut tree = match_module(module_text)?; + let mut tree = match_statement(module_text)?; - let Some(Statement::Simple(body)) = tree.body.first_mut() else { + let Statement::Simple(body) = &mut tree else { bail!("Expected Statement::Simple"); }; @@ -423,86 +420,6 @@ pub(crate) fn remove_argument( } } -/// Generate an [`Edit`] to reference the given symbol. Returns the [`Edit`] necessary to make the -/// symbol available in the current scope along with the bound name of the symbol. -/// -/// For example, assuming `module` is `"functools"` and `member` is `"lru_cache"`, this function -/// could return an [`Edit`] to add `import functools` to the top of the file, alongside with the -/// name on which the `lru_cache` symbol would be made available (`"functools.lru_cache"`). -/// -/// Attempts to reuse existing imports when possible. -pub(crate) fn get_or_import_symbol( - module: &str, - member: &str, - at: TextSize, - context: &Context, - importer: &Importer, - locator: &Locator, -) -> Result<(Edit, String)> { - if let Some((source, binding)) = context.resolve_qualified_import_name(module, member) { - // If the symbol is already available in the current scope, use it. - - // The exception: the symbol source (i.e., the import statement) comes after the current - // location. For example, we could be generating an edit within a function, and the import - // could be defined in the module scope, but after the function definition. In this case, - // it's unclear whether we can use the symbol (the function could be called between the - // import and the current location, and thus the symbol would not be available). It's also - // unclear whether should add an import statement at the top of the file, since it could - // be shadowed between the import and the current location. - if source.start() > at { - bail!("Unable to use existing symbol `{binding}` due to late-import"); - } - - // We also add a no-op edit to force conflicts with any other fixes that might try to - // remove the import. 
Consider: - // - // ```py - // import sys - // - // quit() - // ``` - // - // Assume you omit this no-op edit. If you run Ruff with `unused-imports` and - // `sys-exit-alias` over this snippet, it will generate two fixes: (1) remove the unused - // `sys` import; and (2) replace `quit()` with `sys.exit()`, under the assumption that `sys` - // is already imported and available. - // - // By adding this no-op edit, we force the `unused-imports` fix to conflict with the - // `sys-exit-alias` fix, and thus will avoid applying both fixes in the same pass. - let import_edit = - Edit::range_replacement(locator.slice(source.range()).to_string(), source.range()); - Ok((import_edit, binding)) - } else { - if let Some(stmt) = importer.find_import_from(module, at) { - // Case 1: `from functools import lru_cache` is in scope, and we're trying to reference - // `functools.cache`; thus, we add `cache` to the import, and return `"cache"` as the - // bound name. - if context - .find_binding(member) - .map_or(true, |binding| binding.kind.is_builtin()) - { - let import_edit = importer.add_member(stmt, member)?; - Ok((import_edit, member.to_string())) - } else { - bail!("Unable to insert `{member}` into scope due to name conflict") - } - } else { - // Case 2: No `functools` import is in scope; thus, we add `import functools`, and - // return `"functools.cache"` as the bound name. - if context - .find_binding(module) - .map_or(true, |binding| binding.kind.is_builtin()) - { - let import_edit = - importer.add_import(&AnyImport::Import(Import::module(module)), at); - Ok((import_edit, format!("{module}.{member}"))) - } else { - bail!("Unable to insert `{module}` into scope due to name conflict") - } - } - } -} - #[cfg(test)] mod tests { use anyhow::Result; diff --git a/crates/ruff/src/checkers/ast/deferred.rs b/crates/ruff/src/checkers/ast/deferred.rs index 62a1719af1f51..ab74d512345e3 100644 --- a/crates/ruff/src/checkers/ast/deferred.rs +++ b/crates/ruff/src/checkers/ast/deferred.rs @@ -1,7 +1,7 @@ use ruff_text_size::TextRange; use rustpython_parser::ast::Expr; -use ruff_python_semantic::context::Snapshot; +use ruff_python_semantic::model::Snapshot; /// A collection of AST nodes that are deferred for later analysis. 
/// Used to, e.g., store functions, whose bodies shouldn't be analyzed until all diff --git a/crates/ruff/src/checkers/ast/mod.rs b/crates/ruff/src/checkers/ast/mod.rs index c6fa2d148b031..da966300e36f8 100644 --- a/crates/ruff/src/checkers/ast/mod.rs +++ b/crates/ruff/src/checkers/ast/mod.rs @@ -24,11 +24,12 @@ use ruff_python_semantic::analyze::branch_detection; use ruff_python_semantic::analyze::typing::{Callable, SubscriptKind}; use ruff_python_semantic::analyze::visibility::ModuleSource; use ruff_python_semantic::binding::{ - Binding, BindingId, BindingKind, Exceptions, ExecutionContext, Export, FromImportation, + Binding, BindingFlags, BindingId, BindingKind, Exceptions, Export, FromImportation, Importation, StarImportation, SubmoduleImportation, }; -use ruff_python_semantic::context::{Context, ContextFlags}; +use ruff_python_semantic::context::ExecutionContext; use ruff_python_semantic::definition::{ContextualizedDefinition, Module, ModuleKind}; +use ruff_python_semantic::model::{ResolvedReference, SemanticModel, SemanticModelFlags}; use ruff_python_semantic::node::NodeId; use ruff_python_semantic::scope::{ClassDef, FunctionDef, Lambda, Scope, ScopeId, ScopeKind}; use ruff_python_stdlib::builtins::{BUILTINS, MAGIC_GLOBALS}; @@ -41,6 +42,7 @@ use crate::fs::relativize_path; use crate::importer::Importer; use crate::noqa::NoqaMapping; use crate::registry::{AsRule, Rule}; +use crate::rules::flake8_builtins::helpers::AnyShadowing; use crate::rules::{ flake8_2020, flake8_annotations, flake8_async, flake8_bandit, flake8_blind_except, flake8_boolean_trap, flake8_bugbear, flake8_builtins, flake8_comprehensions, flake8_datetimez, @@ -71,7 +73,7 @@ pub(crate) struct Checker<'a> { pub(crate) indexer: &'a Indexer, pub(crate) importer: Importer<'a>, // Stateful fields. - pub(crate) ctx: Context<'a>, + semantic_model: SemanticModel<'a>, pub(crate) diagnostics: Vec, pub(crate) deletions: FxHashSet>, deferred: Deferred<'a>, @@ -105,7 +107,7 @@ impl<'a> Checker<'a> { stylist, indexer, importer, - ctx: Context::new(&settings.typing_modules, path, module), + semantic_model: SemanticModel::new(&settings.typing_modules, path, module), deferred: Deferred::default(), diagnostics: Vec::default(), deletions: FxHashSet::default(), @@ -138,13 +140,17 @@ impl<'a> Checker<'a> { /// Create a [`Generator`] to generate source code based on the current AST state. pub(crate) fn generator(&self) -> Generator { - fn quote_style(context: &Context, locator: &Locator, indexer: &Indexer) -> Option { - if !context.in_f_string() { + fn quote_style( + model: &SemanticModel, + locator: &Locator, + indexer: &Indexer, + ) -> Option { + if !model.in_f_string() { return None; } // Find the quote character used to start the containing f-string. - let expr = context.expr()?; + let expr = model.expr()?; let string_range = indexer.f_string_range(expr.start())?; let trailing_quote = trailing_quote(locator.slice(string_range))?; @@ -158,10 +164,27 @@ impl<'a> Checker<'a> { Generator::new( self.stylist.indentation(), - quote_style(&self.ctx, self.locator, self.indexer).unwrap_or(self.stylist.quote()), + quote_style(&self.semantic_model, self.locator, self.indexer) + .unwrap_or(self.stylist.quote()), self.stylist.line_ending(), ) } + + pub(crate) fn semantic_model(&self) -> &SemanticModel<'a> { + &self.semantic_model + } + + /// Returns whether the given rule should be checked. 
+ #[inline] + pub(crate) const fn enabled(&self, rule: Rule) -> bool { + self.settings.rules.enabled(rule) + } + + /// Returns whether any of the given rules should be checked. + #[inline] + pub(crate) const fn any_enabled(&self, rules: &[Rule]) -> bool { + self.settings.rules.any_enabled(rules) + } } impl<'a, 'b> Visitor<'b> for Checker<'a> @@ -169,7 +192,7 @@ where 'b: 'a, { fn visit_stmt(&mut self, stmt: &'b Stmt) { - self.ctx.push_stmt(stmt); + self.semantic_model.push_stmt(stmt); // Track whether we've seen docstrings, non-imports, etc. match stmt { @@ -180,63 +203,62 @@ where .iter() .any(|alias| alias.name.as_str() == "annotations") { - self.ctx.flags |= ContextFlags::FUTURE_ANNOTATIONS; + self.semantic_model.flags |= SemanticModelFlags::FUTURE_ANNOTATIONS; } } else { - self.ctx.flags |= ContextFlags::FUTURES_BOUNDARY; + self.semantic_model.flags |= SemanticModelFlags::FUTURES_BOUNDARY; } } Stmt::Import(_) => { - self.ctx.flags |= ContextFlags::FUTURES_BOUNDARY; + self.semantic_model.flags |= SemanticModelFlags::FUTURES_BOUNDARY; } _ => { - self.ctx.flags |= ContextFlags::FUTURES_BOUNDARY; - if !self.ctx.seen_import_boundary() + self.semantic_model.flags |= SemanticModelFlags::FUTURES_BOUNDARY; + if !self.semantic_model.seen_import_boundary() && !helpers::is_assignment_to_a_dunder(stmt) - && !helpers::in_nested_block(self.ctx.parents()) + && !helpers::in_nested_block(self.semantic_model.parents()) { - self.ctx.flags |= ContextFlags::IMPORT_BOUNDARY; + self.semantic_model.flags |= SemanticModelFlags::IMPORT_BOUNDARY; } } } // Track each top-level import, to guide import insertions. if matches!(stmt, Stmt::Import(_) | Stmt::ImportFrom(_)) { - if self.ctx.at_top_level() { + if self.semantic_model.at_top_level() { self.importer.visit_import(stmt); } } // Store the flags prior to any further descent, so that we can restore them after visiting // the node. - let flags_snapshot = self.ctx.flags; + let flags_snapshot = self.semantic_model.flags; // Pre-visit. match stmt { Stmt::Global(ast::StmtGlobal { names, range: _ }) => { let ranges: Vec = helpers::find_names(stmt, self.locator).collect(); - if !self.ctx.scope_id.is_global() { + if !self.semantic_model.scope_id.is_global() { // Add the binding to the current scope. - let context = self.ctx.execution_context(); - let exceptions = self.ctx.exceptions(); - let scope = &mut self.ctx.scopes[self.ctx.scope_id]; - let usage = Some((self.ctx.scope_id, stmt.range())); + let context = self.semantic_model.execution_context(); + let exceptions = self.semantic_model.exceptions(); + let scope = &mut self.semantic_model.scopes[self.semantic_model.scope_id]; for (name, range) in names.iter().zip(ranges.iter()) { - let id = self.ctx.bindings.push(Binding { + // Add a binding to the current scope. 
+ let binding_id = self.semantic_model.bindings.push(Binding { kind: BindingKind::Global, - runtime_usage: None, - synthetic_usage: usage, - typing_usage: None, range: *range, - source: self.ctx.stmt_id, + references: Vec::new(), + source: self.semantic_model.stmt_id, context, exceptions, + flags: BindingFlags::empty(), }); - scope.add(name, id); + scope.add(name, binding_id); } } - if self.settings.rules.enabled(Rule::AmbiguousVariableName) { + if self.enabled(Rule::AmbiguousVariableName) { self.diagnostics .extend(names.iter().zip(ranges.iter()).filter_map(|(name, range)| { pycodestyle::rules::ambiguous_variable_name(name, *range) @@ -245,42 +267,44 @@ where } Stmt::Nonlocal(ast::StmtNonlocal { names, range: _ }) => { let ranges: Vec = helpers::find_names(stmt, self.locator).collect(); - if !self.ctx.scope_id.is_global() { - let context = self.ctx.execution_context(); - let exceptions = self.ctx.exceptions(); - let scope = &mut self.ctx.scopes[self.ctx.scope_id]; - let usage = Some((self.ctx.scope_id, stmt.range())); + if !self.semantic_model.scope_id.is_global() { + let context = self.semantic_model.execution_context(); + let exceptions = self.semantic_model.exceptions(); + let scope = &mut self.semantic_model.scopes[self.semantic_model.scope_id]; for (name, range) in names.iter().zip(ranges.iter()) { // Add a binding to the current scope. - let id = self.ctx.bindings.push(Binding { + let binding_id = self.semantic_model.bindings.push(Binding { kind: BindingKind::Nonlocal, - runtime_usage: None, - synthetic_usage: usage, - typing_usage: None, range: *range, - source: self.ctx.stmt_id, + references: Vec::new(), + source: self.semantic_model.stmt_id, context, exceptions, + flags: BindingFlags::empty(), }); - scope.add(name, id); + scope.add(name, binding_id); } // Mark the binding in the defining scopes as used too. (Skip the global scope // and the current scope.) for (name, range) in names.iter().zip(ranges.iter()) { let binding_id = self - .ctx + .semantic_model .scopes - .ancestors(self.ctx.scope_id) + .ancestors(self.semantic_model.scope_id) .skip(1) .take_while(|scope| !scope.kind.is_module()) .find_map(|scope| scope.get(name.as_str())); if let Some(binding_id) = binding_id { - self.ctx.bindings[*binding_id].runtime_usage = usage; + self.semantic_model.add_local_reference( + binding_id, + stmt.range(), + ExecutionContext::Runtime, + ); } else { // Ensure that every nonlocal has an existing binding from a parent scope. 
- if self.settings.rules.enabled(Rule::NonlocalWithoutBinding) { + if self.enabled(Rule::NonlocalWithoutBinding) { self.diagnostics.push(Diagnostic::new( pylint::rules::NonlocalWithoutBinding { name: name.to_string(), @@ -292,7 +316,7 @@ where } } - if self.settings.rules.enabled(Rule::AmbiguousVariableName) { + if self.enabled(Rule::AmbiguousVariableName) { self.diagnostics .extend(names.iter().zip(ranges.iter()).filter_map(|(name, range)| { pycodestyle::rules::ambiguous_variable_name(name, *range) @@ -300,19 +324,20 @@ where } } Stmt::Break(_) => { - if self.settings.rules.enabled(Rule::BreakOutsideLoop) { - if let Some(diagnostic) = - pyflakes::rules::break_outside_loop(stmt, &mut self.ctx.parents().skip(1)) - { + if self.enabled(Rule::BreakOutsideLoop) { + if let Some(diagnostic) = pyflakes::rules::break_outside_loop( + stmt, + &mut self.semantic_model.parents().skip(1), + ) { self.diagnostics.push(diagnostic); } } } Stmt::Continue(_) => { - if self.settings.rules.enabled(Rule::ContinueOutsideLoop) { + if self.enabled(Rule::ContinueOutsideLoop) { if let Some(diagnostic) = pyflakes::rules::continue_outside_loop( stmt, - &mut self.ctx.parents().skip(1), + &mut self.semantic_model.parents().skip(1), ) { self.diagnostics.push(diagnostic); } @@ -334,19 +359,15 @@ where body, .. }) => { - if self - .settings - .rules - .enabled(Rule::DjangoNonLeadingReceiverDecorator) - { + if self.enabled(Rule::DjangoNonLeadingReceiverDecorator) { self.diagnostics .extend(flake8_django::rules::non_leading_receiver_decorator( decorator_list, - |expr| self.ctx.resolve_call_path(expr), + |expr| self.semantic_model.resolve_call_path(expr), )); } - if self.settings.rules.enabled(Rule::AmbiguousFunctionName) { + if self.enabled(Rule::AmbiguousFunctionName) { if let Some(diagnostic) = pycodestyle::rules::ambiguous_function_name(name, || { helpers::identifier_range(stmt, self.locator) @@ -356,28 +377,24 @@ where } } - if self.settings.rules.enabled(Rule::InvalidFunctionName) { + if self.enabled(Rule::InvalidFunctionName) { if let Some(diagnostic) = pep8_naming::rules::invalid_function_name( stmt, name, decorator_list, &self.settings.pep8_naming.ignore_names, - &self.ctx, + &self.semantic_model, self.locator, ) { self.diagnostics.push(diagnostic); } } - if self - .settings - .rules - .enabled(Rule::InvalidFirstArgumentNameForClassMethod) - { + if self.enabled(Rule::InvalidFirstArgumentNameForClassMethod) { if let Some(diagnostic) = pep8_naming::rules::invalid_first_argument_name_for_class_method( self, - self.ctx.scope(), + self.semantic_model.scope(), name, decorator_list, args, @@ -387,15 +404,11 @@ where } } - if self - .settings - .rules - .enabled(Rule::InvalidFirstArgumentNameForMethod) - { + if self.enabled(Rule::InvalidFirstArgumentNameForMethod) { if let Some(diagnostic) = pep8_naming::rules::invalid_first_argument_name_for_method( self, - self.ctx.scope(), + self.semantic_model.scope(), name, decorator_list, args, @@ -406,17 +419,17 @@ where } if self.is_stub { - if self.settings.rules.enabled(Rule::PassStatementStubBody) { + if self.enabled(Rule::PassStatementStubBody) { flake8_pyi::rules::pass_statement_stub_body(self, body); } - if self.settings.rules.enabled(Rule::NonEmptyStubBody) { + if self.enabled(Rule::NonEmptyStubBody) { flake8_pyi::rules::non_empty_stub_body(self, body); } } - if self.settings.rules.enabled(Rule::DunderFunctionName) { + if self.enabled(Rule::DunderFunctionName) { if let Some(diagnostic) = pep8_naming::rules::dunder_function_name( - self.ctx.scope(), + 
self.semantic_model.scope(), stmt, name, self.locator, @@ -425,26 +438,26 @@ where } } - if self.settings.rules.enabled(Rule::GlobalStatement) { + if self.enabled(Rule::GlobalStatement) { pylint::rules::global_statement(self, name); } - if self.settings.rules.enabled(Rule::LRUCacheWithoutParameters) + if self.enabled(Rule::LRUCacheWithoutParameters) && self.settings.target_version >= PythonVersion::Py38 { pyupgrade::rules::lru_cache_without_parameters(self, decorator_list); } - if self.settings.rules.enabled(Rule::LRUCacheWithMaxsizeNone) + if self.enabled(Rule::LRUCacheWithMaxsizeNone) && self.settings.target_version >= PythonVersion::Py39 { pyupgrade::rules::lru_cache_with_maxsize_none(self, decorator_list); } - if self.settings.rules.enabled(Rule::CachedInstanceMethod) { + if self.enabled(Rule::CachedInstanceMethod) { flake8_bugbear::rules::cached_instance_method(self, decorator_list); } - if self.settings.rules.any_enabled(&[ + if self.any_enabled(&[ Rule::UnnecessaryReturnNone, Rule::ImplicitReturnValue, Rule::ImplicitReturn, @@ -461,7 +474,7 @@ where ); } - if self.settings.rules.enabled(Rule::UselessReturn) { + if self.enabled(Rule::UselessReturn) { pylint::rules::useless_return( self, stmt, @@ -470,7 +483,7 @@ where ); } - if self.settings.rules.enabled(Rule::ComplexStructure) { + if self.enabled(Rule::ComplexStructure) { if let Some(diagnostic) = mccabe::rules::function_is_too_complex( stmt, name, @@ -482,20 +495,20 @@ where } } - if self.settings.rules.enabled(Rule::HardcodedPasswordDefault) { + if self.enabled(Rule::HardcodedPasswordDefault) { self.diagnostics .extend(flake8_bandit::rules::hardcoded_password_default(args)); } - if self.settings.rules.enabled(Rule::PropertyWithParameters) { + if self.enabled(Rule::PropertyWithParameters) { pylint::rules::property_with_parameters(self, stmt, decorator_list, args); } - if self.settings.rules.enabled(Rule::TooManyArguments) { + if self.enabled(Rule::TooManyArguments) { pylint::rules::too_many_arguments(self, args, stmt); } - if self.settings.rules.enabled(Rule::TooManyReturnStatements) { + if self.enabled(Rule::TooManyReturnStatements) { if let Some(diagnostic) = pylint::rules::too_many_return_statements( stmt, body, @@ -506,7 +519,7 @@ where } } - if self.settings.rules.enabled(Rule::TooManyBranches) { + if self.enabled(Rule::TooManyBranches) { if let Some(diagnostic) = pylint::rules::too_many_branches( stmt, body, @@ -517,7 +530,7 @@ where } } - if self.settings.rules.enabled(Rule::TooManyStatements) { + if self.enabled(Rule::TooManyStatements) { if let Some(diagnostic) = pylint::rules::too_many_statements( stmt, body, @@ -528,7 +541,7 @@ where } } - if self.settings.rules.any_enabled(&[ + if self.any_enabled(&[ Rule::PytestFixtureIncorrectParenthesesStyle, Rule::PytestFixturePositionalArgs, Rule::PytestExtraneousScopeFunction, @@ -551,25 +564,21 @@ where ); } - if self.settings.rules.any_enabled(&[ + if self.any_enabled(&[ Rule::PytestParametrizeNamesWrongType, Rule::PytestParametrizeValuesWrongType, ]) { flake8_pytest_style::rules::parametrize(self, decorator_list); } - if self.settings.rules.any_enabled(&[ + if self.any_enabled(&[ Rule::PytestIncorrectMarkParenthesesStyle, Rule::PytestUseFixturesWithoutParameters, ]) { flake8_pytest_style::rules::marks(self, decorator_list); } - if self - .settings - .rules - .enabled(Rule::BooleanPositionalArgInFunctionDefinition) - { + if self.enabled(Rule::BooleanPositionalArgInFunctionDefinition) { flake8_boolean_trap::rules::check_positional_boolean_in_def( self, name, @@ -578,11 +587,7 @@ 
where ); } - if self - .settings - .rules - .enabled(Rule::BooleanDefaultValueInFunctionDefinition) - { + if self.enabled(Rule::BooleanDefaultValueInFunctionDefinition) { flake8_boolean_trap::rules::check_boolean_default_value_in_function_definition( self, name, @@ -591,11 +596,7 @@ where ); } - if self - .settings - .rules - .enabled(Rule::UnexpectedSpecialMethodSignature) - { + if self.enabled(Rule::UnexpectedSpecialMethodSignature) { pylint::rules::unexpected_special_method_signature( self, stmt, @@ -606,29 +607,37 @@ where ); } - if self.settings.rules.enabled(Rule::FStringDocstring) { + if self.enabled(Rule::FStringDocstring) { flake8_bugbear::rules::f_string_docstring(self, body); } - if self.settings.rules.enabled(Rule::YieldInForLoop) { + if self.enabled(Rule::YieldInForLoop) { pyupgrade::rules::yield_in_for_loop(self, stmt); } - if self.ctx.scope().kind.is_class() { - if self.settings.rules.enabled(Rule::BuiltinAttributeShadowing) { - flake8_builtins::rules::builtin_attribute_shadowing(self, name, stmt); + if self.semantic_model.scope().kind.is_class() { + if self.enabled(Rule::BuiltinAttributeShadowing) { + flake8_builtins::rules::builtin_attribute_shadowing( + self, + name, + AnyShadowing::from(stmt), + ); } } else { - if self.settings.rules.enabled(Rule::BuiltinVariableShadowing) { - flake8_builtins::rules::builtin_variable_shadowing(self, name, stmt); + if self.enabled(Rule::BuiltinVariableShadowing) { + flake8_builtins::rules::builtin_variable_shadowing( + self, + name, + AnyShadowing::from(stmt), + ); } } } Stmt::Return(_) => { - if self.settings.rules.enabled(Rule::ReturnOutsideFunction) { + if self.enabled(Rule::ReturnOutsideFunction) { pyflakes::rules::return_outside_function(self, stmt); } - if self.settings.rules.enabled(Rule::ReturnInInit) { + if self.enabled(Rule::ReturnInInit) { pylint::rules::return_in_init(self, stmt); } } @@ -640,61 +649,45 @@ where body, range: _, }) => { - if self - .settings - .rules - .enabled(Rule::DjangoNullableModelStringField) - { + if self.enabled(Rule::DjangoNullableModelStringField) { self.diagnostics .extend(flake8_django::rules::nullable_model_string_field( self, body, )); } - if self - .settings - .rules - .enabled(Rule::DjangoExcludeWithModelForm) - { + if self.enabled(Rule::DjangoExcludeWithModelForm) { if let Some(diagnostic) = flake8_django::rules::exclude_with_model_form(self, bases, body) { self.diagnostics.push(diagnostic); } } - if self.settings.rules.enabled(Rule::DjangoAllWithModelForm) { + if self.enabled(Rule::DjangoAllWithModelForm) { if let Some(diagnostic) = flake8_django::rules::all_with_model_form(self, bases, body) { self.diagnostics.push(diagnostic); } } - if self - .settings - .rules - .enabled(Rule::DjangoModelWithoutDunderStr) - { + if self.enabled(Rule::DjangoModelWithoutDunderStr) { if let Some(diagnostic) = flake8_django::rules::model_without_dunder_str(self, bases, body, stmt) { self.diagnostics.push(diagnostic); } } - if self - .settings - .rules - .enabled(Rule::DjangoUnorderedBodyContentInModel) - { + if self.enabled(Rule::DjangoUnorderedBodyContentInModel) { flake8_django::rules::unordered_body_content_in_model(self, bases, body); } - if self.settings.rules.enabled(Rule::GlobalStatement) { + if self.enabled(Rule::GlobalStatement) { pylint::rules::global_statement(self, name); } - if self.settings.rules.enabled(Rule::UselessObjectInheritance) { + if self.enabled(Rule::UselessObjectInheritance) { pyupgrade::rules::useless_object_inheritance(self, stmt, name, bases, keywords); } - if 
self.settings.rules.enabled(Rule::AmbiguousClassName) { + if self.enabled(Rule::AmbiguousClassName) { if let Some(diagnostic) = pycodestyle::rules::ambiguous_class_name(name, || { helpers::identifier_range(stmt, self.locator) }) { @@ -702,7 +695,7 @@ where } } - if self.settings.rules.enabled(Rule::InvalidClassName) { + if self.enabled(Rule::InvalidClassName) { if let Some(diagnostic) = pep8_naming::rules::invalid_class_name(stmt, name, self.locator) { @@ -710,11 +703,7 @@ where } } - if self - .settings - .rules - .enabled(Rule::ErrorSuffixOnExceptionName) - { + if self.enabled(Rule::ErrorSuffixOnExceptionName) { if let Some(diagnostic) = pep8_naming::rules::error_suffix_on_exception_name( stmt, bases, @@ -726,7 +715,7 @@ where } if !self.is_stub { - if self.settings.rules.any_enabled(&[ + if self.any_enabled(&[ Rule::AbstractBaseClassWithoutAbstractMethod, Rule::EmptyMethodWithoutAbstractDecorator, ]) { @@ -736,77 +725,68 @@ where } } if self.is_stub { - if self.settings.rules.enabled(Rule::PassStatementStubBody) { + if self.enabled(Rule::PassStatementStubBody) { flake8_pyi::rules::pass_statement_stub_body(self, body); } - if self.settings.rules.enabled(Rule::PassInClassBody) { + if self.enabled(Rule::PassInClassBody) { flake8_pyi::rules::pass_in_class_body(self, stmt, body); } + if self.enabled(Rule::EllipsisInNonEmptyClassBody) { + flake8_pyi::rules::ellipsis_in_non_empty_class_body(self, stmt, body); + } } - if self - .settings - .rules - .enabled(Rule::PytestIncorrectMarkParenthesesStyle) - { + if self.enabled(Rule::PytestIncorrectMarkParenthesesStyle) { flake8_pytest_style::rules::marks(self, decorator_list); } - if self - .settings - .rules - .enabled(Rule::DuplicateClassFieldDefinition) - { + if self.enabled(Rule::DuplicateClassFieldDefinition) { flake8_pie::rules::duplicate_class_field_definition(self, stmt, body); } - if self.settings.rules.enabled(Rule::NonUniqueEnums) { + if self.enabled(Rule::NonUniqueEnums) { flake8_pie::rules::non_unique_enums(self, stmt, body); } - if self.settings.rules.any_enabled(&[ + if self.any_enabled(&[ Rule::MutableDataclassDefault, Rule::FunctionCallInDataclassDefaultArgument, - ]) && ruff::rules::is_dataclass(self, decorator_list) + ]) && ruff::rules::is_dataclass(&self.semantic_model, decorator_list) { - if self.settings.rules.enabled(Rule::MutableDataclassDefault) { + if self.enabled(Rule::MutableDataclassDefault) { ruff::rules::mutable_dataclass_default(self, body); } - if self - .settings - .rules - .enabled(Rule::FunctionCallInDataclassDefaultArgument) - { + if self.enabled(Rule::FunctionCallInDataclassDefaultArgument) { ruff::rules::function_call_in_dataclass_defaults(self, body); } } - if self.settings.rules.enabled(Rule::FStringDocstring) { + if self.enabled(Rule::FStringDocstring) { flake8_bugbear::rules::f_string_docstring(self, body); } - if self.settings.rules.enabled(Rule::BuiltinVariableShadowing) { - flake8_builtins::rules::builtin_variable_shadowing(self, name, stmt); + if self.enabled(Rule::BuiltinVariableShadowing) { + flake8_builtins::rules::builtin_variable_shadowing( + self, + name, + AnyShadowing::from(stmt), + ); } - if self.settings.rules.enabled(Rule::DuplicateBases) { + if self.enabled(Rule::DuplicateBases) { pylint::rules::duplicate_bases(self, name, bases); } } Stmt::Import(ast::StmtImport { names, range: _ }) => { - if self.settings.rules.enabled(Rule::MultipleImportsOnOneLine) { + if self.enabled(Rule::MultipleImportsOnOneLine) { pycodestyle::rules::multiple_imports_on_one_line(self, stmt, names); } - if self - 
.settings - .rules - .enabled(Rule::ModuleImportNotAtTopOfFile) - { + if self.enabled(Rule::ModuleImportNotAtTopOfFile) { pycodestyle::rules::module_import_not_at_top_of_file(self, stmt, self.locator); } - if self.settings.rules.enabled(Rule::GlobalStatement) { + if self.enabled(Rule::GlobalStatement) { for name in names.iter() { if let Some(asname) = name.asname.as_ref() { pylint::rules::global_statement(self, asname); @@ -816,33 +796,32 @@ where } } - if self.settings.rules.enabled(Rule::DeprecatedCElementTree) { + if self.enabled(Rule::DeprecatedCElementTree) { pyupgrade::rules::deprecated_c_element_tree(self, stmt); } - if self.settings.rules.enabled(Rule::DeprecatedMockImport) { + if self.enabled(Rule::DeprecatedMockImport) { pyupgrade::rules::deprecated_mock_import(self, stmt); } for alias in names { if &alias.name == "__future__" { let name = alias.asname.as_ref().unwrap_or(&alias.name); + self.add_binding( name, Binding { kind: BindingKind::FutureImportation, - runtime_usage: None, - // Always mark `__future__` imports as used. - synthetic_usage: Some((self.ctx.scope_id, alias.range())), - typing_usage: None, range: alias.range(), - source: self.ctx.stmt_id, - context: self.ctx.execution_context(), - exceptions: self.ctx.exceptions(), + references: Vec::new(), + source: self.semantic_model.stmt_id, + context: self.semantic_model.execution_context(), + exceptions: self.semantic_model.exceptions(), + flags: BindingFlags::empty(), }, ); - if self.settings.rules.enabled(Rule::LateFutureImport) { - if self.ctx.seen_futures_boundary() { + if self.enabled(Rule::LateFutureImport) { + if self.semantic_model.seen_futures_boundary() { self.diagnostics.push(Diagnostic::new( pyflakes::rules::LateFutureImport, stmt.range(), @@ -861,54 +840,51 @@ where name, full_name, }), - runtime_usage: None, - synthetic_usage: None, - typing_usage: None, range: alias.range(), - source: self.ctx.stmt_id, - context: self.ctx.execution_context(), - exceptions: self.ctx.exceptions(), + references: Vec::new(), + source: self.semantic_model.stmt_id, + context: self.semantic_model.execution_context(), + exceptions: self.semantic_model.exceptions(), + flags: BindingFlags::empty(), }, ); } else { - // Treat explicit re-export as usage (e.g., `from .applications - // import FastAPI as FastAPI`). 
- let is_explicit_reexport = alias - .asname - .as_ref() - .map_or(false, |asname| asname == &alias.name); - let name = alias.asname.as_ref().unwrap_or(&alias.name); let full_name = &alias.name; self.add_binding( name, Binding { kind: BindingKind::Importation(Importation { name, full_name }), - runtime_usage: None, - synthetic_usage: if is_explicit_reexport { - Some((self.ctx.scope_id, alias.range())) + range: alias.range(), + references: Vec::new(), + source: self.semantic_model.stmt_id, + context: self.semantic_model.execution_context(), + exceptions: self.semantic_model.exceptions(), + flags: if alias + .asname + .as_ref() + .map_or(false, |asname| asname == &alias.name) + { + BindingFlags::EXPLICIT_EXPORT } else { - None + BindingFlags::empty() }, - typing_usage: None, - range: alias.range(), - source: self.ctx.stmt_id, - context: self.ctx.execution_context(), - exceptions: self.ctx.exceptions(), }, ); if let Some(asname) = &alias.asname { - if self.settings.rules.enabled(Rule::BuiltinVariableShadowing) { + if self.enabled(Rule::BuiltinVariableShadowing) { flake8_builtins::rules::builtin_variable_shadowing( - self, asname, stmt, + self, + asname, + AnyShadowing::from(stmt), ); } } } // flake8-debugger - if self.settings.rules.enabled(Rule::Debugger) { + if self.enabled(Rule::Debugger) { if let Some(diagnostic) = flake8_debugger::rules::debugger_import(stmt, None, &alias.name) { @@ -917,8 +893,8 @@ where } // flake8_tidy_imports - if self.settings.rules.enabled(Rule::BannedApi) { - flake8_tidy_imports::banned_api::name_or_parent_is_banned( + if self.enabled(Rule::BannedApi) { + flake8_tidy_imports::rules::name_or_parent_is_banned( self, &alias.name, alias, @@ -927,14 +903,14 @@ where // pylint if !self.is_stub { - if self.settings.rules.enabled(Rule::UselessImportAlias) { + if self.enabled(Rule::UselessImportAlias) { pylint::rules::useless_import_alias(self, alias); } } - if self.settings.rules.enabled(Rule::ManualFromImport) { + if self.enabled(Rule::ManualFromImport) { pylint::rules::manual_from_import(self, stmt, alias, names); } - if self.settings.rules.enabled(Rule::ImportSelf) { + if self.enabled(Rule::ImportSelf) { if let Some(diagnostic) = pylint::rules::import_self(alias, self.module_path) { @@ -944,11 +920,7 @@ where if let Some(asname) = &alias.asname { let name = alias.name.split('.').last().unwrap(); - if self - .settings - .rules - .enabled(Rule::ConstantImportedAsNonConstant) - { + if self.enabled(Rule::ConstantImportedAsNonConstant) { if let Some(diagnostic) = pep8_naming::rules::constant_imported_as_non_constant( name, asname, alias, stmt, @@ -958,11 +930,7 @@ where } } - if self - .settings - .rules - .enabled(Rule::LowercaseImportedAsNonLowercase) - { + if self.enabled(Rule::LowercaseImportedAsNonLowercase) { if let Some(diagnostic) = pep8_naming::rules::lowercase_imported_as_non_lowercase( name, asname, alias, stmt, @@ -972,11 +940,7 @@ where } } - if self - .settings - .rules - .enabled(Rule::CamelcaseImportedAsLowercase) - { + if self.enabled(Rule::CamelcaseImportedAsLowercase) { if let Some(diagnostic) = pep8_naming::rules::camelcase_imported_as_lowercase( name, asname, alias, stmt, @@ -986,11 +950,7 @@ where } } - if self - .settings - .rules - .enabled(Rule::CamelcaseImportedAsConstant) - { + if self.enabled(Rule::CamelcaseImportedAsConstant) { if let Some(diagnostic) = pep8_naming::rules::camelcase_imported_as_constant( name, asname, alias, stmt, @@ -1000,11 +960,7 @@ where } } - if self - .settings - .rules - .enabled(Rule::CamelcaseImportedAsAcronym) - { + if 
self.enabled(Rule::CamelcaseImportedAsAcronym) { if let Some(diagnostic) = pep8_naming::rules::camelcase_imported_as_acronym( name, asname, alias, stmt, @@ -1015,7 +971,7 @@ where } } - if self.settings.rules.enabled(Rule::UnconventionalImportAlias) { + if self.enabled(Rule::UnconventionalImportAlias) { if let Some(diagnostic) = flake8_import_conventions::rules::conventional_import_alias( stmt, @@ -1028,7 +984,7 @@ where } } - if self.settings.rules.enabled(Rule::BannedImportAlias) { + if self.enabled(Rule::BannedImportAlias) { if let Some(asname) = &alias.asname { if let Some(diagnostic) = flake8_import_conventions::rules::banned_import_alias( @@ -1043,11 +999,7 @@ where } } - if self - .settings - .rules - .enabled(Rule::PytestIncorrectPytestImport) - { + if self.enabled(Rule::PytestIncorrectPytestImport) { if let Some(diagnostic) = flake8_pytest_style::rules::import( stmt, &alias.name, @@ -1066,15 +1018,11 @@ where }) => { let module = module.as_deref(); let level = level.map(|level| level.to_u32()); - if self - .settings - .rules - .enabled(Rule::ModuleImportNotAtTopOfFile) - { + if self.enabled(Rule::ModuleImportNotAtTopOfFile) { pycodestyle::rules::module_import_not_at_top_of_file(self, stmt, self.locator); } - if self.settings.rules.enabled(Rule::GlobalStatement) { + if self.enabled(Rule::GlobalStatement) { for name in names.iter() { if let Some(asname) = name.asname.as_ref() { pylint::rules::global_statement(self, asname); @@ -1084,40 +1032,38 @@ where } } - if self.settings.rules.enabled(Rule::UnnecessaryFutureImport) + if self.enabled(Rule::UnnecessaryFutureImport) && self.settings.target_version >= PythonVersion::Py37 { if let Some("__future__") = module { pyupgrade::rules::unnecessary_future_import(self, stmt, names); } } - if self.settings.rules.enabled(Rule::DeprecatedMockImport) { + if self.enabled(Rule::DeprecatedMockImport) { pyupgrade::rules::deprecated_mock_import(self, stmt); } - if self.settings.rules.enabled(Rule::DeprecatedCElementTree) { + if self.enabled(Rule::DeprecatedCElementTree) { pyupgrade::rules::deprecated_c_element_tree(self, stmt); } - if self.settings.rules.enabled(Rule::DeprecatedImport) { + if self.enabled(Rule::DeprecatedImport) { pyupgrade::rules::deprecated_import(self, stmt, names, module, level); } - if self.settings.rules.enabled(Rule::UnnecessaryBuiltinImport) { + if self.enabled(Rule::UnnecessaryBuiltinImport) { if let Some(module) = module { pyupgrade::rules::unnecessary_builtin_import(self, stmt, module, names); } } - if self.settings.rules.enabled(Rule::BannedApi) { + if self.enabled(Rule::BannedApi) { if let Some(module) = helpers::resolve_imported_module_path(level, module, self.module_path) { - flake8_tidy_imports::banned_api::name_or_parent_is_banned( - self, &module, stmt, - ); + flake8_tidy_imports::rules::name_or_parent_is_banned(self, &module, stmt); for alias in names { if &alias.name == "*" { continue; } - flake8_tidy_imports::banned_api::name_is_banned( + flake8_tidy_imports::rules::name_is_banned( self, format!("{module}.{}", alias.name), alias, @@ -1126,11 +1072,7 @@ where } } - if self - .settings - .rules - .enabled(Rule::PytestIncorrectPytestImport) - { + if self.enabled(Rule::PytestIncorrectPytestImport) { if let Some(diagnostic) = flake8_pytest_style::rules::import_from(stmt, module, level) { @@ -1141,27 +1083,26 @@ where for alias in names { if let Some("__future__") = module { let name = alias.asname.as_ref().unwrap_or(&alias.name); + self.add_binding( name, Binding { kind: BindingKind::FutureImportation, - runtime_usage: 
None, - // Always mark `__future__` imports as used. - synthetic_usage: Some((self.ctx.scope_id, alias.range())), - typing_usage: None, range: alias.range(), - source: self.ctx.stmt_id, - context: self.ctx.execution_context(), - exceptions: self.ctx.exceptions(), + references: Vec::new(), + source: self.semantic_model.stmt_id, + context: self.semantic_model.execution_context(), + exceptions: self.semantic_model.exceptions(), + flags: BindingFlags::empty(), }, ); - if self.settings.rules.enabled(Rule::FutureFeatureNotDefined) { + if self.enabled(Rule::FutureFeatureNotDefined) { pyflakes::rules::future_feature_not_defined(self, alias); } - if self.settings.rules.enabled(Rule::LateFutureImport) { - if self.ctx.seen_futures_boundary() { + if self.enabled(Rule::LateFutureImport) { + if self.semantic_model.seen_futures_boundary() { self.diagnostics.push(Diagnostic::new( pyflakes::rules::LateFutureImport, stmt.range(), @@ -1169,16 +1110,12 @@ where } } } else if &alias.name == "*" { - self.ctx + self.semantic_model .scope_mut() .add_star_import(StarImportation { level, module }); - if self - .settings - .rules - .enabled(Rule::UndefinedLocalWithNestedImportStarUsage) - { - let scope = self.ctx.scope(); + if self.enabled(Rule::UndefinedLocalWithNestedImportStarUsage) { + let scope = self.semantic_model.scope(); if !matches!(scope.kind, ScopeKind::Module) { self.diagnostics.push(Diagnostic::new( pyflakes::rules::UndefinedLocalWithNestedImportStarUsage { @@ -1189,11 +1126,7 @@ where } } - if self - .settings - .rules - .enabled(Rule::UndefinedLocalWithImportStar) - { + if self.enabled(Rule::UndefinedLocalWithImportStar) { self.diagnostics.push(Diagnostic::new( pyflakes::rules::UndefinedLocalWithImportStar { name: helpers::format_import_from(level, module), @@ -1203,20 +1136,15 @@ where } } else { if let Some(asname) = &alias.asname { - if self.settings.rules.enabled(Rule::BuiltinVariableShadowing) { + if self.enabled(Rule::BuiltinVariableShadowing) { flake8_builtins::rules::builtin_variable_shadowing( - self, asname, stmt, + self, + asname, + AnyShadowing::from(stmt), ); } } - // Treat explicit re-export as usage (e.g., `from .applications - // import FastAPI as FastAPI`). - let is_explicit_reexport = alias - .asname - .as_ref() - .map_or(false, |asname| asname == &alias.name); - // Given `from foo import bar`, `name` would be "bar" and `full_name` would // be "foo.bar". Given `from foo import bar as baz`, `name` would be "baz" // and `full_name` would be "foo.bar". 
@@ -1230,38 +1158,39 @@ where name, full_name, }), - runtime_usage: None, - synthetic_usage: if is_explicit_reexport { - Some((self.ctx.scope_id, alias.range())) + range: alias.range(), + references: Vec::new(), + source: self.semantic_model.stmt_id, + context: self.semantic_model.execution_context(), + exceptions: self.semantic_model.exceptions(), + flags: if alias + .asname + .as_ref() + .map_or(false, |asname| asname == &alias.name) + { + BindingFlags::EXPLICIT_EXPORT } else { - None + BindingFlags::empty() }, - typing_usage: None, - range: alias.range(), - source: self.ctx.stmt_id, - context: self.ctx.execution_context(), - exceptions: self.ctx.exceptions(), }, ); } - if self.settings.rules.enabled(Rule::RelativeImports) { - if let Some(diagnostic) = - flake8_tidy_imports::relative_imports::banned_relative_import( - self, - stmt, - level, - module, - self.module_path, - self.settings.flake8_tidy_imports.ban_relative_imports, - ) - { + if self.enabled(Rule::RelativeImports) { + if let Some(diagnostic) = flake8_tidy_imports::rules::banned_relative_import( + self, + stmt, + level, + module, + self.module_path, + self.settings.flake8_tidy_imports.ban_relative_imports, + ) { self.diagnostics.push(diagnostic); } } // flake8-debugger - if self.settings.rules.enabled(Rule::Debugger) { + if self.enabled(Rule::Debugger) { if let Some(diagnostic) = flake8_debugger::rules::debugger_import(stmt, module, &alias.name) { @@ -1269,7 +1198,7 @@ where } } - if self.settings.rules.enabled(Rule::UnconventionalImportAlias) { + if self.enabled(Rule::UnconventionalImportAlias) { let full_name = helpers::format_import_from_member(level, module, &alias.name); if let Some(diagnostic) = @@ -1284,7 +1213,7 @@ where } } - if self.settings.rules.enabled(Rule::BannedImportAlias) { + if self.enabled(Rule::BannedImportAlias) { if let Some(asname) = &alias.asname { let full_name = helpers::format_import_from_member(level, module, &alias.name); @@ -1302,11 +1231,7 @@ where } if let Some(asname) = &alias.asname { - if self - .settings - .rules - .enabled(Rule::ConstantImportedAsNonConstant) - { + if self.enabled(Rule::ConstantImportedAsNonConstant) { if let Some(diagnostic) = pep8_naming::rules::constant_imported_as_non_constant( &alias.name, @@ -1319,11 +1244,7 @@ where } } - if self - .settings - .rules - .enabled(Rule::LowercaseImportedAsNonLowercase) - { + if self.enabled(Rule::LowercaseImportedAsNonLowercase) { if let Some(diagnostic) = pep8_naming::rules::lowercase_imported_as_non_lowercase( &alias.name, @@ -1336,11 +1257,7 @@ where } } - if self - .settings - .rules - .enabled(Rule::CamelcaseImportedAsLowercase) - { + if self.enabled(Rule::CamelcaseImportedAsLowercase) { if let Some(diagnostic) = pep8_naming::rules::camelcase_imported_as_lowercase( &alias.name, @@ -1353,11 +1270,7 @@ where } } - if self - .settings - .rules - .enabled(Rule::CamelcaseImportedAsConstant) - { + if self.enabled(Rule::CamelcaseImportedAsConstant) { if let Some(diagnostic) = pep8_naming::rules::camelcase_imported_as_constant( &alias.name, @@ -1370,11 +1283,7 @@ where } } - if self - .settings - .rules - .enabled(Rule::CamelcaseImportedAsAcronym) - { + if self.enabled(Rule::CamelcaseImportedAsAcronym) { if let Some(diagnostic) = pep8_naming::rules::camelcase_imported_as_acronym( &alias.name, @@ -1389,14 +1298,14 @@ where // pylint if !self.is_stub { - if self.settings.rules.enabled(Rule::UselessImportAlias) { + if self.enabled(Rule::UselessImportAlias) { pylint::rules::useless_import_alias(self, alias); } } } } - if 
self.settings.rules.enabled(Rule::ImportSelf) { + if self.enabled(Rule::ImportSelf) { if let Some(diagnostic) = pylint::rules::import_from_self(level, module, names, self.module_path) { @@ -1404,7 +1313,7 @@ where } } - if self.settings.rules.enabled(Rule::BannedImportFrom) { + if self.enabled(Rule::BannedImportFrom) { if let Some(diagnostic) = flake8_import_conventions::rules::banned_import_from( stmt, &helpers::format_import_from(level, module), @@ -1415,17 +1324,17 @@ where } } Stmt::Raise(ast::StmtRaise { exc, .. }) => { - if self.settings.rules.enabled(Rule::RaiseNotImplemented) { + if self.enabled(Rule::RaiseNotImplemented) { if let Some(expr) = exc { pyflakes::rules::raise_not_implemented(self, expr); } } - if self.settings.rules.enabled(Rule::CannotRaiseLiteral) { + if self.enabled(Rule::CannotRaiseLiteral) { if let Some(exc) = exc { flake8_bugbear::rules::cannot_raise_literal(self, exc); } } - if self.settings.rules.any_enabled(&[ + if self.any_enabled(&[ Rule::RawStringInException, Rule::FStringInException, Rule::DotFormatInException, @@ -1434,26 +1343,22 @@ where flake8_errmsg::rules::string_in_exception(self, stmt, exc); } } - if self.settings.rules.enabled(Rule::OSErrorAlias) { + if self.enabled(Rule::OSErrorAlias) { if let Some(item) = exc { pyupgrade::rules::os_error_alias_raise(self, item); } } - if self.settings.rules.enabled(Rule::RaiseVanillaClass) { + if self.enabled(Rule::RaiseVanillaClass) { if let Some(expr) = exc { tryceratops::rules::raise_vanilla_class(self, expr); } } - if self.settings.rules.enabled(Rule::RaiseVanillaArgs) { + if self.enabled(Rule::RaiseVanillaArgs) { if let Some(expr) = exc { tryceratops::rules::raise_vanilla_args(self, expr); } } - if self - .settings - .rules - .enabled(Rule::UnnecessaryParenOnRaiseException) - { + if self.enabled(Rule::UnnecessaryParenOnRaiseException) { if let Some(expr) = exc { flake8_raise::rules::unnecessary_paren_on_raise_exception(self, expr); } @@ -1462,7 +1367,7 @@ where Stmt::AugAssign(ast::StmtAugAssign { target, .. }) => { self.handle_node_load(target); - if self.settings.rules.enabled(Rule::GlobalStatement) { + if self.enabled(Rule::GlobalStatement) { if let Expr::Name(ast::ExprName { id, .. 
}) = target.as_ref() { pylint::rules::global_statement(self, id); } @@ -1474,73 +1379,69 @@ where orelse, range: _, }) => { - if self.settings.rules.enabled(Rule::IfTuple) { + if self.enabled(Rule::IfTuple) { pyflakes::rules::if_tuple(self, stmt, test); } - if self.settings.rules.enabled(Rule::CollapsibleIf) { + if self.enabled(Rule::CollapsibleIf) { flake8_simplify::rules::nested_if_statements( self, stmt, test, body, orelse, - self.ctx.stmt_parent(), + self.semantic_model.stmt_parent(), ); } - if self.settings.rules.enabled(Rule::IfWithSameArms) { - flake8_simplify::rules::if_with_same_arms(self, stmt, self.ctx.stmt_parent()); + if self.enabled(Rule::IfWithSameArms) { + flake8_simplify::rules::if_with_same_arms( + self, + stmt, + self.semantic_model.stmt_parent(), + ); } - if self.settings.rules.enabled(Rule::NeedlessBool) { + if self.enabled(Rule::NeedlessBool) { flake8_simplify::rules::needless_bool(self, stmt); } - if self - .settings - .rules - .enabled(Rule::IfElseBlockInsteadOfDictLookup) - { + if self.enabled(Rule::IfElseBlockInsteadOfDictLookup) { flake8_simplify::rules::manual_dict_lookup( self, stmt, test, body, orelse, - self.ctx.stmt_parent(), + self.semantic_model.stmt_parent(), ); } - if self.settings.rules.enabled(Rule::IfElseBlockInsteadOfIfExp) { + if self.enabled(Rule::IfElseBlockInsteadOfIfExp) { flake8_simplify::rules::use_ternary_operator( self, stmt, - self.ctx.stmt_parent(), + self.semantic_model.stmt_parent(), ); } - if self - .settings - .rules - .enabled(Rule::IfElseBlockInsteadOfDictGet) - { + if self.enabled(Rule::IfElseBlockInsteadOfDictGet) { flake8_simplify::rules::use_dict_get_with_default( self, stmt, test, body, orelse, - self.ctx.stmt_parent(), + self.semantic_model.stmt_parent(), ); } - if self.settings.rules.enabled(Rule::TypeCheckWithoutTypeError) { + if self.enabled(Rule::TypeCheckWithoutTypeError) { tryceratops::rules::type_check_without_type_error( self, body, test, orelse, - self.ctx.stmt_parent(), + self.semantic_model.stmt_parent(), ); } - if self.settings.rules.enabled(Rule::OutdatedVersionBlock) { + if self.enabled(Rule::OutdatedVersionBlock) { pyupgrade::rules::outdated_version_block(self, stmt, test, body, orelse); } - if self.settings.rules.enabled(Rule::CollapsibleElseIf) { + if self.enabled(Rule::CollapsibleElseIf) { if let Some(diagnostic) = pylint::rules::collapsible_else_if(orelse, self.locator) { @@ -1553,22 +1454,22 @@ where msg, range: _, }) => { - if !self.ctx.in_type_checking_block() { - if self.settings.rules.enabled(Rule::Assert) { + if !self.semantic_model.in_type_checking_block() { + if self.enabled(Rule::Assert) { self.diagnostics .push(flake8_bandit::rules::assert_used(stmt)); } } - if self.settings.rules.enabled(Rule::AssertTuple) { + if self.enabled(Rule::AssertTuple) { pyflakes::rules::assert_tuple(self, stmt, test); } - if self.settings.rules.enabled(Rule::AssertFalse) { + if self.enabled(Rule::AssertFalse) { flake8_bugbear::rules::assert_false(self, stmt, test, msg.as_deref()); } - if self.settings.rules.enabled(Rule::PytestAssertAlwaysFalse) { + if self.enabled(Rule::PytestAssertAlwaysFalse) { flake8_pytest_style::rules::assert_falsy(self, stmt, test); } - if self.settings.rules.enabled(Rule::PytestCompositeAssertion) { + if self.enabled(Rule::PytestCompositeAssertion) { flake8_pytest_style::rules::composite_condition( self, stmt, @@ -1576,41 +1477,37 @@ where msg.as_deref(), ); } - if self.settings.rules.enabled(Rule::AssertOnStringLiteral) { + if self.enabled(Rule::AssertOnStringLiteral) { 
pylint::rules::assert_on_string_literal(self, test); } - if self.settings.rules.enabled(Rule::InvalidMockAccess) { + if self.enabled(Rule::InvalidMockAccess) { pygrep_hooks::rules::non_existent_mock_method(self, test); } } Stmt::With(ast::StmtWith { items, body, .. }) => { - if self.settings.rules.enabled(Rule::AssertRaisesException) { + if self.enabled(Rule::AssertRaisesException) { flake8_bugbear::rules::assert_raises_exception(self, stmt, items); } - if self - .settings - .rules - .enabled(Rule::PytestRaisesWithMultipleStatements) - { + if self.enabled(Rule::PytestRaisesWithMultipleStatements) { flake8_pytest_style::rules::complex_raises(self, stmt, items, body); } - if self.settings.rules.enabled(Rule::MultipleWithStatements) { + if self.enabled(Rule::MultipleWithStatements) { flake8_simplify::rules::multiple_with_statements( self, stmt, body, - self.ctx.stmt_parent(), + self.semantic_model.stmt_parent(), ); } - if self.settings.rules.enabled(Rule::RedefinedLoopName) { + if self.enabled(Rule::RedefinedLoopName) { pylint::rules::redefined_loop_name(self, &Node::Stmt(stmt)); } } Stmt::While(ast::StmtWhile { body, orelse, .. }) => { - if self.settings.rules.enabled(Rule::FunctionUsesLoopVariable) { + if self.enabled(Rule::FunctionUsesLoopVariable) { flake8_bugbear::rules::function_uses_loop_variable(self, &Node::Stmt(stmt)); } - if self.settings.rules.enabled(Rule::UselessElseOnLoop) { + if self.enabled(Rule::UselessElseOnLoop) { pylint::rules::useless_else_on_loop(self, stmt, body, orelse); } } @@ -1628,37 +1525,33 @@ where orelse, .. }) => { - if self.settings.rules.enabled(Rule::UnusedLoopControlVariable) { - self.deferred.for_loops.push(self.ctx.snapshot()); + if self.enabled(Rule::UnusedLoopControlVariable) { + self.deferred.for_loops.push(self.semantic_model.snapshot()); } - if self - .settings - .rules - .enabled(Rule::LoopVariableOverridesIterator) - { + if self.enabled(Rule::LoopVariableOverridesIterator) { flake8_bugbear::rules::loop_variable_overrides_iterator(self, target, iter); } - if self.settings.rules.enabled(Rule::FunctionUsesLoopVariable) { + if self.enabled(Rule::FunctionUsesLoopVariable) { flake8_bugbear::rules::function_uses_loop_variable(self, &Node::Stmt(stmt)); } - if self.settings.rules.enabled(Rule::ReuseOfGroupbyGenerator) { + if self.enabled(Rule::ReuseOfGroupbyGenerator) { flake8_bugbear::rules::reuse_of_groupby_generator(self, target, body, iter); } - if self.settings.rules.enabled(Rule::UselessElseOnLoop) { + if self.enabled(Rule::UselessElseOnLoop) { pylint::rules::useless_else_on_loop(self, stmt, body, orelse); } - if self.settings.rules.enabled(Rule::RedefinedLoopName) { + if self.enabled(Rule::RedefinedLoopName) { pylint::rules::redefined_loop_name(self, &Node::Stmt(stmt)); } if matches!(stmt, Stmt::For(_)) { - if self.settings.rules.enabled(Rule::ReimplementedBuiltin) { + if self.enabled(Rule::ReimplementedBuiltin) { flake8_simplify::rules::convert_for_loop_to_any_all( self, stmt, - self.ctx.sibling_stmt(), + self.semantic_model.sibling_stmt(), ); } - if self.settings.rules.enabled(Rule::InDictKeys) { + if self.enabled(Rule::InDictKeys) { flake8_simplify::rules::key_in_dict_for(self, target, iter); } } @@ -1677,75 +1570,71 @@ where finalbody, range: _, }) => { - if self.settings.rules.enabled(Rule::DefaultExceptNotLast) { + if self.enabled(Rule::DefaultExceptNotLast) { if let Some(diagnostic) = pyflakes::rules::default_except_not_last(handlers, self.locator) { self.diagnostics.push(diagnostic); } } - if self.settings.rules.any_enabled(&[ + if 
self.any_enabled(&[ Rule::DuplicateHandlerException, Rule::DuplicateTryBlockException, ]) { flake8_bugbear::rules::duplicate_exceptions(self, handlers); } - if self - .settings - .rules - .enabled(Rule::RedundantTupleInExceptionHandler) - { + if self.enabled(Rule::RedundantTupleInExceptionHandler) { flake8_bugbear::rules::redundant_tuple_in_exception_handler(self, handlers); } - if self.settings.rules.enabled(Rule::OSErrorAlias) { + if self.enabled(Rule::OSErrorAlias) { pyupgrade::rules::os_error_alias_handlers(self, handlers); } - if self.settings.rules.enabled(Rule::PytestAssertInExcept) { + if self.enabled(Rule::PytestAssertInExcept) { self.diagnostics.extend( flake8_pytest_style::rules::assert_in_exception_handler(handlers), ); } - if self.settings.rules.enabled(Rule::SuppressibleException) { + if self.enabled(Rule::SuppressibleException) { flake8_simplify::rules::suppressible_exception( self, stmt, body, handlers, orelse, finalbody, ); } - if self.settings.rules.enabled(Rule::ReturnInTryExceptFinally) { + if self.enabled(Rule::ReturnInTryExceptFinally) { flake8_simplify::rules::return_in_try_except_finally( self, body, handlers, finalbody, ); } - if self.settings.rules.enabled(Rule::TryConsiderElse) { + if self.enabled(Rule::TryConsiderElse) { tryceratops::rules::try_consider_else(self, body, orelse, handlers); } - if self.settings.rules.enabled(Rule::VerboseRaise) { + if self.enabled(Rule::VerboseRaise) { tryceratops::rules::verbose_raise(self, handlers); } - if self.settings.rules.enabled(Rule::VerboseLogMessage) { + if self.enabled(Rule::VerboseLogMessage) { tryceratops::rules::verbose_log_message(self, handlers); } - if self.settings.rules.enabled(Rule::RaiseWithinTry) { + if self.enabled(Rule::RaiseWithinTry) { tryceratops::rules::raise_within_try(self, body, handlers); } - if self.settings.rules.enabled(Rule::UselessTryExcept) { + if self.enabled(Rule::UselessTryExcept) { tryceratops::rules::useless_try_except(self, handlers); } - if self.settings.rules.enabled(Rule::ErrorInsteadOfException) { + if self.enabled(Rule::ErrorInsteadOfException) { tryceratops::rules::error_instead_of_exception(self, handlers); } } Stmt::Assign(ast::StmtAssign { targets, value, .. }) => { - if self.settings.rules.enabled(Rule::LambdaAssignment) { + if self.enabled(Rule::LambdaAssignment) { if let [target] = &targets[..] { pycodestyle::rules::lambda_assignment(self, target, value, None, stmt); } } - if self.settings.rules.enabled(Rule::AssignmentToOsEnviron) { + if self.enabled(Rule::AssignmentToOsEnviron) { flake8_bugbear::rules::assignment_to_os_environ(self, targets); } - if self.settings.rules.enabled(Rule::HardcodedPasswordString) { + if self.enabled(Rule::HardcodedPasswordString) { if let Some(diagnostic) = flake8_bandit::rules::assign_hardcoded_password_string(value, targets) { @@ -1753,7 +1642,7 @@ where } } - if self.settings.rules.enabled(Rule::GlobalStatement) { + if self.enabled(Rule::GlobalStatement) { for target in targets.iter() { if let Expr::Name(ast::ExprName { id, .. 
}) = target { pylint::rules::global_statement(self, id); @@ -1761,56 +1650,48 @@ where } } - if self.settings.rules.enabled(Rule::UselessMetaclassType) { + if self.enabled(Rule::UselessMetaclassType) { pyupgrade::rules::useless_metaclass_type(self, stmt, value, targets); } - if self - .settings - .rules - .enabled(Rule::ConvertTypedDictFunctionalToClass) - { + if self.enabled(Rule::ConvertTypedDictFunctionalToClass) { pyupgrade::rules::convert_typed_dict_functional_to_class( self, stmt, targets, value, ); } - if self - .settings - .rules - .enabled(Rule::ConvertNamedTupleFunctionalToClass) - { + if self.enabled(Rule::ConvertNamedTupleFunctionalToClass) { pyupgrade::rules::convert_named_tuple_functional_to_class( self, stmt, targets, value, ); } - if self.settings.rules.enabled(Rule::UnpackedListComprehension) { + if self.enabled(Rule::UnpackedListComprehension) { pyupgrade::rules::unpacked_list_comprehension(self, targets, value); } - if self.settings.rules.enabled(Rule::PandasDfVariableName) { + if self.enabled(Rule::PandasDfVariableName) { if let Some(diagnostic) = pandas_vet::rules::assignment_to_df(targets) { self.diagnostics.push(diagnostic); } } if self.is_stub { - if self.settings.rules.any_enabled(&[ + if self.any_enabled(&[ Rule::UnprefixedTypeParam, Rule::AssignmentDefaultInStub, Rule::UnannotatedAssignmentInStub, ]) { // Ignore assignments in function bodies; those are covered by other rules. - if !self.ctx.scopes().any(|scope| scope.kind.is_function()) { - if self.settings.rules.enabled(Rule::UnprefixedTypeParam) { + if !self + .semantic_model + .scopes() + .any(|scope| scope.kind.is_function()) + { + if self.enabled(Rule::UnprefixedTypeParam) { flake8_pyi::rules::prefix_type_params(self, value, targets); } - if self.settings.rules.enabled(Rule::AssignmentDefaultInStub) { + if self.enabled(Rule::AssignmentDefaultInStub) { flake8_pyi::rules::assignment_default_in_stub(self, targets, value); } - if self - .settings - .rules - .enabled(Rule::UnannotatedAssignmentInStub) - { + if self.enabled(Rule::UnannotatedAssignmentInStub) { flake8_pyi::rules::unannotated_assignment_in_stub( self, targets, value, ); @@ -1825,7 +1706,7 @@ where annotation, .. }) => { - if self.settings.rules.enabled(Rule::LambdaAssignment) { + if self.enabled(Rule::LambdaAssignment) { if let Some(value) = value { pycodestyle::rules::lambda_assignment( self, @@ -1836,11 +1717,7 @@ where ); } } - if self - .settings - .rules - .enabled(Rule::UnintentionalTypeAnnotation) - { + if self.enabled(Rule::UnintentionalTypeAnnotation) { flake8_bugbear::rules::unintentional_type_annotation( self, target, @@ -1850,27 +1727,34 @@ where } if self.is_stub { if let Some(value) = value { - if self.settings.rules.enabled(Rule::AssignmentDefaultInStub) { + if self.enabled(Rule::AssignmentDefaultInStub) { // Ignore assignments in function bodies; those are covered by other rules. 
- if !self.ctx.scopes().any(|scope| scope.kind.is_function()) { + if !self + .semantic_model + .scopes() + .any(|scope| scope.kind.is_function()) + { flake8_pyi::rules::annotated_assignment_default_in_stub( self, target, value, annotation, ); } } } - if self.ctx.match_typing_expr(annotation, "TypeAlias") { - if self.settings.rules.enabled(Rule::SnakeCaseTypeAlias) { + if self + .semantic_model + .match_typing_expr(annotation, "TypeAlias") + { + if self.enabled(Rule::SnakeCaseTypeAlias) { flake8_pyi::rules::snake_case_type_alias(self, target); } - if self.settings.rules.enabled(Rule::TSuffixedTypeAlias) { + if self.enabled(Rule::TSuffixedTypeAlias) { flake8_pyi::rules::t_suffixed_type_alias(self, target); } } } } Stmt::Delete(ast::StmtDelete { targets, range: _ }) => { - if self.settings.rules.enabled(Rule::GlobalStatement) { + if self.enabled(Rule::GlobalStatement) { for target in targets.iter() { if let Expr::Name(ast::ExprName { id, .. }) = target { pylint::rules::global_statement(self, id); @@ -1879,18 +1763,21 @@ where } } Stmt::Expr(ast::StmtExpr { value, range: _ }) => { - if self.settings.rules.enabled(Rule::UselessComparison) { + if self.enabled(Rule::UselessComparison) { flake8_bugbear::rules::useless_comparison(self, value); } - if self.settings.rules.enabled(Rule::UselessExpression) { + if self.enabled(Rule::UselessExpression) { flake8_bugbear::rules::useless_expression(self, value); } - if self.settings.rules.enabled(Rule::InvalidMockAccess) { + if self.enabled(Rule::InvalidMockAccess) { pygrep_hooks::rules::uncalled_mock_method(self, value); } - if self.settings.rules.enabled(Rule::AsyncioDanglingTask) { + if self.enabled(Rule::NamedExprWithoutContext) { + pylint::rules::named_expr_without_context(self, value); + } + if self.enabled(Rule::AsyncioDanglingTask) { if let Some(diagnostic) = ruff::rules::asyncio_dangling_task(value, |expr| { - self.ctx.resolve_call_path(expr) + self.semantic_model.resolve_call_path(expr) }) { self.diagnostics.push(diagnostic); } @@ -1925,7 +1812,7 @@ where // Function annotations are always evaluated at runtime, unless future annotations // are enabled. 
- let runtime_annotation = !self.ctx.future_annotations(); + let runtime_annotation = !self.semantic_model.future_annotations(); for arg in &args.posonlyargs { if let Some(expr) = &arg.annotation { @@ -1990,13 +1877,12 @@ where name, Binding { kind: BindingKind::FunctionDefinition, - runtime_usage: None, - synthetic_usage: None, - typing_usage: None, range: stmt.range(), - source: self.ctx.stmt_id, - context: self.ctx.execution_context(), - exceptions: self.ctx.exceptions(), + references: Vec::new(), + source: self.semantic_model.stmt_id, + context: self.semantic_model.execution_context(), + exceptions: self.semantic_model.exceptions(), + flags: BindingFlags::empty(), }, ); @@ -2004,43 +1890,47 @@ where let globals = helpers::extract_globals(body); for (name, stmt) in helpers::extract_globals(body) { if self - .ctx + .semantic_model .global_scope() .get(name) - .map_or(true, |index| self.ctx.bindings[*index].kind.is_annotation()) + .map_or(true, |binding_id| { + self.semantic_model.bindings[binding_id] + .kind + .is_annotation() + }) { - let id = self.ctx.bindings.push(Binding { + let id = self.semantic_model.bindings.push(Binding { kind: BindingKind::Assignment, - runtime_usage: None, - synthetic_usage: None, - typing_usage: None, range: stmt.range(), - source: self.ctx.stmt_id, - context: self.ctx.execution_context(), - exceptions: self.ctx.exceptions(), + references: Vec::new(), + source: self.semantic_model.stmt_id, + context: self.semantic_model.execution_context(), + exceptions: self.semantic_model.exceptions(), + flags: BindingFlags::empty(), }); - self.ctx.global_scope_mut().add(name, id); + self.semantic_model.global_scope_mut().add(name, id); } } let definition = docstrings::extraction::extract_definition( ExtractionTarget::Function, stmt, - self.ctx.definition_id, - &self.ctx.definitions, + self.semantic_model.definition_id, + &self.semantic_model.definitions, ); - self.ctx.push_definition(definition); + self.semantic_model.push_definition(definition); - self.ctx.push_scope(ScopeKind::Function(FunctionDef { - name, - body, - args, - decorator_list, - async_: matches!(stmt, Stmt::AsyncFunctionDef(_)), - globals, - })); + self.semantic_model + .push_scope(ScopeKind::Function(FunctionDef { + name, + body, + args, + decorator_list, + async_: matches!(stmt, Stmt::AsyncFunctionDef(_)), + globals, + })); - self.deferred.functions.push(self.ctx.snapshot()); + self.deferred.functions.push(self.semantic_model.snapshot()); } Stmt::ClassDef(ast::StmtClassDef { body, @@ -2064,34 +1954,37 @@ where let globals = helpers::extract_globals(body); for (name, stmt) in &globals { if self - .ctx + .semantic_model .global_scope() .get(name) - .map_or(true, |index| self.ctx.bindings[*index].kind.is_annotation()) + .map_or(true, |binding_id| { + self.semantic_model.bindings[binding_id] + .kind + .is_annotation() + }) { - let id = self.ctx.bindings.push(Binding { + let id = self.semantic_model.bindings.push(Binding { kind: BindingKind::Assignment, - runtime_usage: None, - synthetic_usage: None, - typing_usage: None, range: stmt.range(), - source: self.ctx.stmt_id, - context: self.ctx.execution_context(), - exceptions: self.ctx.exceptions(), + references: Vec::new(), + source: self.semantic_model.stmt_id, + context: self.semantic_model.execution_context(), + exceptions: self.semantic_model.exceptions(), + flags: BindingFlags::empty(), }); - self.ctx.global_scope_mut().add(name, id); + self.semantic_model.global_scope_mut().add(name, id); } } let definition = docstrings::extraction::extract_definition( 
ExtractionTarget::Class, stmt, - self.ctx.definition_id, - &self.ctx.definitions, + self.semantic_model.definition_id, + &self.semantic_model.definitions, ); - self.ctx.push_definition(definition); + self.semantic_model.push_definition(definition); - self.ctx.push_scope(ScopeKind::Class(ClassDef { + self.semantic_model.push_scope(ScopeKind::Class(ClassDef { name, bases, keywords, @@ -2117,7 +2010,7 @@ where }) => { let mut handled_exceptions = Exceptions::empty(); for type_ in extract_handled_exceptions(handlers) { - if let Some(call_path) = self.ctx.resolve_call_path(type_) { + if let Some(call_path) = self.semantic_model.resolve_call_path(type_) { if call_path.as_slice() == ["", "NameError"] { handled_exceptions |= Exceptions::NAME_ERROR; } else if call_path.as_slice() == ["", "ModuleNotFoundError"] { @@ -2126,22 +2019,24 @@ where } } - self.ctx.handled_exceptions.push(handled_exceptions); + self.semantic_model + .handled_exceptions + .push(handled_exceptions); - if self.settings.rules.enabled(Rule::JumpStatementInFinally) { + if self.enabled(Rule::JumpStatementInFinally) { flake8_bugbear::rules::jump_statement_in_finally(self, finalbody); } - if self.settings.rules.enabled(Rule::ContinueInFinally) { + if self.enabled(Rule::ContinueInFinally) { if self.settings.target_version <= PythonVersion::Py38 { pylint::rules::continue_in_finally(self, finalbody); } } self.visit_body(body); - self.ctx.handled_exceptions.pop(); + self.semantic_model.handled_exceptions.pop(); - self.ctx.flags |= ContextFlags::EXCEPTION_HANDLER; + self.semantic_model.flags |= SemanticModelFlags::EXCEPTION_HANDLER; for excepthandler in handlers { self.visit_excepthandler(excepthandler); } @@ -2158,8 +2053,8 @@ where // If we're in a class or module scope, then the annotation needs to be // available at runtime. // See: https://docs.python.org/3/reference/simple_stmts.html#annotated-assignment-statements - let runtime_annotation = if self.ctx.future_annotations() { - if matches!(self.ctx.scope().kind, ScopeKind::Class(..)) { + let runtime_annotation = if self.semantic_model.future_annotations() { + if matches!(self.semantic_model.scope().kind, ScopeKind::Class(..)) { let baseclasses = &self .settings .flake8_type_checking @@ -2169,7 +2064,7 @@ where .flake8_type_checking .runtime_evaluated_decorators; flake8_type_checking::helpers::runtime_evaluated( - &self.ctx, + &self.semantic_model, baseclasses, decorators, ) @@ -2178,7 +2073,7 @@ where } } else { matches!( - self.ctx.scope().kind, + self.semantic_model.scope().kind, ScopeKind::Class(..) | ScopeKind::Module ) }; @@ -2189,7 +2084,10 @@ where self.visit_annotation(annotation); } if let Some(expr) = value { - if self.ctx.match_typing_expr(annotation, "TypeAlias") { + if self + .semantic_model + .match_typing_expr(annotation, "TypeAlias") + { self.visit_type_definition(expr); } else { self.visit_expr(expr); @@ -2225,8 +2123,9 @@ where }) => { self.visit_boolean_test(test); - if flake8_type_checking::helpers::is_type_checking_block(&self.ctx, test) { - if self.settings.rules.enabled(Rule::EmptyTypeCheckingBlock) { + if flake8_type_checking::helpers::is_type_checking_block(&self.semantic_model, test) + { + if self.enabled(Rule::EmptyTypeCheckingBlock) { flake8_type_checking::rules::empty_type_checking_block(self, stmt, body); } @@ -2243,45 +2142,44 @@ where // Post-visit. 
match stmt { Stmt::FunctionDef(_) | Stmt::AsyncFunctionDef(_) => { - self.ctx.pop_scope(); - self.ctx.pop_definition(); + self.semantic_model.pop_scope(); + self.semantic_model.pop_definition(); } Stmt::ClassDef(ast::StmtClassDef { name, .. }) => { - self.ctx.pop_scope(); - self.ctx.pop_definition(); + self.semantic_model.pop_scope(); + self.semantic_model.pop_definition(); self.add_binding( name, Binding { kind: BindingKind::ClassDefinition, - runtime_usage: None, - synthetic_usage: None, - typing_usage: None, range: stmt.range(), - source: self.ctx.stmt_id, - context: self.ctx.execution_context(), - exceptions: self.ctx.exceptions(), + references: Vec::new(), + source: self.semantic_model.stmt_id, + context: self.semantic_model.execution_context(), + exceptions: self.semantic_model.exceptions(), + flags: BindingFlags::empty(), }, ); } _ => {} } - self.ctx.flags = flags_snapshot; - self.ctx.pop_stmt(); + self.semantic_model.flags = flags_snapshot; + self.semantic_model.pop_stmt(); } fn visit_annotation(&mut self, expr: &'b Expr) { - let flags_snapshot = self.ctx.flags; - self.ctx.flags |= ContextFlags::ANNOTATION; + let flags_snapshot = self.semantic_model.flags; + self.semantic_model.flags |= SemanticModelFlags::ANNOTATION; self.visit_type_definition(expr); - self.ctx.flags = flags_snapshot; + self.semantic_model.flags = flags_snapshot; } fn visit_expr(&mut self, expr: &'b Expr) { - if !self.ctx.in_f_string() - && !self.ctx.in_deferred_type_definition() - && self.ctx.in_type_definition() - && self.ctx.future_annotations() + if !self.semantic_model.in_f_string() + && !self.semantic_model.in_deferred_type_definition() + && self.semantic_model.in_type_definition() + && self.semantic_model.future_annotations() { if let Expr::Constant(ast::ExprConstant { value: Constant::Str(value), @@ -2291,21 +2189,21 @@ where self.deferred.string_type_definitions.push(( expr.range(), value, - self.ctx.snapshot(), + self.semantic_model.snapshot(), )); } else { self.deferred .future_type_definitions - .push((expr, self.ctx.snapshot())); + .push((expr, self.semantic_model.snapshot())); } return; } - self.ctx.push_expr(expr); + self.semantic_model.push_expr(expr); // Store the flags prior to any further descent, so that we can restore them after visiting // the node. - let flags_snapshot = self.ctx.flags; + let flags_snapshot = self.semantic_model.flags; // If we're in a boolean test (e.g., the `test` of a `Stmt::If`), but now within a // subexpression (e.g., `a` in `f(a)`), then we're no longer in a boolean test. @@ -2317,40 +2215,36 @@ where .. }) ) { - self.ctx.flags -= ContextFlags::BOOLEAN_TEST; + self.semantic_model.flags -= SemanticModelFlags::BOOLEAN_TEST; } // Pre-visit. match expr { Expr::Subscript(ast::ExprSubscript { value, slice, .. }) => { // Ex) Optional[...], Union[...] 
- if self.settings.rules.any_enabled(&[ + if self.any_enabled(&[ Rule::MissingFutureAnnotationsImport, Rule::NonPEP604Annotation, ]) { if let Some(operator) = - analyze::typing::to_pep604_operator(value, slice, &self.ctx) + analyze::typing::to_pep604_operator(value, slice, &self.semantic_model) { - if self - .settings - .rules - .enabled(Rule::MissingFutureAnnotationsImport) - { + if self.enabled(Rule::MissingFutureAnnotationsImport) { if self.settings.target_version < PythonVersion::Py310 && self.settings.target_version >= PythonVersion::Py37 - && !self.ctx.future_annotations() - && self.ctx.in_annotation() + && !self.semantic_model.future_annotations() + && self.semantic_model.in_annotation() { flake8_future_annotations::rules::missing_future_annotations( self, value, ); } } - if self.settings.rules.enabled(Rule::NonPEP604Annotation) { + if self.enabled(Rule::NonPEP604Annotation) { if self.settings.target_version >= PythonVersion::Py310 || (self.settings.target_version >= PythonVersion::Py37 - && self.ctx.future_annotations() - && self.ctx.in_annotation()) + && self.semantic_model.future_annotations() + && self.semantic_model.in_annotation()) { pyupgrade::rules::use_pep604_annotation( self, expr, slice, operator, @@ -2360,11 +2254,11 @@ where } } - if self.ctx.match_typing_expr(value, "Literal") { - self.ctx.flags |= ContextFlags::LITERAL; + if self.semantic_model.match_typing_expr(value, "Literal") { + self.semantic_model.flags |= SemanticModelFlags::LITERAL; } - if self.settings.rules.any_enabled(&[ + if self.any_enabled(&[ Rule::SysVersionSlice3, Rule::SysVersion2, Rule::SysVersion0, @@ -2373,11 +2267,7 @@ where flake8_2020::rules::subscript(self, value, slice); } - if self - .settings - .rules - .enabled(Rule::UncapitalizedEnvironmentVariables) - { + if self.enabled(Rule::UncapitalizedEnvironmentVariables) { flake8_simplify::rules::use_capital_environment_variables(self, expr); } @@ -2394,14 +2284,10 @@ where range: _, }) => { if matches!(ctx, ExprContext::Store) { - let check_too_many_expressions = self - .settings - .rules - .enabled(Rule::ExpressionsInStarAssignment); - let check_two_starred_expressions = self - .settings - .rules - .enabled(Rule::MultipleStarredExpressions); + let check_too_many_expressions = + self.enabled(Rule::ExpressionsInStarAssignment); + let check_two_starred_expressions = + self.enabled(Rule::MultipleStarredExpressions); if let Some(diagnostic) = pyflakes::rules::starred_expressions( elts, check_too_many_expressions, @@ -2415,41 +2301,37 @@ where Expr::Name(ast::ExprName { id, ctx, range: _ }) => { match ctx { ExprContext::Load => { - if self.settings.rules.enabled(Rule::TypingTextStrAlias) { + if self.enabled(Rule::TypingTextStrAlias) { pyupgrade::rules::typing_text_str_alias(self, expr); } - if self.settings.rules.enabled(Rule::NumpyDeprecatedTypeAlias) { + if self.enabled(Rule::NumpyDeprecatedTypeAlias) { numpy::rules::deprecated_type_alias(self, expr); } // Ex) List[...] 
- if self.settings.rules.any_enabled(&[ + if self.any_enabled(&[ Rule::MissingFutureAnnotationsImport, Rule::NonPEP585Annotation, ]) { if let Some(replacement) = - analyze::typing::to_pep585_generic(expr, &self.ctx) + analyze::typing::to_pep585_generic(expr, &self.semantic_model) { - if self - .settings - .rules - .enabled(Rule::MissingFutureAnnotationsImport) - { + if self.enabled(Rule::MissingFutureAnnotationsImport) { if self.settings.target_version < PythonVersion::Py39 && self.settings.target_version >= PythonVersion::Py37 - && !self.ctx.future_annotations() - && self.ctx.in_annotation() + && !self.semantic_model.future_annotations() + && self.semantic_model.in_annotation() { flake8_future_annotations::rules::missing_future_annotations( self, expr, ); } } - if self.settings.rules.enabled(Rule::NonPEP585Annotation) { + if self.enabled(Rule::NonPEP585Annotation) { if self.settings.target_version >= PythonVersion::Py39 || (self.settings.target_version >= PythonVersion::Py37 - && self.ctx.future_annotations() - && self.ctx.in_annotation()) + && self.semantic_model.future_annotations() + && self.semantic_model.in_annotation()) { pyupgrade::rules::use_pep585_annotation( self, @@ -2464,7 +2346,7 @@ where self.handle_node_load(expr); } ExprContext::Store => { - if self.settings.rules.enabled(Rule::AmbiguousVariableName) { + if self.enabled(Rule::AmbiguousVariableName) { if let Some(diagnostic) = pycodestyle::rules::ambiguous_variable_name(id, expr.range()) { @@ -2472,13 +2354,21 @@ where } } - if self.ctx.scope().kind.is_class() { - if self.settings.rules.enabled(Rule::BuiltinAttributeShadowing) { - flake8_builtins::rules::builtin_attribute_shadowing(self, id, expr); + if self.semantic_model.scope().kind.is_class() { + if self.enabled(Rule::BuiltinAttributeShadowing) { + flake8_builtins::rules::builtin_attribute_shadowing( + self, + id, + AnyShadowing::from(expr), + ); } } else { - if self.settings.rules.enabled(Rule::BuiltinVariableShadowing) { - flake8_builtins::rules::builtin_variable_shadowing(self, id, expr); + if self.enabled(Rule::BuiltinVariableShadowing) { + flake8_builtins::rules::builtin_variable_shadowing( + self, + id, + AnyShadowing::from(expr), + ); } } @@ -2487,72 +2377,66 @@ where ExprContext::Del => self.handle_node_delete(expr), } - if self.settings.rules.enabled(Rule::SixPY3) { + if self.enabled(Rule::SixPY3) { flake8_2020::rules::name_or_attribute(self, expr); } - if self - .settings - .rules - .enabled(Rule::LoadBeforeGlobalDeclaration) - { + if self.enabled(Rule::LoadBeforeGlobalDeclaration) { pylint::rules::load_before_global_declaration(self, id, expr); } } Expr::Attribute(ast::ExprAttribute { attr, value, .. }) => { // Ex) typing.List[...] 
- if self.settings.rules.any_enabled(&[ + if self.any_enabled(&[ Rule::MissingFutureAnnotationsImport, Rule::NonPEP585Annotation, ]) { - if let Some(replacement) = analyze::typing::to_pep585_generic(expr, &self.ctx) { - if self - .settings - .rules - .enabled(Rule::MissingFutureAnnotationsImport) - { + if let Some(replacement) = + analyze::typing::to_pep585_generic(expr, &self.semantic_model) + { + if self.enabled(Rule::MissingFutureAnnotationsImport) { if self.settings.target_version < PythonVersion::Py39 && self.settings.target_version >= PythonVersion::Py37 - && !self.ctx.future_annotations() - && self.ctx.in_annotation() + && !self.semantic_model.future_annotations() + && self.semantic_model.in_annotation() { flake8_future_annotations::rules::missing_future_annotations( self, expr, ); } } - if self.settings.rules.enabled(Rule::NonPEP585Annotation) { + if self.enabled(Rule::NonPEP585Annotation) { if self.settings.target_version >= PythonVersion::Py39 || (self.settings.target_version >= PythonVersion::Py37 - && self.ctx.future_annotations() - && self.ctx.in_annotation()) + && self.semantic_model.future_annotations() + && self.semantic_model.in_annotation()) { pyupgrade::rules::use_pep585_annotation(self, expr, &replacement); } } } } - if self.settings.rules.enabled(Rule::DatetimeTimezoneUTC) + if self.enabled(Rule::DatetimeTimezoneUTC) && self.settings.target_version >= PythonVersion::Py311 { pyupgrade::rules::datetime_utc_alias(self, expr); } - if self.settings.rules.enabled(Rule::TypingTextStrAlias) { + if self.enabled(Rule::TypingTextStrAlias) { pyupgrade::rules::typing_text_str_alias(self, expr); } - if self.settings.rules.enabled(Rule::NumpyDeprecatedTypeAlias) { + if self.enabled(Rule::NumpyDeprecatedTypeAlias) { numpy::rules::deprecated_type_alias(self, expr); } - if self.settings.rules.enabled(Rule::DeprecatedMockImport) { + if self.enabled(Rule::DeprecatedMockImport) { pyupgrade::rules::deprecated_mock_attribute(self, expr); } - if self.settings.rules.enabled(Rule::SixPY3) { + if self.enabled(Rule::SixPY3) { flake8_2020::rules::name_or_attribute(self, expr); } - if self.settings.rules.enabled(Rule::BannedApi) { - flake8_tidy_imports::banned_api::banned_attribute_access(self, expr); + if self.enabled(Rule::BannedApi) { + flake8_tidy_imports::rules::banned_attribute_access(self, expr); } - if self.settings.rules.enabled(Rule::PrivateMemberAccess) { + if self.enabled(Rule::PrivateMemberAccess) { flake8_self::rules::private_member_access(self, expr); } pandas_vet::rules::attr(self, attr, value, expr); @@ -2563,7 +2447,7 @@ where keywords, range: _, }) => { - if self.settings.rules.any_enabled(&[ + if self.any_enabled(&[ // pyflakes Rule::StringDotFormatInvalidFormat, Rule::StringDotFormatExtraNamedArguments, @@ -2585,7 +2469,7 @@ where { if attr == "join" { // "...".join(...) 
call - if self.settings.rules.enabled(Rule::StaticJoinToFString) { + if self.enabled(Rule::StaticJoinToFString) { flynt::rules::static_join_to_fstring(self, expr, value); } } else if attr == "format" { @@ -2593,11 +2477,7 @@ where let location = expr.range(); match pyflakes::format::FormatSummary::try_from(value.as_ref()) { Err(e) => { - if self - .settings - .rules - .enabled(Rule::StringDotFormatInvalidFormat) - { + if self.enabled(Rule::StringDotFormatInvalidFormat) { self.diagnostics.push(Diagnostic::new( pyflakes::rules::StringDotFormatInvalidFormat { message: pyflakes::format::error_to_string(&e), @@ -2607,19 +2487,13 @@ where } } Ok(summary) => { - if self - .settings - .rules - .enabled(Rule::StringDotFormatExtraNamedArguments) - { + if self.enabled(Rule::StringDotFormatExtraNamedArguments) { pyflakes::rules::string_dot_format_extra_named_arguments( self, &summary, keywords, location, ); } if self - .settings - .rules .enabled(Rule::StringDotFormatExtraPositionalArguments) { pyflakes::rules::string_dot_format_extra_positional_arguments( @@ -2628,31 +2502,23 @@ where ); } - if self - .settings - .rules - .enabled(Rule::StringDotFormatMissingArguments) - { + if self.enabled(Rule::StringDotFormatMissingArguments) { pyflakes::rules::string_dot_format_missing_argument( self, &summary, args, keywords, location, ); } - if self - .settings - .rules - .enabled(Rule::StringDotFormatMixingAutomatic) - { + if self.enabled(Rule::StringDotFormatMixingAutomatic) { pyflakes::rules::string_dot_format_mixing_automatic( self, &summary, location, ); } - if self.settings.rules.enabled(Rule::FormatLiterals) { + if self.enabled(Rule::FormatLiterals) { pyupgrade::rules::format_literals(self, &summary, expr); } - if self.settings.rules.enabled(Rule::FString) { + if self.enabled(Rule::FString) { pyupgrade::rules::f_strings(self, &summary, expr); } } @@ -2663,76 +2529,60 @@ where } // pyupgrade - if self.settings.rules.enabled(Rule::TypeOfPrimitive) { + if self.enabled(Rule::TypeOfPrimitive) { pyupgrade::rules::type_of_primitive(self, expr, func, args); } - if self.settings.rules.enabled(Rule::DeprecatedUnittestAlias) { + if self.enabled(Rule::DeprecatedUnittestAlias) { pyupgrade::rules::deprecated_unittest_alias(self, func); } - if self.settings.rules.enabled(Rule::SuperCallWithParameters) { + if self.enabled(Rule::SuperCallWithParameters) { pyupgrade::rules::super_call_with_parameters(self, expr, func, args); } - if self.settings.rules.enabled(Rule::UnnecessaryEncodeUTF8) { + if self.enabled(Rule::UnnecessaryEncodeUTF8) { pyupgrade::rules::unnecessary_encode_utf8(self, expr, func, args, keywords); } - if self.settings.rules.enabled(Rule::RedundantOpenModes) { + if self.enabled(Rule::RedundantOpenModes) { pyupgrade::rules::redundant_open_modes(self, expr); } - if self.settings.rules.enabled(Rule::NativeLiterals) { + if self.enabled(Rule::NativeLiterals) { pyupgrade::rules::native_literals(self, expr, func, args, keywords); } - if self.settings.rules.enabled(Rule::OpenAlias) { + if self.enabled(Rule::OpenAlias) { pyupgrade::rules::open_alias(self, expr, func); } - if self.settings.rules.enabled(Rule::ReplaceUniversalNewlines) { + if self.enabled(Rule::ReplaceUniversalNewlines) { pyupgrade::rules::replace_universal_newlines(self, func, keywords); } - if self.settings.rules.enabled(Rule::ReplaceStdoutStderr) { + if self.enabled(Rule::ReplaceStdoutStderr) { pyupgrade::rules::replace_stdout_stderr(self, expr, func, args, keywords); } - if self.settings.rules.enabled(Rule::OSErrorAlias) { + if 
self.enabled(Rule::OSErrorAlias) { pyupgrade::rules::os_error_alias_call(self, func); } - if self.settings.rules.enabled(Rule::NonPEP604Isinstance) + if self.enabled(Rule::NonPEP604Isinstance) && self.settings.target_version >= PythonVersion::Py310 { pyupgrade::rules::use_pep604_isinstance(self, expr, func, args); } // flake8-async - if self - .settings - .rules - .enabled(Rule::BlockingHttpCallInAsyncFunction) - { + if self.enabled(Rule::BlockingHttpCallInAsyncFunction) { flake8_async::rules::blocking_http_call(self, expr); } - if self - .settings - .rules - .enabled(Rule::OpenSleepOrSubprocessInAsyncFunction) - { + if self.enabled(Rule::OpenSleepOrSubprocessInAsyncFunction) { flake8_async::rules::open_sleep_or_subprocess_call(self, expr); } - if self - .settings - .rules - .enabled(Rule::BlockingOsCallInAsyncFunction) - { + if self.enabled(Rule::BlockingOsCallInAsyncFunction) { flake8_async::rules::blocking_os_call(self, expr); } // flake8-print - if self - .settings - .rules - .any_enabled(&[Rule::Print, Rule::PPrint]) - { + if self.any_enabled(&[Rule::Print, Rule::PPrint]) { flake8_print::rules::print_call(self, func, keywords); } // flake8-bandit - if self.settings.rules.any_enabled(&[ + if self.any_enabled(&[ Rule::SuspiciousPickleUsage, Rule::SuspiciousMarshalUsage, Rule::SuspiciousInsecureHashUsage, @@ -2759,103 +2609,90 @@ where } // flake8-bugbear - if self.settings.rules.enabled(Rule::UnreliableCallableCheck) { + if self.enabled(Rule::UnreliableCallableCheck) { flake8_bugbear::rules::unreliable_callable_check(self, expr, func, args); } - if self.settings.rules.enabled(Rule::StripWithMultiCharacters) { + if self.enabled(Rule::StripWithMultiCharacters) { flake8_bugbear::rules::strip_with_multi_characters(self, expr, func, args); } - if self.settings.rules.enabled(Rule::GetAttrWithConstant) { + if self.enabled(Rule::GetAttrWithConstant) { flake8_bugbear::rules::getattr_with_constant(self, expr, func, args); } - if self.settings.rules.enabled(Rule::SetAttrWithConstant) { + if self.enabled(Rule::SetAttrWithConstant) { flake8_bugbear::rules::setattr_with_constant(self, expr, func, args); } - if self.settings.rules.enabled(Rule::UselessContextlibSuppress) { + if self.enabled(Rule::UselessContextlibSuppress) { flake8_bugbear::rules::useless_contextlib_suppress(self, expr, func, args); } - if self - .settings - .rules - .enabled(Rule::StarArgUnpackingAfterKeywordArg) - { + if self.enabled(Rule::StarArgUnpackingAfterKeywordArg) { flake8_bugbear::rules::star_arg_unpacking_after_keyword_arg( self, args, keywords, ); } - if self.settings.rules.enabled(Rule::ZipWithoutExplicitStrict) + if self.enabled(Rule::ZipWithoutExplicitStrict) && self.settings.target_version >= PythonVersion::Py310 { flake8_bugbear::rules::zip_without_explicit_strict(self, expr, func, keywords); } - if self.settings.rules.enabled(Rule::NoExplicitStacklevel) { + if self.enabled(Rule::NoExplicitStacklevel) { flake8_bugbear::rules::no_explicit_stacklevel(self, func, args, keywords); } // flake8-pie - if self.settings.rules.enabled(Rule::UnnecessaryDictKwargs) { + if self.enabled(Rule::UnnecessaryDictKwargs) { flake8_pie::rules::unnecessary_dict_kwargs(self, expr, keywords); } // flake8-bandit - if self.settings.rules.enabled(Rule::ExecBuiltin) { + if self.enabled(Rule::ExecBuiltin) { if let Some(diagnostic) = flake8_bandit::rules::exec_used(expr, func) { self.diagnostics.push(diagnostic); } } - if self.settings.rules.enabled(Rule::BadFilePermissions) { + if self.enabled(Rule::BadFilePermissions) { 
flake8_bandit::rules::bad_file_permissions(self, func, args, keywords); } - if self - .settings - .rules - .enabled(Rule::RequestWithNoCertValidation) - { + if self.enabled(Rule::RequestWithNoCertValidation) { flake8_bandit::rules::request_with_no_cert_validation( self, func, args, keywords, ); } - if self.settings.rules.enabled(Rule::UnsafeYAMLLoad) { + if self.enabled(Rule::UnsafeYAMLLoad) { flake8_bandit::rules::unsafe_yaml_load(self, func, args, keywords); } - if self.settings.rules.enabled(Rule::SnmpInsecureVersion) { + if self.enabled(Rule::SnmpInsecureVersion) { flake8_bandit::rules::snmp_insecure_version(self, func, args, keywords); } - if self.settings.rules.enabled(Rule::SnmpWeakCryptography) { + if self.enabled(Rule::SnmpWeakCryptography) { flake8_bandit::rules::snmp_weak_cryptography(self, func, args, keywords); } - if self.settings.rules.enabled(Rule::Jinja2AutoescapeFalse) { + if self.enabled(Rule::Jinja2AutoescapeFalse) { flake8_bandit::rules::jinja2_autoescape_false(self, func, args, keywords); } - if self.settings.rules.enabled(Rule::HardcodedPasswordFuncArg) { + if self.enabled(Rule::HardcodedPasswordFuncArg) { self.diagnostics .extend(flake8_bandit::rules::hardcoded_password_func_arg(keywords)); } - if self.settings.rules.enabled(Rule::HardcodedSQLExpression) { + if self.enabled(Rule::HardcodedSQLExpression) { flake8_bandit::rules::hardcoded_sql_expression(self, expr); } - if self - .settings - .rules - .enabled(Rule::HashlibInsecureHashFunction) - { + if self.enabled(Rule::HashlibInsecureHashFunction) { flake8_bandit::rules::hashlib_insecure_hash_functions( self, func, args, keywords, ); } - if self.settings.rules.enabled(Rule::RequestWithoutTimeout) { + if self.enabled(Rule::RequestWithoutTimeout) { flake8_bandit::rules::request_without_timeout(self, func, args, keywords); } - if self - .settings - .rules - .enabled(Rule::LoggingConfigInsecureListen) - { + if self.enabled(Rule::ParamikoCall) { + flake8_bandit::rules::paramiko_call(self, func); + } + if self.enabled(Rule::LoggingConfigInsecureListen) { flake8_bandit::rules::logging_config_insecure_listen( self, func, args, keywords, ); } - if self.settings.rules.any_enabled(&[ + if self.any_enabled(&[ Rule::SubprocessWithoutShellEqualsTrue, Rule::SubprocessPopenWithShellEqualsTrue, Rule::CallWithShellEqualsTrue, @@ -2867,60 +2704,52 @@ where } // flake8-comprehensions - if self.settings.rules.enabled(Rule::UnnecessaryGeneratorList) { + if self.enabled(Rule::UnnecessaryGeneratorList) { flake8_comprehensions::rules::unnecessary_generator_list( self, expr, func, args, keywords, ); } - if self.settings.rules.enabled(Rule::UnnecessaryGeneratorSet) { + if self.enabled(Rule::UnnecessaryGeneratorSet) { flake8_comprehensions::rules::unnecessary_generator_set( self, expr, - self.ctx.expr_parent(), + self.semantic_model.expr_parent(), func, args, keywords, ); } - if self.settings.rules.enabled(Rule::UnnecessaryGeneratorDict) { + if self.enabled(Rule::UnnecessaryGeneratorDict) { flake8_comprehensions::rules::unnecessary_generator_dict( self, expr, - self.ctx.expr_parent(), + self.semantic_model.expr_parent(), func, args, keywords, ); } - if self - .settings - .rules - .enabled(Rule::UnnecessaryListComprehensionSet) - { + if self.enabled(Rule::UnnecessaryListComprehensionSet) { flake8_comprehensions::rules::unnecessary_list_comprehension_set( self, expr, func, args, keywords, ); } - if self - .settings - .rules - .enabled(Rule::UnnecessaryListComprehensionDict) - { + if self.enabled(Rule::UnnecessaryListComprehensionDict) { 
flake8_comprehensions::rules::unnecessary_list_comprehension_dict( self, expr, func, args, keywords, ); } - if self.settings.rules.enabled(Rule::UnnecessaryLiteralSet) { + if self.enabled(Rule::UnnecessaryLiteralSet) { flake8_comprehensions::rules::unnecessary_literal_set( self, expr, func, args, keywords, ); } - if self.settings.rules.enabled(Rule::UnnecessaryLiteralDict) { + if self.enabled(Rule::UnnecessaryLiteralDict) { flake8_comprehensions::rules::unnecessary_literal_dict( self, expr, func, args, keywords, ); } - if self.settings.rules.enabled(Rule::UnnecessaryCollectionCall) { + if self.enabled(Rule::UnnecessaryCollectionCall) { flake8_comprehensions::rules::unnecessary_collection_call( self, expr, @@ -2930,110 +2759,74 @@ where &self.settings.flake8_comprehensions, ); } - if self - .settings - .rules - .enabled(Rule::UnnecessaryLiteralWithinTupleCall) - { + if self.enabled(Rule::UnnecessaryLiteralWithinTupleCall) { flake8_comprehensions::rules::unnecessary_literal_within_tuple_call( self, expr, func, args, keywords, ); } - if self - .settings - .rules - .enabled(Rule::UnnecessaryLiteralWithinListCall) - { + if self.enabled(Rule::UnnecessaryLiteralWithinListCall) { flake8_comprehensions::rules::unnecessary_literal_within_list_call( self, expr, func, args, keywords, ); } - if self - .settings - .rules - .enabled(Rule::UnnecessaryLiteralWithinDictCall) - { + if self.enabled(Rule::UnnecessaryLiteralWithinDictCall) { flake8_comprehensions::rules::unnecessary_literal_within_dict_call( self, expr, func, args, keywords, ); } - if self.settings.rules.enabled(Rule::UnnecessaryListCall) { + if self.enabled(Rule::UnnecessaryListCall) { flake8_comprehensions::rules::unnecessary_list_call(self, expr, func, args); } - if self - .settings - .rules - .enabled(Rule::UnnecessaryCallAroundSorted) - { + if self.enabled(Rule::UnnecessaryCallAroundSorted) { flake8_comprehensions::rules::unnecessary_call_around_sorted( self, expr, func, args, ); } - if self - .settings - .rules - .enabled(Rule::UnnecessaryDoubleCastOrProcess) - { + if self.enabled(Rule::UnnecessaryDoubleCastOrProcess) { flake8_comprehensions::rules::unnecessary_double_cast_or_process( self, expr, func, args, ); } - if self - .settings - .rules - .enabled(Rule::UnnecessarySubscriptReversal) - { + if self.enabled(Rule::UnnecessarySubscriptReversal) { flake8_comprehensions::rules::unnecessary_subscript_reversal( self, expr, func, args, ); } - if self.settings.rules.enabled(Rule::UnnecessaryMap) { + if self.enabled(Rule::UnnecessaryMap) { flake8_comprehensions::rules::unnecessary_map( self, expr, - self.ctx.expr_parent(), + self.semantic_model.expr_parent(), func, args, ); } - if self - .settings - .rules - .enabled(Rule::UnnecessaryComprehensionAnyAll) - { + if self.enabled(Rule::UnnecessaryComprehensionAnyAll) { flake8_comprehensions::rules::unnecessary_comprehension_any_all( self, expr, func, args, keywords, ); } // flake8-boolean-trap - if self - .settings - .rules - .enabled(Rule::BooleanPositionalValueInFunctionCall) - { + if self.enabled(Rule::BooleanPositionalValueInFunctionCall) { flake8_boolean_trap::rules::check_boolean_positional_value_in_function_call( self, args, func, ); } if let Expr::Name(ast::ExprName { id, ctx, range: _ }) = func.as_ref() { if id == "locals" && matches!(ctx, ExprContext::Load) { - let scope = self.ctx.scope_mut(); + let scope = self.semantic_model.scope_mut(); scope.uses_locals = true; } } // flake8-debugger - if self.settings.rules.enabled(Rule::Debugger) { + if self.enabled(Rule::Debugger) { 
flake8_debugger::rules::debugger_call(self, expr, func); } // pandas-vet - if self - .settings - .rules - .enabled(Rule::PandasUseOfInplaceArgument) - { + if self.enabled(Rule::PandasUseOfInplaceArgument) { self.diagnostics.extend( pandas_vet::rules::inplace_argument(self, expr, func, args, keywords) .into_iter(), @@ -3041,14 +2834,14 @@ where } pandas_vet::rules::call(self, func); - if self.settings.rules.enabled(Rule::PandasUseOfPdMerge) { + if self.enabled(Rule::PandasUseOfPdMerge) { if let Some(diagnostic) = pandas_vet::rules::use_of_pd_merge(func) { self.diagnostics.push(diagnostic); }; } // flake8-datetimez - if self.settings.rules.enabled(Rule::CallDatetimeWithoutTzinfo) { + if self.enabled(Rule::CallDatetimeWithoutTzinfo) { flake8_datetimez::rules::call_datetime_without_tzinfo( self, func, @@ -3057,28 +2850,20 @@ where expr.range(), ); } - if self.settings.rules.enabled(Rule::CallDatetimeToday) { + if self.enabled(Rule::CallDatetimeToday) { flake8_datetimez::rules::call_datetime_today(self, func, expr.range()); } - if self.settings.rules.enabled(Rule::CallDatetimeUtcnow) { + if self.enabled(Rule::CallDatetimeUtcnow) { flake8_datetimez::rules::call_datetime_utcnow(self, func, expr.range()); } - if self - .settings - .rules - .enabled(Rule::CallDatetimeUtcfromtimestamp) - { + if self.enabled(Rule::CallDatetimeUtcfromtimestamp) { flake8_datetimez::rules::call_datetime_utcfromtimestamp( self, func, expr.range(), ); } - if self - .settings - .rules - .enabled(Rule::CallDatetimeNowWithoutTzinfo) - { + if self.enabled(Rule::CallDatetimeNowWithoutTzinfo) { flake8_datetimez::rules::call_datetime_now_without_tzinfo( self, func, @@ -3087,7 +2872,7 @@ where expr.range(), ); } - if self.settings.rules.enabled(Rule::CallDatetimeFromtimestamp) { + if self.enabled(Rule::CallDatetimeFromtimestamp) { flake8_datetimez::rules::call_datetime_fromtimestamp( self, func, @@ -3096,11 +2881,7 @@ where expr.range(), ); } - if self - .settings - .rules - .enabled(Rule::CallDatetimeStrptimeWithoutZone) - { + if self.enabled(Rule::CallDatetimeStrptimeWithoutZone) { flake8_datetimez::rules::call_datetime_strptime_without_zone( self, func, @@ -3108,54 +2889,50 @@ where expr.range(), ); } - if self.settings.rules.enabled(Rule::CallDateToday) { + if self.enabled(Rule::CallDateToday) { flake8_datetimez::rules::call_date_today(self, func, expr.range()); } - if self.settings.rules.enabled(Rule::CallDateFromtimestamp) { + if self.enabled(Rule::CallDateFromtimestamp) { flake8_datetimez::rules::call_date_fromtimestamp(self, func, expr.range()); } // pygrep-hooks - if self.settings.rules.enabled(Rule::Eval) { + if self.enabled(Rule::Eval) { pygrep_hooks::rules::no_eval(self, func); } - if self.settings.rules.enabled(Rule::DeprecatedLogWarn) { + if self.enabled(Rule::DeprecatedLogWarn) { pygrep_hooks::rules::deprecated_log_warn(self, func); } // pylint - if self - .settings - .rules - .enabled(Rule::UnnecessaryDirectLambdaCall) - { + if self.enabled(Rule::UnnecessaryDirectLambdaCall) { pylint::rules::unnecessary_direct_lambda_call(self, expr, func); } - if self.settings.rules.enabled(Rule::SysExitAlias) { + if self.enabled(Rule::SysExitAlias) { pylint::rules::sys_exit_alias(self, func); } - if self.settings.rules.enabled(Rule::BadStrStripCall) { + if self.enabled(Rule::BadStrStripCall) { pylint::rules::bad_str_strip_call(self, func, args); } - if self.settings.rules.enabled(Rule::InvalidEnvvarDefault) { + if self.enabled(Rule::InvalidEnvvarDefault) { pylint::rules::invalid_envvar_default(self, func, args, keywords); } - if 
self.settings.rules.enabled(Rule::InvalidEnvvarValue) { + if self.enabled(Rule::InvalidEnvvarValue) { pylint::rules::invalid_envvar_value(self, func, args, keywords); } - if self.settings.rules.enabled(Rule::NestedMinMax) { + if self.enabled(Rule::NestedMinMax) { pylint::rules::nested_min_max(self, expr, func, args, keywords); } // flake8-pytest-style - if self.settings.rules.enabled(Rule::PytestPatchWithLambda) { + if self.enabled(Rule::PytestPatchWithLambda) { if let Some(diagnostic) = flake8_pytest_style::rules::patch_with_lambda(func, args, keywords) { self.diagnostics.push(diagnostic); } } - if self.settings.rules.enabled(Rule::PytestUnittestAssertion) { + if self.enabled(Rule::PytestUnittestAssertion) { if let Some(diagnostic) = flake8_pytest_style::rules::unittest_assertion( self, expr, func, args, keywords, ) { @@ -3163,25 +2940,25 @@ where } } - if self.settings.rules.any_enabled(&[ + if self.any_enabled(&[ Rule::PytestRaisesWithoutException, Rule::PytestRaisesTooBroad, ]) { flake8_pytest_style::rules::raises_call(self, func, args, keywords); } - if self.settings.rules.enabled(Rule::PytestFailWithoutMessage) { + if self.enabled(Rule::PytestFailWithoutMessage) { flake8_pytest_style::rules::fail_call(self, func, args, keywords); } - if self.settings.rules.enabled(Rule::PairwiseOverZipped) { + if self.enabled(Rule::PairwiseOverZipped) { if self.settings.target_version >= PythonVersion::Py310 { ruff::rules::pairwise_over_zipped(self, func, args); } } // flake8-gettext - if self.settings.rules.any_enabled(&[ + if self.any_enabled(&[ Rule::FStringInGetTextFuncCall, Rule::FormatInGetTextFuncCall, Rule::PrintfInGetTextFuncCall, @@ -3189,43 +2966,35 @@ where func, &self.settings.flake8_gettext.functions_names, ) { - if self.settings.rules.enabled(Rule::FStringInGetTextFuncCall) { + if self.enabled(Rule::FStringInGetTextFuncCall) { self.diagnostics .extend(flake8_gettext::rules::f_string_in_gettext_func_call(args)); } - if self.settings.rules.enabled(Rule::FormatInGetTextFuncCall) { + if self.enabled(Rule::FormatInGetTextFuncCall) { self.diagnostics .extend(flake8_gettext::rules::format_in_gettext_func_call(args)); } - if self.settings.rules.enabled(Rule::PrintfInGetTextFuncCall) { + if self.enabled(Rule::PrintfInGetTextFuncCall) { self.diagnostics .extend(flake8_gettext::rules::printf_in_gettext_func_call(args)); } } // flake8-simplify - if self - .settings - .rules - .enabled(Rule::UncapitalizedEnvironmentVariables) - { + if self.enabled(Rule::UncapitalizedEnvironmentVariables) { flake8_simplify::rules::use_capital_environment_variables(self, expr); } - if self - .settings - .rules - .enabled(Rule::OpenFileWithContextHandler) - { + if self.enabled(Rule::OpenFileWithContextHandler) { flake8_simplify::rules::open_file_with_context_handler(self, func); } - if self.settings.rules.enabled(Rule::DictGetWithNoneDefault) { + if self.enabled(Rule::DictGetWithNoneDefault) { flake8_simplify::rules::dict_get_with_none_default(self, expr); } // flake8-use-pathlib - if self.settings.rules.any_enabled(&[ + if self.any_enabled(&[ Rule::OsPathAbspath, Rule::OsChmod, Rule::OsMkdir, @@ -3251,16 +3020,16 @@ where Rule::BuiltinOpen, Rule::PyPath, ]) { - flake8_use_pathlib::helpers::replaceable_by_pathlib(self, func); + flake8_use_pathlib::rules::replaceable_by_pathlib(self, func); } // numpy - if self.settings.rules.enabled(Rule::NumpyLegacyRandom) { + if self.enabled(Rule::NumpyLegacyRandom) { numpy::rules::numpy_legacy_random(self, func); } // flake8-logging-format - if self.settings.rules.any_enabled(&[ + 
if self.any_enabled(&[ Rule::LoggingStringFormat, Rule::LoggingPercentFormat, Rule::LoggingStringConcat, @@ -3274,20 +3043,12 @@ where } // pylint logging checker - if self - .settings - .rules - .any_enabled(&[Rule::LoggingTooFewArgs, Rule::LoggingTooManyArgs]) - { + if self.any_enabled(&[Rule::LoggingTooFewArgs, Rule::LoggingTooManyArgs]) { pylint::rules::logging_call(self, func, args, keywords); } // flake8-django - if self - .settings - .rules - .enabled(Rule::DjangoLocalsInRenderFunction) - { + if self.enabled(Rule::DjangoLocalsInRenderFunction) { flake8_django::rules::locals_in_render_function(self, func, args, keywords); } } @@ -3296,59 +3057,63 @@ where values, range: _, }) => { - if self.settings.rules.any_enabled(&[ + if self.any_enabled(&[ Rule::MultiValueRepeatedKeyLiteral, Rule::MultiValueRepeatedKeyVariable, ]) { pyflakes::rules::repeated_keys(self, keys, values); } - if self.settings.rules.enabled(Rule::UnnecessarySpread) { + if self.enabled(Rule::UnnecessarySpread) { flake8_pie::rules::unnecessary_spread(self, keys, values); } } + Expr::Set(ast::ExprSet { elts, range: _ }) => { + if self.enabled(Rule::DuplicateValue) { + pylint::rules::duplicate_value(self, elts); + } + } Expr::Yield(_) => { - if self.settings.rules.enabled(Rule::YieldOutsideFunction) { + if self.enabled(Rule::YieldOutsideFunction) { pyflakes::rules::yield_outside_function(self, expr); } - if self.settings.rules.enabled(Rule::YieldInInit) { + if self.enabled(Rule::YieldInInit) { pylint::rules::yield_in_init(self, expr); } } Expr::YieldFrom(_) => { - if self.settings.rules.enabled(Rule::YieldOutsideFunction) { + if self.enabled(Rule::YieldOutsideFunction) { pyflakes::rules::yield_outside_function(self, expr); } - if self.settings.rules.enabled(Rule::YieldInInit) { + if self.enabled(Rule::YieldInInit) { pylint::rules::yield_in_init(self, expr); } } Expr::Await(_) => { - if self.settings.rules.enabled(Rule::YieldOutsideFunction) { + if self.enabled(Rule::YieldOutsideFunction) { pyflakes::rules::yield_outside_function(self, expr); } - if self.settings.rules.enabled(Rule::AwaitOutsideAsync) { + if self.enabled(Rule::AwaitOutsideAsync) { pylint::rules::await_outside_async(self, expr); } } Expr::JoinedStr(ast::ExprJoinedStr { values, range: _ }) => { - if self - .settings - .rules - .enabled(Rule::FStringMissingPlaceholders) - { + if self.enabled(Rule::FStringMissingPlaceholders) { pyflakes::rules::f_string_missing_placeholders(expr, values, self); } - if self.settings.rules.enabled(Rule::HardcodedSQLExpression) { + if self.enabled(Rule::HardcodedSQLExpression) { flake8_bandit::rules::hardcoded_sql_expression(self, expr); } + if self.enabled(Rule::ExplicitFStringTypeConversion) { + ruff::rules::explicit_f_string_type_conversion(self, expr, values); + } } Expr::BinOp(ast::ExprBinOp { left, op: Operator::RShift, .. }) => { - if self.settings.rules.enabled(Rule::InvalidPrintSyntax) { + if self.enabled(Rule::InvalidPrintSyntax) { pyflakes::rules::invalid_print_syntax(self, left); } } @@ -3363,7 +3128,7 @@ where .. }) = left.as_ref() { - if self.settings.rules.any_enabled(&[ + if self.any_enabled(&[ Rule::PercentFormatInvalidFormat, Rule::PercentFormatExpectedMapping, Rule::PercentFormatExpectedSequence, @@ -3380,11 +3145,7 @@ where typ: CFormatErrorType::UnsupportedFormatChar(c), .. 
}) => { - if self - .settings - .rules - .enabled(Rule::PercentFormatUnsupportedFormatCharacter) - { + if self.enabled(Rule::PercentFormatUnsupportedFormatCharacter) { self.diagnostics.push(Diagnostic::new( pyflakes::rules::PercentFormatUnsupportedFormatCharacter { char: c, @@ -3394,11 +3155,7 @@ where } } Err(e) => { - if self - .settings - .rules - .enabled(Rule::PercentFormatInvalidFormat) - { + if self.enabled(Rule::PercentFormatInvalidFormat) { self.diagnostics.push(Diagnostic::new( pyflakes::rules::PercentFormatInvalidFormat { message: e.to_string(), @@ -3408,65 +3165,37 @@ where } } Ok(summary) => { - if self - .settings - .rules - .enabled(Rule::PercentFormatExpectedMapping) - { + if self.enabled(Rule::PercentFormatExpectedMapping) { pyflakes::rules::percent_format_expected_mapping( self, &summary, right, location, ); } - if self - .settings - .rules - .enabled(Rule::PercentFormatExpectedSequence) - { + if self.enabled(Rule::PercentFormatExpectedSequence) { pyflakes::rules::percent_format_expected_sequence( self, &summary, right, location, ); } - if self - .settings - .rules - .enabled(Rule::PercentFormatExtraNamedArguments) - { + if self.enabled(Rule::PercentFormatExtraNamedArguments) { pyflakes::rules::percent_format_extra_named_arguments( self, &summary, right, location, ); } - if self - .settings - .rules - .enabled(Rule::PercentFormatMissingArgument) - { + if self.enabled(Rule::PercentFormatMissingArgument) { pyflakes::rules::percent_format_missing_arguments( self, &summary, right, location, ); } - if self - .settings - .rules - .enabled(Rule::PercentFormatMixedPositionalAndNamed) - { + if self.enabled(Rule::PercentFormatMixedPositionalAndNamed) { pyflakes::rules::percent_format_mixed_positional_and_named( self, &summary, location, ); } - if self - .settings - .rules - .enabled(Rule::PercentFormatPositionalCountMismatch) - { + if self.enabled(Rule::PercentFormatPositionalCountMismatch) { pyflakes::rules::percent_format_positional_count_mismatch( self, &summary, right, location, ); } - if self - .settings - .rules - .enabled(Rule::PercentFormatStarRequiresSequence) - { + if self.enabled(Rule::PercentFormatStarRequiresSequence) { pyflakes::rules::percent_format_star_requires_sequence( self, &summary, right, location, ); @@ -3475,13 +3204,13 @@ where } } - if self.settings.rules.enabled(Rule::PrintfStringFormatting) { + if self.enabled(Rule::PrintfStringFormatting) { pyupgrade::rules::printf_string_formatting(self, expr, right, self.locator); } - if self.settings.rules.enabled(Rule::BadStringFormatType) { + if self.enabled(Rule::BadStringFormatType) { pylint::rules::bad_string_format_type(self, expr, right); } - if self.settings.rules.enabled(Rule::HardcodedSQLExpression) { + if self.enabled(Rule::HardcodedSQLExpression) { flake8_bandit::rules::hardcoded_sql_expression(self, expr); } } @@ -3489,23 +3218,15 @@ where Expr::BinOp(ast::ExprBinOp { op: Operator::Add, .. 
}) => { - if self - .settings - .rules - .enabled(Rule::ExplicitStringConcatenation) - { + if self.enabled(Rule::ExplicitStringConcatenation) { if let Some(diagnostic) = flake8_implicit_str_concat::rules::explicit(expr) { self.diagnostics.push(diagnostic); } } - if self - .settings - .rules - .enabled(Rule::CollectionLiteralConcatenation) - { + if self.enabled(Rule::CollectionLiteralConcatenation) { ruff::rules::collection_literal_concatenation(self, expr); } - if self.settings.rules.enabled(Rule::HardcodedSQLExpression) { + if self.enabled(Rule::HardcodedSQLExpression) { flake8_bandit::rules::hardcoded_sql_expression(self, expr); } } @@ -3514,9 +3235,9 @@ where .. }) => { if self.is_stub { - if self.settings.rules.enabled(Rule::DuplicateUnionMember) - && self.ctx.in_type_definition() - && self.ctx.expr_parent().map_or(true, |parent| { + if self.enabled(Rule::DuplicateUnionMember) + && self.semantic_model.in_type_definition() + && self.semantic_model.expr_parent().map_or(true, |parent| { !matches!( parent, Expr::BinOp(ast::ExprBinOp { @@ -3535,8 +3256,8 @@ where operand, range: _, }) => { - let check_not_in = self.settings.rules.enabled(Rule::NotInTest); - let check_not_is = self.settings.rules.enabled(Rule::NotIsTest); + let check_not_in = self.enabled(Rule::NotInTest); + let check_not_is = self.enabled(Rule::NotIsTest); if check_not_in || check_not_is { pycodestyle::rules::not_tests( self, @@ -3548,17 +3269,17 @@ where ); } - if self.settings.rules.enabled(Rule::UnaryPrefixIncrement) { + if self.enabled(Rule::UnaryPrefixIncrement) { flake8_bugbear::rules::unary_prefix_increment(self, expr, *op, operand); } - if self.settings.rules.enabled(Rule::NegateEqualOp) { + if self.enabled(Rule::NegateEqualOp) { flake8_simplify::rules::negation_with_equal_op(self, expr, *op, operand); } - if self.settings.rules.enabled(Rule::NegateNotEqualOp) { + if self.enabled(Rule::NegateNotEqualOp) { flake8_simplify::rules::negation_with_not_equal_op(self, expr, *op, operand); } - if self.settings.rules.enabled(Rule::DoubleNegation) { + if self.enabled(Rule::DoubleNegation) { flake8_simplify::rules::double_negation(self, expr, *op, operand); } } @@ -3568,9 +3289,8 @@ where comparators, range: _, }) => { - let check_none_comparisons = self.settings.rules.enabled(Rule::NoneComparison); - let check_true_false_comparisons = - self.settings.rules.enabled(Rule::TrueFalseComparison); + let check_none_comparisons = self.enabled(Rule::NoneComparison); + let check_true_false_comparisons = self.enabled(Rule::TrueFalseComparison); if check_none_comparisons || check_true_false_comparisons { pycodestyle::rules::literal_comparisons( self, @@ -3583,7 +3303,7 @@ where ); } - if self.settings.rules.enabled(Rule::IsLiteral) { + if self.enabled(Rule::IsLiteral) { pyflakes::rules::invalid_literal_comparison( self, left, @@ -3593,11 +3313,11 @@ where ); } - if self.settings.rules.enabled(Rule::TypeComparison) { + if self.enabled(Rule::TypeComparison) { pycodestyle::rules::type_comparison(self, expr, ops, comparators); } - if self.settings.rules.any_enabled(&[ + if self.any_enabled(&[ Rule::SysVersionCmpStr3, Rule::SysVersionInfo0Eq3, Rule::SysVersionInfo1CmpInt, @@ -3607,7 +3327,7 @@ where flake8_2020::rules::compare(self, left, ops, comparators); } - if self.settings.rules.enabled(Rule::HardcodedPasswordString) { + if self.enabled(Rule::HardcodedPasswordString) { self.diagnostics.extend( flake8_bandit::rules::compare_to_hardcoded_password_string( left, @@ -3616,28 +3336,28 @@ where ); } - if 
self.settings.rules.enabled(Rule::ComparisonOfConstant) { + if self.enabled(Rule::ComparisonOfConstant) { pylint::rules::comparison_of_constant(self, left, ops, comparators); } - if self.settings.rules.enabled(Rule::CompareToEmptyString) { + if self.enabled(Rule::CompareToEmptyString) { pylint::rules::compare_to_empty_string(self, left, ops, comparators); } - if self.settings.rules.enabled(Rule::MagicValueComparison) { + if self.enabled(Rule::MagicValueComparison) { pylint::rules::magic_value_comparison(self, left, comparators); } - if self.settings.rules.enabled(Rule::InDictKeys) { + if self.enabled(Rule::InDictKeys) { flake8_simplify::rules::key_in_dict_compare(self, expr, left, ops, comparators); } - if self.settings.rules.enabled(Rule::YodaConditions) { + if self.enabled(Rule::YodaConditions) { flake8_simplify::rules::yoda_conditions(self, expr, left, ops, comparators); } if self.is_stub { - if self.settings.rules.any_enabled(&[ + if self.any_enabled(&[ Rule::UnrecognizedPlatformCheck, Rule::UnrecognizedPlatformName, ]) { @@ -3650,7 +3370,7 @@ where ); } - if self.settings.rules.enabled(Rule::BadVersionInfoComparison) { + if self.enabled(Rule::BadVersionInfoComparison) { flake8_pyi::rules::bad_version_info_comparison( self, expr, @@ -3666,28 +3386,24 @@ where kind, range: _, }) => { - if self.ctx.in_type_definition() - && !self.ctx.in_literal() - && !self.ctx.in_f_string() + if self.semantic_model.in_type_definition() + && !self.semantic_model.in_literal() + && !self.semantic_model.in_f_string() { self.deferred.string_type_definitions.push(( expr.range(), value, - self.ctx.snapshot(), + self.semantic_model.snapshot(), )); } - if self - .settings - .rules - .enabled(Rule::HardcodedBindAllInterfaces) - { + if self.enabled(Rule::HardcodedBindAllInterfaces) { if let Some(diagnostic) = flake8_bandit::rules::hardcoded_bind_all_interfaces(value, expr.range()) { self.diagnostics.push(diagnostic); } } - if self.settings.rules.enabled(Rule::HardcodedTempFile) { + if self.enabled(Rule::HardcodedTempFile) { if let Some(diagnostic) = flake8_bandit::rules::hardcoded_tmp_directory( expr, value, @@ -3696,7 +3412,7 @@ where self.diagnostics.push(diagnostic); } } - if self.settings.rules.enabled(Rule::UnicodeKindPrefix) { + if self.enabled(Rule::UnicodeKindPrefix) { pyupgrade::rules::unicode_kind_prefix(self, expr, kind.as_deref()); } } @@ -3707,7 +3423,7 @@ where range: _, }, ) => { - if self.settings.rules.enabled(Rule::ReimplementedListBuiltin) { + if self.enabled(Rule::ReimplementedListBuiltin) { flake8_pie::rules::reimplemented_list_builtin(self, lambda); } @@ -3718,7 +3434,7 @@ where for expr in &args.defaults { self.visit_expr(expr); } - self.ctx + self.semantic_model .push_scope(ScopeKind::Lambda(Lambda { args, body })); } Expr::IfExp(ast::ExprIfExp { @@ -3727,17 +3443,17 @@ where orelse, range: _, }) => { - if self.settings.rules.enabled(Rule::IfExprWithTrueFalse) { + if self.enabled(Rule::IfExprWithTrueFalse) { flake8_simplify::rules::explicit_true_false_in_ifexpr( self, expr, test, body, orelse, ); } - if self.settings.rules.enabled(Rule::IfExprWithFalseTrue) { + if self.enabled(Rule::IfExprWithFalseTrue) { flake8_simplify::rules::explicit_false_true_in_ifexpr( self, expr, test, body, orelse, ); } - if self.settings.rules.enabled(Rule::IfExprWithTwistedArms) { + if self.enabled(Rule::IfExprWithTwistedArms) { flake8_simplify::rules::twisted_arms_in_ifexpr(self, expr, test, body, orelse); } } @@ -3751,15 +3467,15 @@ where generators, range: _, }) => { - if 
self.settings.rules.enabled(Rule::UnnecessaryComprehension) { + if self.enabled(Rule::UnnecessaryComprehension) { flake8_comprehensions::rules::unnecessary_list_set_comprehension( self, expr, elt, generators, ); } - if self.settings.rules.enabled(Rule::FunctionUsesLoopVariable) { + if self.enabled(Rule::FunctionUsesLoopVariable) { flake8_bugbear::rules::function_uses_loop_variable(self, &Node::Expr(expr)); } - if self.settings.rules.enabled(Rule::InDictKeys) { + if self.enabled(Rule::InDictKeys) { for generator in generators { flake8_simplify::rules::key_in_dict_for( self, @@ -3775,15 +3491,15 @@ where generators, range: _, }) => { - if self.settings.rules.enabled(Rule::UnnecessaryComprehension) { + if self.enabled(Rule::UnnecessaryComprehension) { flake8_comprehensions::rules::unnecessary_dict_comprehension( self, expr, key, value, generators, ); } - if self.settings.rules.enabled(Rule::FunctionUsesLoopVariable) { + if self.enabled(Rule::FunctionUsesLoopVariable) { flake8_bugbear::rules::function_uses_loop_variable(self, &Node::Expr(expr)); } - if self.settings.rules.enabled(Rule::InDictKeys) { + if self.enabled(Rule::InDictKeys) { for generator in generators { flake8_simplify::rules::key_in_dict_for( self, @@ -3798,10 +3514,10 @@ where elt: _, range: _, }) => { - if self.settings.rules.enabled(Rule::FunctionUsesLoopVariable) { + if self.enabled(Rule::FunctionUsesLoopVariable) { flake8_bugbear::rules::function_uses_loop_variable(self, &Node::Expr(expr)); } - if self.settings.rules.enabled(Rule::InDictKeys) { + if self.enabled(Rule::InDictKeys) { for generator in generators { flake8_simplify::rules::key_in_dict_for( self, @@ -3816,42 +3532,31 @@ where values, range: _, }) => { - if self.settings.rules.enabled(Rule::RepeatedIsinstanceCalls) { + if self.enabled(Rule::RepeatedIsinstanceCalls) { pylint::rules::repeated_isinstance_calls(self, expr, *op, values); } - if self.settings.rules.enabled(Rule::MultipleStartsEndsWith) { + if self.enabled(Rule::MultipleStartsEndsWith) { flake8_pie::rules::multiple_starts_ends_with(self, expr); } - if self.settings.rules.enabled(Rule::DuplicateIsinstanceCall) { + if self.enabled(Rule::DuplicateIsinstanceCall) { flake8_simplify::rules::duplicate_isinstance_call(self, expr); } - if self.settings.rules.enabled(Rule::CompareWithTuple) { + if self.enabled(Rule::CompareWithTuple) { flake8_simplify::rules::compare_with_tuple(self, expr); } - if self.settings.rules.enabled(Rule::ExprAndNotExpr) { + if self.enabled(Rule::ExprAndNotExpr) { flake8_simplify::rules::expr_and_not_expr(self, expr); } - if self.settings.rules.enabled(Rule::ExprOrNotExpr) { + if self.enabled(Rule::ExprOrNotExpr) { flake8_simplify::rules::expr_or_not_expr(self, expr); } - if self.settings.rules.enabled(Rule::ExprOrTrue) { + if self.enabled(Rule::ExprOrTrue) { flake8_simplify::rules::expr_or_true(self, expr); } - if self.settings.rules.enabled(Rule::ExprAndFalse) { + if self.enabled(Rule::ExprAndFalse) { flake8_simplify::rules::expr_and_false(self, expr); } } - Expr::FormattedValue(ast::ExprFormattedValue { - value, conversion, .. 
- }) => { - if self - .settings - .rules - .enabled(Rule::ExplicitFStringTypeConversion) - { - ruff::rules::explicit_f_string_type_conversion(self, value, *conversion); - } - } _ => {} }; @@ -3886,7 +3591,9 @@ where self.visit_expr(value); } Expr::Lambda(_) => { - self.deferred.lambdas.push((expr, self.ctx.snapshot())); + self.deferred + .lambdas + .push((expr, self.semantic_model.snapshot())); } Expr::IfExp(ast::ExprIfExp { test, @@ -3904,64 +3611,92 @@ where keywords, range: _, }) => { - let callable = self.ctx.resolve_call_path(func).and_then(|call_path| { - if self.ctx.match_typing_call_path(&call_path, "cast") { - Some(Callable::Cast) - } else if self.ctx.match_typing_call_path(&call_path, "NewType") { - Some(Callable::NewType) - } else if self.ctx.match_typing_call_path(&call_path, "TypeVar") { - Some(Callable::TypeVar) - } else if self.ctx.match_typing_call_path(&call_path, "NamedTuple") { - Some(Callable::NamedTuple) - } else if self.ctx.match_typing_call_path(&call_path, "TypedDict") { - Some(Callable::TypedDict) - } else if [ - "Arg", - "DefaultArg", - "NamedArg", - "DefaultNamedArg", - "VarArg", - "KwArg", - ] - .iter() - .any(|target| call_path.as_slice() == ["mypy_extensions", target]) - { - Some(Callable::MypyExtension) - } else if call_path.as_slice() == ["", "bool"] { - Some(Callable::Bool) - } else { - None - } - }); + let callable = self + .semantic_model + .resolve_call_path(func) + .and_then(|call_path| { + if self + .semantic_model + .match_typing_call_path(&call_path, "cast") + { + Some(Callable::Cast) + } else if self + .semantic_model + .match_typing_call_path(&call_path, "NewType") + { + Some(Callable::NewType) + } else if self + .semantic_model + .match_typing_call_path(&call_path, "TypeVar") + { + Some(Callable::TypeVar) + } else if self + .semantic_model + .match_typing_call_path(&call_path, "NamedTuple") + { + Some(Callable::NamedTuple) + } else if self + .semantic_model + .match_typing_call_path(&call_path, "TypedDict") + { + Some(Callable::TypedDict) + } else if [ + "Arg", + "DefaultArg", + "NamedArg", + "DefaultNamedArg", + "VarArg", + "KwArg", + ] + .iter() + .any(|target| call_path.as_slice() == ["mypy_extensions", target]) + { + Some(Callable::MypyExtension) + } else if call_path.as_slice() == ["", "bool"] { + Some(Callable::Bool) + } else { + None + } + }); match callable { Some(Callable::Bool) => { self.visit_expr(func); - if !args.is_empty() { - self.visit_boolean_test(&args[0]); + let mut args = args.iter(); + if let Some(arg) = args.next() { + self.visit_boolean_test(arg); } - for expr in args.iter().skip(1) { - self.visit_expr(expr); + for arg in args { + self.visit_expr(arg); } } Some(Callable::Cast) => { self.visit_expr(func); - if !args.is_empty() { - self.visit_type_definition(&args[0]); + let mut args = args.iter(); + if let Some(arg) = args.next() { + self.visit_type_definition(arg); } - for expr in args.iter().skip(1) { - self.visit_expr(expr); + for arg in args { + self.visit_expr(arg); } } Some(Callable::NewType) => { self.visit_expr(func); - for expr in args.iter().skip(1) { - self.visit_type_definition(expr); + let mut args = args.iter(); + if let Some(arg) = args.next() { + self.visit_non_type_definition(arg); + } + for arg in args { + self.visit_type_definition(arg); } } Some(Callable::TypeVar) => { self.visit_expr(func); - for expr in args.iter().skip(1) { - self.visit_type_definition(expr); + let mut args = args.iter(); + if let Some(arg) = args.next() { + self.visit_non_type_definition(arg); + } + for arg in args { + 
self.visit_type_definition(arg); } for keyword in keywords { let Keyword { @@ -3982,24 +3717,30 @@ where self.visit_expr(func); // Ex) NamedTuple("a", [("a", int)]) - if args.len() > 1 { - match &args[1] { - Expr::List(ast::ExprList { elts, .. }) - | Expr::Tuple(ast::ExprTuple { elts, .. }) => { - for elt in elts { - match elt { - Expr::List(ast::ExprList { elts, .. }) - | Expr::Tuple(ast::ExprTuple { elts, .. }) => { - if elts.len() == 2 { - self.visit_non_type_definition(&elts[0]); - self.visit_type_definition(&elts[1]); - } - } - _ => {} + let mut args = args.iter(); + if let Some(arg) = args.next() { + self.visit_non_type_definition(arg); + } + for arg in args { + if let Expr::List(ast::ExprList { elts, .. }) + | Expr::Tuple(ast::ExprTuple { elts, .. }) = arg + { + for elt in elts { + match elt { + Expr::List(ast::ExprList { elts, .. }) + | Expr::Tuple(ast::ExprTuple { elts, .. }) + if elts.len() == 2 => + { + self.visit_non_type_definition(&elts[0]); + self.visit_type_definition(&elts[1]); + } + _ => { + self.visit_non_type_definition(elt); } } } - _ => {} + } else { + self.visit_non_type_definition(arg); } } @@ -4013,12 +3754,16 @@ where self.visit_expr(func); // Ex) TypedDict("a", {"a": int}) - if args.len() > 1 { + let mut args = args.iter(); + if let Some(arg) = args.next() { + self.visit_non_type_definition(arg); + } + for arg in args { if let Expr::Dict(ast::ExprDict { keys, values, range: _, - }) = &args[1] + }) = arg { for key in keys.iter().flatten() { self.visit_non_type_definition(key); @@ -4026,6 +3771,8 @@ where for value in values { self.visit_type_definition(value); } + } else { + self.visit_non_type_definition(arg); } } @@ -4038,11 +3785,12 @@ where Some(Callable::MypyExtension) => { self.visit_expr(func); - if let Some(arg) = args.first() { + let mut args = args.iter(); + if let Some(arg) = args.next() { // Ex) DefaultNamedArg(bool | None, name="some_prop_name") self.visit_type_definition(arg); - for arg in args.iter().skip(1) { + for arg in args { self.visit_non_type_definition(arg); } for keyword in keywords { @@ -4090,15 +3838,15 @@ where // `obj["foo"]["bar"]`, we need to avoid treating the `obj["foo"]` // portion as an annotation, despite having `ExprContext::Load`. Thus, we track // the `ExprContext` at the top-level. 
- if self.ctx.in_subscript() { + if self.semantic_model.in_subscript() { visitor::walk_expr(self, expr); } else if matches!(ctx, ExprContext::Store | ExprContext::Del) { - self.ctx.flags |= ContextFlags::SUBSCRIPT; + self.semantic_model.flags |= SemanticModelFlags::SUBSCRIPT; visitor::walk_expr(self, expr); } else { match analyze::typing::match_annotated_subscript( value, - &self.ctx, + &self.semantic_model, self.settings.typing_modules.iter().map(String::as_str), ) { Some(subscript) => { @@ -4141,7 +3889,7 @@ where } } Expr::JoinedStr(_) => { - self.ctx.flags |= ContextFlags::F_STRING; + self.semantic_model.flags |= SemanticModelFlags::F_STRING; visitor::walk_expr(self, expr); } _ => visitor::walk_expr(self, expr), @@ -4154,13 +3902,13 @@ where | Expr::ListComp(_) | Expr::DictComp(_) | Expr::SetComp(_) => { - self.ctx.pop_scope(); + self.semantic_model.pop_scope(); } _ => {} }; - self.ctx.flags = flags_snapshot; - self.ctx.pop_expr(); + self.semantic_model.flags = flags_snapshot; + self.semantic_model.pop_expr(); } fn visit_excepthandler(&mut self, excepthandler: &'b Excepthandler) { @@ -4172,7 +3920,7 @@ where range: _, }) => { let name = name.as_deref(); - if self.settings.rules.enabled(Rule::BareExcept) { + if self.enabled(Rule::BareExcept) { if let Some(diagnostic) = pycodestyle::rules::bare_except( type_.as_deref(), body, @@ -4182,17 +3930,13 @@ where self.diagnostics.push(diagnostic); } } - if self - .settings - .rules - .enabled(Rule::RaiseWithoutFromInsideExcept) - { + if self.enabled(Rule::RaiseWithoutFromInsideExcept) { flake8_bugbear::rules::raise_without_from_inside_except(self, body); } - if self.settings.rules.enabled(Rule::BlindExcept) { + if self.enabled(Rule::BlindExcept) { flake8_blind_except::rules::blind_except(self, type_.as_deref(), name, body); } - if self.settings.rules.enabled(Rule::TryExceptPass) { + if self.enabled(Rule::TryExceptPass) { flake8_bandit::rules::try_except_pass( self, excepthandler, @@ -4202,7 +3946,7 @@ where self.settings.flake8_bandit.check_typed_exception, ); } - if self.settings.rules.enabled(Rule::TryExceptContinue) { + if self.enabled(Rule::TryExceptContinue) { flake8_bandit::rules::try_except_continue( self, excepthandler, @@ -4212,26 +3956,22 @@ where self.settings.flake8_bandit.check_typed_exception, ); } - if self.settings.rules.enabled(Rule::ExceptWithEmptyTuple) { + if self.enabled(Rule::ExceptWithEmptyTuple) { flake8_bugbear::rules::except_with_empty_tuple(self, excepthandler); } - if self - .settings - .rules - .enabled(Rule::ExceptWithNonExceptionClasses) - { + if self.enabled(Rule::ExceptWithNonExceptionClasses) { flake8_bugbear::rules::except_with_non_exception_classes(self, excepthandler); } - if self.settings.rules.enabled(Rule::ReraiseNoCause) { + if self.enabled(Rule::ReraiseNoCause) { tryceratops::rules::reraise_no_cause(self, body); } - if self.settings.rules.enabled(Rule::BinaryOpException) { + if self.enabled(Rule::BinaryOpException) { pylint::rules::binary_op_exception(self, excepthandler); } match name { Some(name) => { - if self.settings.rules.enabled(Rule::AmbiguousVariableName) { + if self.enabled(Rule::AmbiguousVariableName) { if let Some(diagnostic) = pycodestyle::rules::ambiguous_variable_name( name, helpers::excepthandler_name_range(excepthandler, self.locator) @@ -4241,18 +3981,18 @@ where } } - if self.settings.rules.enabled(Rule::BuiltinVariableShadowing) { + if self.enabled(Rule::BuiltinVariableShadowing) { flake8_builtins::rules::builtin_variable_shadowing( self, name, - excepthandler, + 
AnyShadowing::from(excepthandler), ); } let name_range = helpers::excepthandler_name_range(excepthandler, self.locator).unwrap(); - if self.ctx.scope().defines(name) { + if self.semantic_model.scope().defines(name) { self.handle_node_store( name, &Expr::Name(ast::ExprName { @@ -4263,7 +4003,7 @@ where ); } - let definition = self.ctx.scope().get(name).copied(); + let definition = self.semantic_model.scope().get(name); self.handle_node_store( name, &Expr::Name(ast::ExprName { @@ -4275,12 +4015,12 @@ where walk_excepthandler(self, excepthandler); - if let Some(index) = { - let scope = self.ctx.scope_mut(); - &scope.remove(name) + if let Some(binding_id) = { + let scope = self.semantic_model.scope_mut(); + scope.remove(name) } { - if !self.ctx.bindings[*index].used() { - if self.settings.rules.enabled(Rule::UnusedVariable) { + if !self.semantic_model.is_used(binding_id) { + if self.enabled(Rule::UnusedVariable) { let mut diagnostic = Diagnostic::new( pyflakes::rules::UnusedVariable { name: name.into() }, name_range, @@ -4299,9 +4039,9 @@ where } } - if let Some(index) = definition { - let scope = self.ctx.scope_mut(); - scope.add(name, index); + if let Some(binding_id) = definition { + let scope = self.semantic_model.scope_mut(); + scope.add(name, binding_id); } } None => walk_excepthandler(self, excepthandler), @@ -4322,28 +4062,20 @@ where } fn visit_arguments(&mut self, arguments: &'b Arguments) { - if self.settings.rules.enabled(Rule::MutableArgumentDefault) { + if self.enabled(Rule::MutableArgumentDefault) { flake8_bugbear::rules::mutable_argument_default(self, arguments); } - if self - .settings - .rules - .enabled(Rule::FunctionCallInDefaultArgument) - { + if self.enabled(Rule::FunctionCallInDefaultArgument) { flake8_bugbear::rules::function_call_argument_default(self, arguments); } if self.is_stub { - if self - .settings - .rules - .enabled(Rule::TypedArgumentDefaultInStub) - { + if self.enabled(Rule::TypedArgumentDefaultInStub) { flake8_pyi::rules::typed_argument_simple_defaults(self, arguments); } } if self.is_stub { - if self.settings.rules.enabled(Rule::ArgumentDefaultInStub) { + if self.enabled(Rule::ArgumentDefaultInStub) { flake8_pyi::rules::argument_simple_defaults(self, arguments); } } @@ -4374,17 +4106,16 @@ where &arg.arg, Binding { kind: BindingKind::Argument, - runtime_usage: None, - synthetic_usage: None, - typing_usage: None, range: arg.range(), - source: self.ctx.stmt_id, - context: self.ctx.execution_context(), - exceptions: self.ctx.exceptions(), + references: Vec::new(), + source: self.semantic_model.stmt_id, + context: self.semantic_model.execution_context(), + exceptions: self.semantic_model.exceptions(), + flags: BindingFlags::empty(), }, ); - if self.settings.rules.enabled(Rule::AmbiguousVariableName) { + if self.enabled(Rule::AmbiguousVariableName) { if let Some(diagnostic) = pycodestyle::rules::ambiguous_variable_name(&arg.arg, arg.range()) { @@ -4392,7 +4123,7 @@ where } } - if self.settings.rules.enabled(Rule::InvalidArgumentName) { + if self.enabled(Rule::InvalidArgumentName) { if let Some(diagnostic) = pep8_naming::rules::invalid_argument_name( &arg.arg, arg, @@ -4402,8 +4133,8 @@ where } } - if self.settings.rules.enabled(Rule::BuiltinArgumentShadowing) { - flake8_builtins::rules::builtin_argument_shadowing(self, &arg.arg, arg); + if self.enabled(Rule::BuiltinArgumentShadowing) { + flake8_builtins::rules::builtin_argument_shadowing(self, arg); } } @@ -4423,13 +4154,12 @@ where name, Binding { kind: BindingKind::Assignment, - runtime_usage: None, - 
synthetic_usage: None, - typing_usage: None, range: pattern.range(), - source: self.ctx.stmt_id, - context: self.ctx.execution_context(), - exceptions: self.ctx.exceptions(), + references: Vec::new(), + source: self.semantic_model.stmt_id, + context: self.semantic_model.execution_context(), + exceptions: self.semantic_model.exceptions(), + flags: BindingFlags::empty(), }, ); } @@ -4438,29 +4168,29 @@ where } fn visit_body(&mut self, body: &'b [Stmt]) { - if self.settings.rules.enabled(Rule::UnnecessaryPass) { + if self.enabled(Rule::UnnecessaryPass) { flake8_pie::rules::no_unnecessary_pass(self, body); } - let prev_body = self.ctx.body; - let prev_body_index = self.ctx.body_index; - self.ctx.body = body; - self.ctx.body_index = 0; + let prev_body = self.semantic_model.body; + let prev_body_index = self.semantic_model.body_index; + self.semantic_model.body = body; + self.semantic_model.body_index = 0; for stmt in body { self.visit_stmt(stmt); - self.ctx.body_index += 1; + self.semantic_model.body_index += 1; } - self.ctx.body = prev_body; - self.ctx.body_index = prev_body_index; + self.semantic_model.body = prev_body; + self.semantic_model.body_index = prev_body_index; } } impl<'a> Checker<'a> { /// Visit a [`Module`]. Returns `true` if the module contains a module-level docstring. fn visit_module(&mut self, python_ast: &'a Suite) -> bool { - if self.settings.rules.enabled(Rule::FStringDocstring) { + if self.enabled(Rule::FStringDocstring) { flake8_bugbear::rules::f_string_docstring(self, python_ast); } let docstring = docstrings::extraction::docstring_from(python_ast); @@ -4504,7 +4234,7 @@ impl<'a> Checker<'a> { // while all subsequent reads and writes are evaluated in the inner scope. In particular, // `x` is local to `foo`, and the `T` in `y=T` skips the class scope when resolving. self.visit_expr(&generator.iter); - self.ctx.push_scope(ScopeKind::Generator); + self.semantic_model.push_scope(ScopeKind::Generator); self.visit_expr(&generator.target); for expr in &generator.ifs { self.visit_boolean_test(expr); @@ -4521,56 +4251,56 @@ impl<'a> Checker<'a> { /// Visit an body of [`Stmt`] nodes within a type-checking block. fn visit_type_checking_block(&mut self, body: &'a [Stmt]) { - let snapshot = self.ctx.flags; - self.ctx.flags |= ContextFlags::TYPE_CHECKING_BLOCK; + let snapshot = self.semantic_model.flags; + self.semantic_model.flags |= SemanticModelFlags::TYPE_CHECKING_BLOCK; self.visit_body(body); - self.ctx.flags = snapshot; + self.semantic_model.flags = snapshot; } /// Visit an [`Expr`], and treat it as a type definition. pub(crate) fn visit_type_definition(&mut self, expr: &'a Expr) { - let snapshot = self.ctx.flags; - self.ctx.flags |= ContextFlags::TYPE_DEFINITION; + let snapshot = self.semantic_model.flags; + self.semantic_model.flags |= SemanticModelFlags::TYPE_DEFINITION; self.visit_expr(expr); - self.ctx.flags = snapshot; + self.semantic_model.flags = snapshot; } /// Visit an [`Expr`], and treat it as _not_ a type definition. pub(crate) fn visit_non_type_definition(&mut self, expr: &'a Expr) { - let snapshot = self.ctx.flags; - self.ctx.flags -= ContextFlags::TYPE_DEFINITION; + let snapshot = self.semantic_model.flags; + self.semantic_model.flags -= SemanticModelFlags::TYPE_DEFINITION; self.visit_expr(expr); - self.ctx.flags = snapshot; + self.semantic_model.flags = snapshot; } /// Visit an [`Expr`], and treat it as a boolean test. 
This is useful for detecting whether an /// expressions return value is significant, or whether the calling context only relies on /// its truthiness. pub(crate) fn visit_boolean_test(&mut self, expr: &'a Expr) { - let snapshot = self.ctx.flags; - self.ctx.flags |= ContextFlags::BOOLEAN_TEST; + let snapshot = self.semantic_model.flags; + self.semantic_model.flags |= SemanticModelFlags::BOOLEAN_TEST; self.visit_expr(expr); - self.ctx.flags = snapshot; + self.semantic_model.flags = snapshot; } /// Add a [`Binding`] to the current scope, bound to the given name. - fn add_binding(&mut self, name: &'a str, binding: Binding<'a>) { - let binding_id = self.ctx.bindings.next_id(); + fn add_binding(&mut self, name: &'a str, binding: Binding<'a>) -> BindingId { + let binding_id = self.semantic_model.bindings.next_id(); if let Some((stack_index, existing_binding_id)) = self - .ctx + .semantic_model .scopes - .ancestors(self.ctx.scope_id) + .ancestors(self.semantic_model.scope_id) .enumerate() .find_map(|(stack_index, scope)| { - scope.get(name).map(|binding_id| (stack_index, *binding_id)) + scope.get(name).map(|binding_id| (stack_index, binding_id)) }) { - let existing = &self.ctx.bindings[existing_binding_id]; + let existing = &self.semantic_model.bindings[existing_binding_id]; let in_current_scope = stack_index == 0; if !existing.kind.is_builtin() && existing.source.map_or(true, |left| { binding.source.map_or(true, |right| { - !branch_detection::different_forks(left, right, &self.ctx.stmts) + !branch_detection::different_forks(left, right, &self.semantic_model.stmts) }) }) { @@ -4582,7 +4312,7 @@ impl<'a> Checker<'a> { | BindingKind::FutureImportation ); if binding.kind.is_loop_var() && existing_is_import { - if self.settings.rules.enabled(Rule::ImportShadowedByLoopVar) { + if self.enabled(Rule::ImportShadowedByLoopVar) { #[allow(deprecated)] let line = self.locator.compute_line_index(existing.range.start()); @@ -4595,40 +4325,34 @@ impl<'a> Checker<'a> { )); } } else if in_current_scope { - if !existing.used() + if !existing.is_used() && binding.redefines(existing) && (!self.settings.dummy_variable_rgx.is_match(name) || existing_is_import) && !(existing.kind.is_function_definition() && analyze::visibility::is_overload( - &self.ctx, - cast::decorator_list(self.ctx.stmts[existing.source.unwrap()]), + &self.semantic_model, + cast::decorator_list( + self.semantic_model.stmts[existing.source.unwrap()], + ), )) { - if self.settings.rules.enabled(Rule::RedefinedWhileUnused) { + if self.enabled(Rule::RedefinedWhileUnused) { #[allow(deprecated)] - let line = self.locator.compute_line_index(existing.range.start()); + let line = self.locator.compute_line_index( + existing + .trimmed_range(&self.semantic_model, self.locator) + .start(), + ); let mut diagnostic = Diagnostic::new( pyflakes::rules::RedefinedWhileUnused { name: name.to_string(), line, }, - matches!( - binding.kind, - BindingKind::ClassDefinition | BindingKind::FunctionDefinition - ) - .then(|| { - binding.source.map_or(binding.range, |source| { - helpers::identifier_range( - self.ctx.stmts[source], - self.locator, - ) - }) - }) - .unwrap_or(binding.range), + binding.trimmed_range(&self.semantic_model, self.locator), ); if let Some(parent) = binding.source { - let parent = self.ctx.stmts[parent]; + let parent = self.semantic_model.stmts[parent]; if matches!(parent, Stmt::ImportFrom(_)) && parent.range().contains_range(binding.range) { @@ -4639,7 +4363,7 @@ impl<'a> Checker<'a> { } } } else if existing_is_import && binding.redefines(existing) { - 
self.ctx + self.semantic_model .shadowed_bindings .entry(existing_binding_id) .or_insert_with(Vec::new) @@ -4652,18 +4376,18 @@ impl<'a> Checker<'a> { // expressions in generators and comprehensions bind to the scope that contains the // outermost comprehension. let scope_id = if binding.kind.is_named_expr_assignment() { - self.ctx + self.semantic_model .scopes - .ancestor_ids(self.ctx.scope_id) - .find_or_last(|scope_id| !self.ctx.scopes[*scope_id].kind.is_generator()) - .unwrap_or(self.ctx.scope_id) + .ancestor_ids(self.semantic_model.scope_id) + .find_or_last(|scope_id| !self.semantic_model.scopes[*scope_id].kind.is_generator()) + .unwrap_or(self.semantic_model.scope_id) } else { - self.ctx.scope_id + self.semantic_model.scope_id }; - let scope = &mut self.ctx.scopes[scope_id]; + let scope = &mut self.semantic_model.scopes[scope_id]; - let binding = if let Some(index) = scope.get(name) { - let existing = &self.ctx.bindings[*index]; + let binding = if let Some(binding_id) = scope.get(name) { + let existing = &self.semantic_model.bindings[binding_id]; match &existing.kind { BindingKind::Builtin => { // Avoid overriding builtins. @@ -4673,17 +4397,13 @@ impl<'a> Checker<'a> { // If the original binding was a global or nonlocal, and the new binding conflicts within // the current scope, then the new binding is also as the same. Binding { - runtime_usage: existing.runtime_usage, - synthetic_usage: existing.synthetic_usage, - typing_usage: existing.typing_usage, + references: existing.references.clone(), kind: kind.clone(), ..binding } } _ => Binding { - runtime_usage: existing.runtime_usage, - synthetic_usage: existing.synthetic_usage, - typing_usage: existing.typing_usage, + references: existing.references.clone(), ..binding }, } @@ -4694,37 +4414,35 @@ impl<'a> Checker<'a> { // Don't treat annotations as assignments if there is an existing value // in scope. if binding.kind.is_annotation() && scope.defines(name) { - self.ctx.bindings.push(binding); - return; + return self.semantic_model.bindings.push(binding); } // Add the binding to the scope. scope.add(name, binding_id); // Add the binding to the arena. - self.ctx.bindings.push(binding); + self.semantic_model.bindings.push(binding) } fn bind_builtins(&mut self) { - let scope = &mut self.ctx.scopes[self.ctx.scope_id]; - + let scope = &mut self.semantic_model.scopes[self.semantic_model.scope_id]; for builtin in BUILTINS .iter() .chain(MAGIC_GLOBALS.iter()) .copied() .chain(self.settings.builtins.iter().map(String::as_str)) { - let id = self.ctx.bindings.push(Binding { + // Add the builtin to the scope. + let binding_id = self.semantic_model.bindings.push(Binding { kind: BindingKind::Builtin, range: TextRange::default(), - runtime_usage: None, - synthetic_usage: Some((ScopeId::global(), TextRange::default())), - typing_usage: None, source: None, + references: Vec::new(), context: ExecutionContext::Runtime, exceptions: Exceptions::empty(), + flags: BindingFlags::empty(), }); - scope.add(builtin, id); + scope.add(builtin, binding_id); } } @@ -4732,178 +4450,87 @@ impl<'a> Checker<'a> { let Expr::Name(ast::ExprName { id, .. } )= expr else { return; }; - let id = id.as_str(); - - let mut first_iter = true; - let mut import_starred = false; - - for scope in self.ctx.scopes.ancestors(self.ctx.scope_id) { - if scope.kind.is_class() { - if id == "__class__" { - return; - } else if !first_iter { - continue; - } + match self.semantic_model.resolve_reference(id, expr.range()) { + ResolvedReference::Resolved(..) 
| ResolvedReference::ImplicitGlobal => { + // Nothing to do. } - - if let Some(index) = scope.get(id) { - // Mark the binding as used. - let context = self.ctx.execution_context(); - self.ctx.bindings[*index].mark_used(self.ctx.scope_id, expr.range(), context); - - if !self.ctx.in_deferred_type_definition() - && self.ctx.bindings[*index].kind.is_annotation() - { - continue; + ResolvedReference::StarImport => { + // F405 + if self.enabled(Rule::UndefinedLocalWithImportStarUsage) { + let sources: Vec = self + .semantic_model + .scopes + .iter() + .flat_map(Scope::star_imports) + .map(|StarImportation { level, module }| { + helpers::format_import_from(*level, *module) + }) + .sorted() + .dedup() + .collect(); + self.diagnostics.push(Diagnostic::new( + pyflakes::rules::UndefinedLocalWithImportStarUsage { + name: id.to_string(), + sources, + }, + expr.range(), + )); } + } + ResolvedReference::NotFound => { + // F821 + if self.enabled(Rule::UndefinedName) { + // Allow __path__. + if self.path.ends_with("__init__.py") && id == "__path__" { + return; + } - // If the name of the sub-importation is the same as an alias of another - // importation and the alias is used, that sub-importation should be - // marked as used too. - // - // This handles code like: - // import pyarrow as pa - // import pyarrow.csv - // print(pa.csv.read_csv("test.csv")) - match &self.ctx.bindings[*index].kind { - BindingKind::Importation(Importation { name, full_name }) - | BindingKind::SubmoduleImportation(SubmoduleImportation { name, full_name }) => + // Avoid flagging if `NameError` is handled. + if self + .semantic_model + .handled_exceptions + .iter() + .any(|handler_names| handler_names.contains(Exceptions::NAME_ERROR)) { - let has_alias = full_name - .split('.') - .last() - .map(|segment| &segment != name) - .unwrap_or_default(); - if has_alias { - // Mark the sub-importation as used. - if let Some(index) = scope.get(full_name) { - self.ctx.bindings[*index].mark_used( - self.ctx.scope_id, - expr.range(), - context, - ); - } - } - } - BindingKind::FromImportation(FromImportation { name, full_name }) => { - let has_alias = full_name - .split('.') - .last() - .map(|segment| &segment != name) - .unwrap_or_default(); - if has_alias { - // Mark the sub-importation as used. - if let Some(index) = scope.get(full_name.as_str()) { - self.ctx.bindings[*index].mark_used( - self.ctx.scope_id, - expr.range(), - context, - ); - } - } + return; } - _ => {} - } - - return; - } - - first_iter = false; - import_starred = import_starred || scope.uses_star_imports(); - } - - if import_starred { - // F405 - if self - .settings - .rules - .enabled(Rule::UndefinedLocalWithImportStarUsage) - { - let sources: Vec = self - .ctx - .scopes - .iter() - .flat_map(Scope::star_imports) - .map(|StarImportation { level, module }| { - helpers::format_import_from(*level, *module) - }) - .sorted() - .dedup() - .collect(); - self.diagnostics.push(Diagnostic::new( - pyflakes::rules::UndefinedLocalWithImportStarUsage { - name: id.to_string(), - sources, - }, - expr.range(), - )); - } - return; - } - - if self.settings.rules.enabled(Rule::UndefinedName) { - // Allow __path__. - if self.path.ends_with("__init__.py") && id == "__path__" { - return; - } - // Allow "__module__" and "__qualname__" in class scopes. - if (id == "__module__" || id == "__qualname__") - && matches!(self.ctx.scope().kind, ScopeKind::Class(..)) - { - return; - } - - // Avoid flagging if NameError is handled. 
- if self - .ctx - .handled_exceptions - .iter() - .any(|handler_names| handler_names.contains(Exceptions::NAME_ERROR)) - { - return; + self.diagnostics.push(Diagnostic::new( + pyflakes::rules::UndefinedName { + name: id.to_string(), + }, + expr.range(), + )); + } } - - self.diagnostics.push(Diagnostic::new( - pyflakes::rules::UndefinedName { - name: id.to_string(), - }, - expr.range(), - )); } } fn handle_node_store(&mut self, id: &'a str, expr: &Expr) { - let parent = self.ctx.stmt(); + let parent = self.semantic_model.stmt(); - if self.settings.rules.enabled(Rule::UndefinedLocal) { + if self.enabled(Rule::UndefinedLocal) { pyflakes::rules::undefined_local(self, id); } - if self - .settings - .rules - .enabled(Rule::NonLowercaseVariableInFunction) - { - if matches!(self.ctx.scope().kind, ScopeKind::Function(..)) { + if self.enabled(Rule::NonLowercaseVariableInFunction) { + if matches!(self.semantic_model.scope().kind, ScopeKind::Function(..)) { // Ignore globals. if !self - .ctx + .semantic_model .scope() .get(id) - .map_or(false, |index| self.ctx.bindings[*index].kind.is_global()) + .map_or(false, |binding_id| { + self.semantic_model.bindings[binding_id].kind.is_global() + }) { pep8_naming::rules::non_lowercase_variable_in_function(self, expr, parent, id); } } } - if self - .settings - .rules - .enabled(Rule::MixedCaseVariableInClassScope) - { - if let ScopeKind::Class(class) = &self.ctx.scope().kind { + if self.enabled(Rule::MixedCaseVariableInClassScope) { + if let ScopeKind::Class(class) = &self.semantic_model.scope().kind { pep8_naming::rules::mixed_case_variable_in_class_scope( self, expr, @@ -4914,12 +4541,8 @@ impl<'a> Checker<'a> { } } - if self - .settings - .rules - .enabled(Rule::MixedCaseVariableInGlobalScope) - { - if matches!(self.ctx.scope().kind, ScopeKind::Module) { + if self.enabled(Rule::MixedCaseVariableInGlobalScope) { + if matches!(self.semantic_model.scope().kind, ScopeKind::Module) { pep8_naming::rules::mixed_case_variable_in_global_scope(self, expr, parent, id); } } @@ -4932,13 +4555,12 @@ impl<'a> Checker<'a> { id, Binding { kind: BindingKind::Annotation, - runtime_usage: None, - synthetic_usage: None, - typing_usage: None, range: expr.range(), - source: self.ctx.stmt_id, - context: self.ctx.execution_context(), - exceptions: self.ctx.exceptions(), + references: Vec::new(), + source: self.semantic_model.stmt_id, + context: self.semantic_model.execution_context(), + exceptions: self.semantic_model.exceptions(), + flags: BindingFlags::empty(), }, ); return; @@ -4949,13 +4571,12 @@ impl<'a> Checker<'a> { id, Binding { kind: BindingKind::LoopVar, - runtime_usage: None, - synthetic_usage: None, - typing_usage: None, range: expr.range(), - source: self.ctx.stmt_id, - context: self.ctx.execution_context(), - exceptions: self.ctx.exceptions(), + references: Vec::new(), + source: self.semantic_model.stmt_id, + context: self.semantic_model.execution_context(), + exceptions: self.semantic_model.exceptions(), + flags: BindingFlags::empty(), }, ); return; @@ -4966,19 +4587,18 @@ impl<'a> Checker<'a> { id, Binding { kind: BindingKind::Binding, - runtime_usage: None, - synthetic_usage: None, - typing_usage: None, range: expr.range(), - source: self.ctx.stmt_id, - context: self.ctx.execution_context(), - exceptions: self.ctx.exceptions(), + references: Vec::new(), + source: self.semantic_model.stmt_id, + context: self.semantic_model.execution_context(), + exceptions: self.semantic_model.exceptions(), + flags: BindingFlags::empty(), }, ); return; } - let scope = 
self.ctx.scope(); + let scope = self.semantic_model.scope(); if id == "__all__" && scope.kind.is_module() @@ -5013,13 +4633,13 @@ impl<'a> Checker<'a> { } { let (all_names, all_names_flags) = { let (mut names, flags) = - extract_all_names(parent, |name| self.ctx.is_builtin(name)); + extract_all_names(parent, |name| self.semantic_model.is_builtin(name)); // Grab the existing bound __all__ values. if let Stmt::AugAssign(_) = parent { - if let Some(index) = scope.get("__all__") { + if let Some(binding_id) = scope.get("__all__") { if let BindingKind::Export(Export { names: existing }) = - &self.ctx.bindings[*index].kind + &self.semantic_model.bindings[binding_id].kind { names.extend_from_slice(existing); } @@ -5029,14 +4649,14 @@ impl<'a> Checker<'a> { (names, flags) }; - if self.settings.rules.enabled(Rule::InvalidAllFormat) { + if self.enabled(Rule::InvalidAllFormat) { if matches!(all_names_flags, AllNamesFlags::INVALID_FORMAT) { self.diagnostics .push(pylint::rules::invalid_all_format(expr)); } } - if self.settings.rules.enabled(Rule::InvalidAllObject) { + if self.enabled(Rule::InvalidAllObject) { if matches!(all_names_flags, AllNamesFlags::INVALID_OBJECT) { self.diagnostics .push(pylint::rules::invalid_all_object(expr)); @@ -5047,13 +4667,12 @@ impl<'a> Checker<'a> { id, Binding { kind: BindingKind::Export(Export { names: all_names }), - runtime_usage: None, - synthetic_usage: None, - typing_usage: None, range: expr.range(), - source: self.ctx.stmt_id, - context: self.ctx.execution_context(), - exceptions: self.ctx.exceptions(), + references: Vec::new(), + source: self.semantic_model.stmt_id, + context: self.semantic_model.execution_context(), + exceptions: self.semantic_model.exceptions(), + flags: BindingFlags::empty(), }, ); return; @@ -5061,7 +4680,7 @@ impl<'a> Checker<'a> { } if self - .ctx + .semantic_model .expr_ancestors() .any(|expr| matches!(expr, Expr::NamedExpr(_))) { @@ -5069,13 +4688,12 @@ impl<'a> Checker<'a> { id, Binding { kind: BindingKind::NamedExprAssignment, - runtime_usage: None, - synthetic_usage: None, - typing_usage: None, range: expr.range(), - source: self.ctx.stmt_id, - context: self.ctx.execution_context(), - exceptions: self.ctx.exceptions(), + references: Vec::new(), + source: self.semantic_model.stmt_id, + context: self.semantic_model.execution_context(), + exceptions: self.semantic_model.exceptions(), + flags: BindingFlags::empty(), }, ); return; @@ -5085,13 +4703,12 @@ impl<'a> Checker<'a> { id, Binding { kind: BindingKind::Assignment, - runtime_usage: None, - synthetic_usage: None, - typing_usage: None, range: expr.range(), - source: self.ctx.stmt_id, - context: self.ctx.execution_context(), - exceptions: self.ctx.exceptions(), + references: Vec::new(), + source: self.semantic_model.stmt_id, + context: self.semantic_model.execution_context(), + exceptions: self.semantic_model.exceptions(), + flags: BindingFlags::empty(), }, ); } @@ -5100,15 +4717,15 @@ impl<'a> Checker<'a> { let Expr::Name(ast::ExprName { id, .. 
} )= expr else { return; }; - if helpers::on_conditional_branch(&mut self.ctx.parents()) { + if helpers::on_conditional_branch(&mut self.semantic_model.parents()) { return; } - let scope = self.ctx.scope_mut(); + let scope = self.semantic_model.scope_mut(); if scope.remove(id.as_str()).is_some() { return; } - if !self.settings.rules.enabled(Rule::UndefinedName) { + if !self.enabled(Rule::UndefinedName) { return; } @@ -5124,10 +4741,10 @@ impl<'a> Checker<'a> { while !self.deferred.future_type_definitions.is_empty() { let type_definitions = std::mem::take(&mut self.deferred.future_type_definitions); for (expr, snapshot) in type_definitions { - self.ctx.restore(snapshot); + self.semantic_model.restore(snapshot); - self.ctx.flags |= - ContextFlags::TYPE_DEFINITION | ContextFlags::FUTURE_TYPE_DEFINITION; + self.semantic_model.flags |= SemanticModelFlags::TYPE_DEFINITION + | SemanticModelFlags::FUTURE_TYPE_DEFINITION; self.visit_expr(expr); } } @@ -5140,32 +4757,33 @@ impl<'a> Checker<'a> { if let Ok((expr, kind)) = parse_type_annotation(value, range, self.locator) { let expr = allocator.alloc(expr); - self.ctx.restore(snapshot); + self.semantic_model.restore(snapshot); - if self.ctx.in_annotation() && self.ctx.future_annotations() { - if self.settings.rules.enabled(Rule::QuotedAnnotation) { + if self.semantic_model.in_annotation() + && self.semantic_model.future_annotations() + { + if self.enabled(Rule::QuotedAnnotation) { pyupgrade::rules::quoted_annotation(self, value, range); } } if self.is_stub { - if self.settings.rules.enabled(Rule::QuotedAnnotationInStub) { + if self.enabled(Rule::QuotedAnnotationInStub) { flake8_pyi::rules::quoted_annotation_in_stub(self, value, range); } } let type_definition_flag = match kind { - AnnotationKind::Simple => ContextFlags::SIMPLE_STRING_TYPE_DEFINITION, - AnnotationKind::Complex => ContextFlags::COMPLEX_STRING_TYPE_DEFINITION, + AnnotationKind::Simple => SemanticModelFlags::SIMPLE_STRING_TYPE_DEFINITION, + AnnotationKind::Complex => { + SemanticModelFlags::COMPLEX_STRING_TYPE_DEFINITION + } }; - self.ctx.flags |= ContextFlags::TYPE_DEFINITION | type_definition_flag; + self.semantic_model.flags |= + SemanticModelFlags::TYPE_DEFINITION | type_definition_flag; self.visit_expr(expr); } else { - if self - .settings - .rules - .enabled(Rule::ForwardAnnotationSyntaxError) - { + if self.enabled(Rule::ForwardAnnotationSyntaxError) { self.diagnostics.push(Diagnostic::new( pyflakes::rules::ForwardAnnotationSyntaxError { body: value.to_string(), @@ -5182,9 +4800,9 @@ impl<'a> Checker<'a> { while !self.deferred.functions.is_empty() { let deferred_functions = std::mem::take(&mut self.deferred.functions); for snapshot in deferred_functions { - self.ctx.restore(snapshot); + self.semantic_model.restore(snapshot); - match &self.ctx.stmt() { + match &self.semantic_model.stmt() { Stmt::FunctionDef(ast::StmtFunctionDef { body, args, .. }) | Stmt::AsyncFunctionDef(ast::StmtAsyncFunctionDef { body, args, .. 
}) => { self.visit_arguments(args); @@ -5204,7 +4822,7 @@ impl<'a> Checker<'a> { while !self.deferred.lambdas.is_empty() { let lambdas = std::mem::take(&mut self.deferred.lambdas); for (expr, snapshot) in lambdas { - self.ctx.restore(snapshot); + self.semantic_model.restore(snapshot); if let Expr::Lambda(ast::ExprLambda { args, @@ -5227,33 +4845,33 @@ impl<'a> Checker<'a> { while !self.deferred.assignments.is_empty() { let assignments = std::mem::take(&mut self.deferred.assignments); for snapshot in assignments { - self.ctx.restore(snapshot); + self.semantic_model.restore(snapshot); // pyflakes - if self.settings.rules.enabled(Rule::UnusedVariable) { - pyflakes::rules::unused_variable(self, self.ctx.scope_id); + if self.enabled(Rule::UnusedVariable) { + pyflakes::rules::unused_variable(self, self.semantic_model.scope_id); } - if self.settings.rules.enabled(Rule::UnusedAnnotation) { - pyflakes::rules::unused_annotation(self, self.ctx.scope_id); + if self.enabled(Rule::UnusedAnnotation) { + pyflakes::rules::unused_annotation(self, self.semantic_model.scope_id); } if !self.is_stub { // flake8-unused-arguments - if self.settings.rules.any_enabled(&[ + if self.any_enabled(&[ Rule::UnusedFunctionArgument, Rule::UnusedMethodArgument, Rule::UnusedClassMethodArgument, Rule::UnusedStaticMethodArgument, Rule::UnusedLambdaArgument, ]) { - let scope = &self.ctx.scopes[self.ctx.scope_id]; - let parent = &self.ctx.scopes[scope.parent.unwrap()]; + let scope = &self.semantic_model.scopes[self.semantic_model.scope_id]; + let parent = &self.semantic_model.scopes[scope.parent.unwrap()]; self.diagnostics .extend(flake8_unused_arguments::rules::unused_arguments( self, parent, scope, - &self.ctx.bindings, + &self.semantic_model.bindings, )); } } @@ -5266,12 +4884,13 @@ impl<'a> Checker<'a> { let for_loops = std::mem::take(&mut self.deferred.for_loops); for snapshot in for_loops { - self.ctx.restore(snapshot); + self.semantic_model.restore(snapshot); if let Stmt::For(ast::StmtFor { target, body, .. }) - | Stmt::AsyncFor(ast::StmtAsyncFor { target, body, .. }) = &self.ctx.stmt() + | Stmt::AsyncFor(ast::StmtAsyncFor { target, body, .. }) = + &self.semantic_model.stmt() { - if self.settings.rules.enabled(Rule::UnusedLoopControlVariable) { + if self.enabled(Rule::UnusedLoopControlVariable) { flake8_bugbear::rules::unused_loop_control_variable(self, target, body); } } else { @@ -5283,7 +4902,7 @@ impl<'a> Checker<'a> { fn check_dead_scopes(&mut self) { let enforce_typing_imports = !self.is_stub - && self.settings.rules.any_enabled(&[ + && self.any_enabled(&[ Rule::GlobalVariableNotAssigned, Rule::RuntimeImportInTypeCheckingBlock, Rule::TypingOnlyFirstPartyImport, @@ -5292,7 +4911,7 @@ impl<'a> Checker<'a> { ]); if !(enforce_typing_imports - || self.settings.rules.any_enabled(&[ + || self.any_enabled(&[ Rule::UnusedImport, Rule::UndefinedLocalWithImportStarUsage, Rule::RedefinedWhileUnused, @@ -5304,10 +4923,10 @@ impl<'a> Checker<'a> { // Mark anything referenced in `__all__` as used. 
let all_bindings: Option<(Vec, TextRange)> = { - let global_scope = self.ctx.global_scope(); + let global_scope = self.semantic_model.global_scope(); let all_names: Option<(&[&str], TextRange)> = global_scope .get("__all__") - .map(|index| &self.ctx.bindings[*index]) + .map(|binding_id| &self.semantic_model.bindings[binding_id]) .and_then(|binding| match &binding.kind { BindingKind::Export(Export { names }) => { Some((names.as_slice(), binding.range)) @@ -5319,7 +4938,7 @@ impl<'a> Checker<'a> { ( names .iter() - .filter_map(|name| global_scope.get(name).copied()) + .filter_map(|name| global_scope.get(name)) .collect(), range, ) @@ -5327,9 +4946,9 @@ impl<'a> Checker<'a> { }; if let Some((bindings, range)) = all_bindings { - for index in bindings { - self.ctx.bindings[index].mark_used( - ScopeId::global(), + for binding_id in bindings { + self.semantic_model.add_global_reference( + binding_id, range, ExecutionContext::Runtime, ); @@ -5338,10 +4957,10 @@ impl<'a> Checker<'a> { // Extract `__all__` names from the global scope. let all_names: Option<(&[&str], TextRange)> = self - .ctx + .semantic_model .global_scope() .get("__all__") - .map(|index| &self.ctx.bindings[*index]) + .map(|binding_id| &self.semantic_model.bindings[binding_id]) .and_then(|binding| match &binding.kind { BindingKind::Export(Export { names }) => Some((names.as_slice(), binding.range)), _ => None, @@ -5354,15 +4973,18 @@ impl<'a> Checker<'a> { if self.settings.flake8_type_checking.strict { vec![] } else { - self.ctx + self.semantic_model .scopes .iter() .map(|scope| { scope .binding_ids() - .map(|index| &self.ctx.bindings[*index]) + .map(|binding_id| &self.semantic_model.bindings[binding_id]) .filter(|binding| { - flake8_type_checking::helpers::is_valid_runtime_import(binding) + flake8_type_checking::helpers::is_valid_runtime_import( + &self.semantic_model, + binding, + ) }) .collect() }) @@ -5373,12 +4995,12 @@ impl<'a> Checker<'a> { }; let mut diagnostics: Vec = vec![]; - for scope_id in self.ctx.dead_scopes.iter().rev() { - let scope = &self.ctx.scopes[*scope_id]; + for scope_id in self.semantic_model.dead_scopes.iter().rev() { + let scope = &self.semantic_model.scopes[*scope_id]; if scope.kind.is_module() { // F822 - if self.settings.rules.enabled(Rule::UndefinedExport) { + if self.enabled(Rule::UndefinedExport) { if !self.path.ends_with("__init__.py") { if let Some((names, range)) = all_names { diagnostics @@ -5388,11 +5010,7 @@ impl<'a> Checker<'a> { } // F405 - if self - .settings - .rules - .enabled(Rule::UndefinedLocalWithImportStarUsage) - { + if self.enabled(Rule::UndefinedLocalWithImportStarUsage) { if let Some((names, range)) = &all_names { let sources: Vec = scope .star_imports() @@ -5420,12 +5038,12 @@ impl<'a> Checker<'a> { } // PLW0602 - if self.settings.rules.enabled(Rule::GlobalVariableNotAssigned) { - for (name, index) in scope.bindings() { - let binding = &self.ctx.bindings[*index]; + if self.enabled(Rule::GlobalVariableNotAssigned) { + for (name, binding_id) in scope.bindings() { + let binding = &self.semantic_model.bindings[binding_id]; if binding.kind.is_global() { if let Some(source) = binding.source { - let stmt = &self.ctx.stmts[source]; + let stmt = &self.semantic_model.stmts[source]; if matches!(stmt, Stmt::Global(_)) { diagnostics.push(Diagnostic::new( pylint::rules::GlobalVariableNotAssigned { @@ -5447,49 +5065,41 @@ impl<'a> Checker<'a> { // Look for any bindings that were redefined in another scope, and remain // unused. 
Note that we only store references in `redefinitions` if // the bindings are in different scopes. - if self.settings.rules.enabled(Rule::RedefinedWhileUnused) { - for (name, index) in scope.bindings() { - let binding = &self.ctx.bindings[*index]; + if self.enabled(Rule::RedefinedWhileUnused) { + for (name, binding_id) in scope.bindings() { + let binding = &self.semantic_model.bindings[binding_id]; if matches!( binding.kind, BindingKind::Importation(..) | BindingKind::FromImportation(..) | BindingKind::SubmoduleImportation(..) - | BindingKind::FutureImportation ) { - if binding.used() { + if binding.is_used() { continue; } - if let Some(indices) = self.ctx.shadowed_bindings.get(index) { - for index in indices { - let rebound = &self.ctx.bindings[*index]; + if let Some(shadowed_ids) = + self.semantic_model.shadowed_bindings.get(&binding_id) + { + for binding_id in shadowed_ids.iter().copied() { + let rebound = &self.semantic_model.bindings[binding_id]; #[allow(deprecated)] - let line = self.locator.compute_line_index(binding.range.start()); + let line = self.locator.compute_line_index( + binding + .trimmed_range(&self.semantic_model, self.locator) + .start(), + ); let mut diagnostic = Diagnostic::new( pyflakes::rules::RedefinedWhileUnused { name: (*name).to_string(), line, }, - matches!( - rebound.kind, - BindingKind::ClassDefinition - | BindingKind::FunctionDefinition - ) - .then(|| { - rebound.source.map_or(rebound.range, |source| { - helpers::identifier_range( - self.ctx.stmts[source], - self.locator, - ) - }) - }) - .unwrap_or(rebound.range), + rebound.trimmed_range(&self.semantic_model, self.locator), ); if let Some(source) = rebound.source { - let parent = &self.ctx.stmts[source]; + let parent = &self.semantic_model.stmts[source]; if matches!(parent, Stmt::ImportFrom(_)) && parent.range().contains_range(rebound.range) { @@ -5507,20 +5117,23 @@ impl<'a> Checker<'a> { let runtime_imports: Vec<&Binding> = if self.settings.flake8_type_checking.strict { vec![] } else { - self.ctx + self.semantic_model .scopes .ancestor_ids(*scope_id) - .flat_map(|scope_id| runtime_imports[usize::from(scope_id)].iter()) + .flat_map(|scope_id| runtime_imports[scope_id.as_usize()].iter()) .copied() .collect() }; - for index in scope.binding_ids() { - let binding = &self.ctx.bindings[*index]; + for binding_id in scope.binding_ids() { + let binding = &self.semantic_model.bindings[binding_id]; if let Some(diagnostic) = - flake8_type_checking::rules::runtime_import_in_type_checking_block(binding) + flake8_type_checking::rules::runtime_import_in_type_checking_block( + binding, + &self.semantic_model, + ) { - if self.settings.rules.enabled(diagnostic.kind.rule()) { + if self.enabled(diagnostic.kind.rule()) { diagnostics.push(diagnostic); } } @@ -5528,18 +5141,19 @@ impl<'a> Checker<'a> { flake8_type_checking::rules::typing_only_runtime_import( binding, &runtime_imports, + &self.semantic_model, self.package, self.settings, ) { - if self.settings.rules.enabled(diagnostic.kind.rule()) { + if self.enabled(diagnostic.kind.rule()) { diagnostics.push(diagnostic); } } } } - if self.settings.rules.enabled(Rule::UnusedImport) { + if self.enabled(Rule::UnusedImport) { // Collect all unused imports by location. (Multiple unused imports at the same // location indicates an `import from`.) 
type UnusedImport<'a> = (&'a str, &'a TextRange); @@ -5549,8 +5163,12 @@ impl<'a> Checker<'a> { let mut ignored: FxHashMap> = FxHashMap::default(); - for index in scope.binding_ids() { - let binding = &self.ctx.bindings[*index]; + for binding_id in scope.binding_ids() { + let binding = &self.semantic_model.bindings[binding_id]; + + if binding.is_used() || binding.is_explicit_export() { + continue; + } let full_name = match &binding.kind { BindingKind::Importation(Importation { full_name, .. }) => full_name, @@ -5564,16 +5182,12 @@ impl<'a> Checker<'a> { _ => continue, }; - if binding.used() { - continue; - } - let child_id = binding.source.unwrap(); - let parent_id = self.ctx.stmts.parent_id(child_id); + let parent_id = self.semantic_model.stmts.parent_id(child_id); let exceptions = binding.exceptions; let diagnostic_offset = binding.range.start(); - let child = &self.ctx.stmts[child_id]; + let child = &self.semantic_model.stmts[child_id]; let parent_offset = if matches!(child, Stmt::ImportFrom(_)) { Some(child.start()) } else { @@ -5603,8 +5217,8 @@ impl<'a> Checker<'a> { .into_iter() .sorted_by_key(|((defined_by, ..), ..)| *defined_by) { - let child = self.ctx.stmts[defined_by]; - let parent = defined_in.map(|defined_in| self.ctx.stmts[defined_in]); + let child = self.semantic_model.stmts[defined_by]; + let parent = defined_in.map(|defined_in| self.semantic_model.stmts[defined_in]); let multiple = unused_imports.len() > 1; let in_except_handler = exceptions .intersects(Exceptions::MODULE_NOT_FOUND_ERROR | Exceptions::IMPORT_ERROR); @@ -5665,7 +5279,7 @@ impl<'a> Checker<'a> { .into_iter() .sorted_by_key(|((defined_by, ..), ..)| *defined_by) { - let child = self.ctx.stmts[child]; + let child = self.semantic_model.stmts[child]; let multiple = unused_imports.len() > 1; let in_except_handler = exceptions .intersects(Exceptions::MODULE_NOT_FOUND_ERROR | Exceptions::IMPORT_ERROR); @@ -5701,7 +5315,7 @@ impl<'a> Checker<'a> { /// it is expected that all [`Definition`] nodes have been visited by the time, and that this /// method will not recurse into any other nodes. fn check_definitions(&mut self) { - let enforce_annotations = self.settings.rules.any_enabled(&[ + let enforce_annotations = self.any_enabled(&[ Rule::MissingTypeFunctionArgument, Rule::MissingTypeArgs, Rule::MissingTypeKwargs, @@ -5714,9 +5328,8 @@ impl<'a> Checker<'a> { Rule::MissingReturnTypeClassMethod, Rule::AnyType, ]); - let enforce_stubs = - self.is_stub && self.settings.rules.any_enabled(&[Rule::DocstringInStub]); - let enforce_docstrings = self.settings.rules.any_enabled(&[ + let enforce_stubs = self.is_stub && self.any_enabled(&[Rule::DocstringInStub]); + let enforce_docstrings = self.any_enabled(&[ Rule::UndocumentedPublicModule, Rule::UndocumentedPublicClass, Rule::UndocumentedPublicMethod, @@ -5770,15 +5383,15 @@ impl<'a> Checker<'a> { } // Compute visibility of all definitions. 
- let global_scope = self.ctx.global_scope(); + let global_scope = self.semantic_model.global_scope(); let exports: Option<&[&str]> = global_scope .get("__all__") - .map(|index| &self.ctx.bindings[*index]) + .map(|binding_id| &self.semantic_model.bindings[binding_id]) .and_then(|binding| match &binding.kind { BindingKind::Export(Export { names }) => Some(names.as_slice()), _ => None, }); - let definitions = std::mem::take(&mut self.ctx.definitions); + let definitions = std::mem::take(&mut self.semantic_model.definitions); let mut overloaded_name: Option = None; for ContextualizedDefinition { @@ -5797,7 +5410,7 @@ impl<'a> Checker<'a> { // classes, etc.). if !overloaded_name.map_or(false, |overloaded_name| { flake8_annotations::helpers::is_overload_impl( - self, + &self.semantic_model, definition, &overloaded_name, ) @@ -5809,13 +5422,14 @@ impl<'a> Checker<'a> { *visibility, )); } - overloaded_name = flake8_annotations::helpers::overloaded_name(self, definition); + overloaded_name = + flake8_annotations::helpers::overloaded_name(&self.semantic_model, definition); } // flake8-pyi if enforce_stubs { if self.is_stub { - if self.settings.rules.enabled(Rule::DocstringInStub) { + if self.enabled(Rule::DocstringInStub) { flake8_pyi::rules::docstring_in_stubs(self, docstring); } } @@ -5824,7 +5438,7 @@ impl<'a> Checker<'a> { // pydocstyle if enforce_docstrings { if pydocstyle::helpers::should_ignore_definition( - self, + &self.semantic_model, definition, &self.settings.pydocstyle.ignore_decorators, ) { @@ -5870,76 +5484,76 @@ impl<'a> Checker<'a> { continue; } - if self.settings.rules.enabled(Rule::FitsOnOneLine) { + if self.enabled(Rule::FitsOnOneLine) { pydocstyle::rules::one_liner(self, &docstring); } - if self.settings.rules.any_enabled(&[ + if self.any_enabled(&[ Rule::NoBlankLineBeforeFunction, Rule::NoBlankLineAfterFunction, ]) { pydocstyle::rules::blank_before_after_function(self, &docstring); } - if self.settings.rules.any_enabled(&[ + if self.any_enabled(&[ Rule::OneBlankLineBeforeClass, Rule::OneBlankLineAfterClass, Rule::BlankLineBeforeClass, ]) { pydocstyle::rules::blank_before_after_class(self, &docstring); } - if self.settings.rules.enabled(Rule::BlankLineAfterSummary) { + if self.enabled(Rule::BlankLineAfterSummary) { pydocstyle::rules::blank_after_summary(self, &docstring); } - if self.settings.rules.any_enabled(&[ + if self.any_enabled(&[ Rule::IndentWithSpaces, Rule::UnderIndentation, Rule::OverIndentation, ]) { pydocstyle::rules::indent(self, &docstring); } - if self.settings.rules.enabled(Rule::NewLineAfterLastParagraph) { + if self.enabled(Rule::NewLineAfterLastParagraph) { pydocstyle::rules::newline_after_last_paragraph(self, &docstring); } - if self.settings.rules.enabled(Rule::SurroundingWhitespace) { + if self.enabled(Rule::SurroundingWhitespace) { pydocstyle::rules::no_surrounding_whitespace(self, &docstring); } - if self.settings.rules.any_enabled(&[ + if self.any_enabled(&[ Rule::MultiLineSummaryFirstLine, Rule::MultiLineSummarySecondLine, ]) { pydocstyle::rules::multi_line_summary_start(self, &docstring); } - if self.settings.rules.enabled(Rule::TripleSingleQuotes) { + if self.enabled(Rule::TripleSingleQuotes) { pydocstyle::rules::triple_quotes(self, &docstring); } - if self.settings.rules.enabled(Rule::EscapeSequenceInDocstring) { + if self.enabled(Rule::EscapeSequenceInDocstring) { pydocstyle::rules::backslashes(self, &docstring); } - if self.settings.rules.enabled(Rule::EndsInPeriod) { + if self.enabled(Rule::EndsInPeriod) { 
pydocstyle::rules::ends_with_period(self, &docstring); } - if self.settings.rules.enabled(Rule::NonImperativeMood) { + if self.enabled(Rule::NonImperativeMood) { pydocstyle::rules::non_imperative_mood( self, &docstring, &self.settings.pydocstyle.property_decorators, ); } - if self.settings.rules.enabled(Rule::NoSignature) { + if self.enabled(Rule::NoSignature) { pydocstyle::rules::no_signature(self, &docstring); } - if self.settings.rules.enabled(Rule::FirstLineCapitalized) { + if self.enabled(Rule::FirstLineCapitalized) { pydocstyle::rules::capitalized(self, &docstring); } - if self.settings.rules.enabled(Rule::DocstringStartsWithThis) { + if self.enabled(Rule::DocstringStartsWithThis) { pydocstyle::rules::starts_with_this(self, &docstring); } - if self.settings.rules.enabled(Rule::EndsInPunctuation) { + if self.enabled(Rule::EndsInPunctuation) { pydocstyle::rules::ends_with_punctuation(self, &docstring); } - if self.settings.rules.enabled(Rule::OverloadWithDocstring) { + if self.enabled(Rule::OverloadWithDocstring) { pydocstyle::rules::if_needed(self, &docstring); } - if self.settings.rules.any_enabled(&[ + if self.any_enabled(&[ Rule::MultiLineSummaryFirstLine, Rule::SectionNotOverIndented, Rule::SectionUnderlineNotOverIndented, @@ -6031,8 +5645,8 @@ pub(crate) fn check_ast( checker.check_definitions(); // Reset the scope to module-level, and check all consumed scopes. - checker.ctx.scope_id = ScopeId::global(); - checker.ctx.dead_scopes.push(ScopeId::global()); + checker.semantic_model.scope_id = ScopeId::global(); + checker.semantic_model.dead_scopes.push(ScopeId::global()); checker.check_dead_scopes(); checker.diagnostics diff --git a/crates/ruff/src/checkers/imports.rs b/crates/ruff/src/checkers/imports.rs index 979da3e0c3432..a985c0daafdec 100644 --- a/crates/ruff/src/checkers/imports.rs +++ b/crates/ruff/src/checkers/imports.rs @@ -14,7 +14,7 @@ use ruff_python_stdlib::path::is_python_stub_file; use crate::directives::IsortDirectives; use crate::registry::Rule; use crate::rules::isort; -use crate::rules::isort::track::{Block, ImportTracker}; +use crate::rules::isort::block::{Block, BlockBuilder}; use crate::settings::Settings; fn extract_import_map(path: &Path, package: Option<&Path>, blocks: &[&Block]) -> Option { @@ -86,9 +86,9 @@ pub(crate) fn check_imports( ) -> (Vec, Option) { let is_stub = is_python_stub_file(path); - // Extract all imports from the AST. + // Extract all import blocks from the AST. 
let tracker = { - let mut tracker = ImportTracker::new(locator, directives, is_stub); + let mut tracker = BlockBuilder::new(locator, directives, is_stub); tracker.visit_body(python_ast); tracker }; @@ -109,7 +109,7 @@ pub(crate) fn check_imports( } if settings.rules.enabled(Rule::MissingRequiredImport) { diagnostics.extend(isort::rules::add_required_imports( - &blocks, python_ast, locator, stylist, settings, is_stub, + python_ast, locator, stylist, settings, is_stub, )); } diff --git a/crates/ruff/src/checkers/physical_lines.rs b/crates/ruff/src/checkers/physical_lines.rs index 47b647677d096..53b4704811369 100644 --- a/crates/ruff/src/checkers/physical_lines.rs +++ b/crates/ruff/src/checkers/physical_lines.rs @@ -183,6 +183,7 @@ mod tests { use ruff_python_ast::source_code::{Indexer, Locator, Stylist}; + use crate::line_width::LineLength; use crate::registry::Rule; use crate::settings::Settings; @@ -196,7 +197,7 @@ mod tests { let indexer = Indexer::from_tokens(&tokens, &locator); let stylist = Stylist::from_tokens(&tokens, &locator); - let check_with_max_line_length = |line_length: usize| { + let check_with_max_line_length = |line_length: LineLength| { check_physical_lines( Path::new("foo.py"), &locator, @@ -209,7 +210,8 @@ mod tests { }, ) }; - assert_eq!(check_with_max_line_length(8), vec![]); - assert_eq!(check_with_max_line_length(8), vec![]); + let line_length = LineLength::from(8); + assert_eq!(check_with_max_line_length(line_length), vec![]); + assert_eq!(check_with_max_line_length(line_length), vec![]); } } diff --git a/crates/ruff/src/checkers/tokens.rs b/crates/ruff/src/checkers/tokens.rs index e935f9cc18203..7ebe8937337f2 100644 --- a/crates/ruff/src/checkers/tokens.rs +++ b/crates/ruff/src/checkers/tokens.rs @@ -12,10 +12,11 @@ use crate::rules::{ }; use crate::settings::Settings; use ruff_diagnostics::Diagnostic; -use ruff_python_ast::source_code::Locator; +use ruff_python_ast::source_code::{Indexer, Locator}; pub(crate) fn check_tokens( locator: &Locator, + indexer: &Indexer, tokens: &[LexResult], settings: &Settings, is_stub: bool, @@ -100,15 +101,9 @@ pub(crate) fn check_tokens( // ERA001 if enforce_commented_out_code { - for (tok, range) in tokens.iter().flatten() { - if matches!(tok, Tok::Comment(_)) { - if let Some(diagnostic) = - eradicate::rules::commented_out_code(locator, *range, settings) - { - diagnostics.push(diagnostic); - } - } - } + diagnostics.extend(eradicate::rules::commented_out_code( + indexer, locator, settings, + )); } // W605 @@ -185,13 +180,13 @@ pub(crate) fn check_tokens( // PYI033 if enforce_type_comment_in_stub && is_stub { - diagnostics.extend(flake8_pyi::rules::type_comment_in_stub(tokens)); + diagnostics.extend(flake8_pyi::rules::type_comment_in_stub(indexer, locator)); } // TD001, TD002, TD003, TD004, TD005, TD006, TD007 if enforce_todos { diagnostics.extend( - flake8_todos::rules::todos(tokens, settings) + flake8_todos::rules::todos(indexer, locator, settings) .into_iter() .filter(|diagnostic| settings.rules.enabled(diagnostic.kind.rule())), ); diff --git a/crates/ruff/src/codes.rs b/crates/ruff/src/codes.rs index 064fd722f4318..0fe8f7cb60b46 100644 --- a/crates/ruff/src/codes.rs +++ b/crates/ruff/src/codes.rs @@ -185,6 +185,7 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> { (Pylint, "R5501") => (RuleGroup::Unspecified, Rule::CollapsibleElseIf), (Pylint, "W0120") => (RuleGroup::Unspecified, Rule::UselessElseOnLoop), (Pylint, "W0129") => (RuleGroup::Unspecified, Rule::AssertOnStringLiteral), + (Pylint, "W0131") 
=> (RuleGroup::Unspecified, Rule::NamedExprWithoutContext), (Pylint, "W0406") => (RuleGroup::Unspecified, Rule::ImportSelf), (Pylint, "W0602") => (RuleGroup::Unspecified, Rule::GlobalVariableNotAssigned), (Pylint, "W0603") => (RuleGroup::Unspecified, Rule::GlobalStatement), @@ -192,6 +193,7 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> { (Pylint, "W1508") => (RuleGroup::Unspecified, Rule::InvalidEnvvarDefault), (Pylint, "W2901") => (RuleGroup::Unspecified, Rule::RedefinedLoopName), (Pylint, "W3301") => (RuleGroup::Unspecified, Rule::NestedMinMax), + (Pylint, "W0130") => (RuleGroup::Unspecified, Rule::DuplicateValue), // flake8-async (Flake8Async, "100") => (RuleGroup::Unspecified, Rule::BlockingHttpCallInAsyncFunction), @@ -507,6 +509,7 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> { (Flake8Bandit, "506") => (RuleGroup::Unspecified, Rule::UnsafeYAMLLoad), (Flake8Bandit, "508") => (RuleGroup::Unspecified, Rule::SnmpInsecureVersion), (Flake8Bandit, "509") => (RuleGroup::Unspecified, Rule::SnmpWeakCryptography), + (Flake8Bandit, "601") => (RuleGroup::Unspecified, Rule::ParamikoCall), (Flake8Bandit, "602") => (RuleGroup::Unspecified, Rule::SubprocessPopenWithShellEqualsTrue), (Flake8Bandit, "603") => (RuleGroup::Unspecified, Rule::SubprocessWithoutShellEqualsTrue), (Flake8Bandit, "604") => (RuleGroup::Unspecified, Rule::CallWithShellEqualsTrue), @@ -580,6 +583,7 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> { (Flake8Pyi, "010") => (RuleGroup::Unspecified, Rule::NonEmptyStubBody), (Flake8Pyi, "011") => (RuleGroup::Unspecified, Rule::TypedArgumentDefaultInStub), (Flake8Pyi, "012") => (RuleGroup::Unspecified, Rule::PassInClassBody), + (Flake8Pyi, "013") => (RuleGroup::Unspecified, Rule::EllipsisInNonEmptyClassBody), (Flake8Pyi, "014") => (RuleGroup::Unspecified, Rule::ArgumentDefaultInStub), (Flake8Pyi, "015") => (RuleGroup::Unspecified, Rule::AssignmentDefaultInStub), (Flake8Pyi, "016") => (RuleGroup::Unspecified, Rule::DuplicateUnionMember), @@ -733,13 +737,13 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> { (Flynt, "002") => (RuleGroup::Unspecified, Rule::StaticJoinToFString), // flake8-todos - (Flake8Todo, "001") => (RuleGroup::Unspecified, Rule::InvalidTodoTag), - (Flake8Todo, "002") => (RuleGroup::Unspecified, Rule::MissingTodoAuthor), - (Flake8Todo, "003") => (RuleGroup::Unspecified, Rule::MissingTodoLink), - (Flake8Todo, "004") => (RuleGroup::Unspecified, Rule::MissingTodoColon), - (Flake8Todo, "005") => (RuleGroup::Unspecified, Rule::MissingTodoDescription), - (Flake8Todo, "006") => (RuleGroup::Unspecified, Rule::InvalidTodoCapitalization), - (Flake8Todo, "007") => (RuleGroup::Unspecified, Rule::MissingSpaceAfterTodoColon), + (Flake8Todos, "001") => (RuleGroup::Unspecified, Rule::InvalidTodoTag), + (Flake8Todos, "002") => (RuleGroup::Unspecified, Rule::MissingTodoAuthor), + (Flake8Todos, "003") => (RuleGroup::Unspecified, Rule::MissingTodoLink), + (Flake8Todos, "004") => (RuleGroup::Unspecified, Rule::MissingTodoColon), + (Flake8Todos, "005") => (RuleGroup::Unspecified, Rule::MissingTodoDescription), + (Flake8Todos, "006") => (RuleGroup::Unspecified, Rule::InvalidTodoCapitalization), + (Flake8Todos, "007") => (RuleGroup::Unspecified, Rule::MissingSpaceAfterTodoColon), _ => return None, }) diff --git a/crates/ruff/src/cst/matchers.rs b/crates/ruff/src/cst/matchers.rs index 00baadea98410..0df0590fcc65f 100644 --- a/crates/ruff/src/cst/matchers.rs +++ 
b/crates/ruff/src/cst/matchers.rs @@ -1,7 +1,9 @@ use anyhow::{bail, Result}; use libcst_native::{ - Attribute, Call, Comparison, Dict, Expr, Expression, Import, ImportAlias, ImportFrom, - ImportNames, Module, SimpleString, SmallStatement, Statement, + Arg, Attribute, Call, Comparison, CompoundStatement, Dict, Expression, FormattedString, + FormattedStringContent, FormattedStringExpression, FunctionDef, GeneratorExp, If, Import, + ImportAlias, ImportFrom, ImportNames, IndentedBlock, Lambda, ListComp, Module, Name, + SimpleString, SmallStatement, Statement, Suite, Tuple, With, }; pub(crate) fn match_module(module_text: &str) -> Result { @@ -18,20 +20,15 @@ pub(crate) fn match_expression(expression_text: &str) -> Result { } } -pub(crate) fn match_expr<'a, 'b>(module: &'a mut Module<'b>) -> Result<&'a mut Expr<'b>> { - if let Some(Statement::Simple(expr)) = module.body.first_mut() { - if let Some(SmallStatement::Expr(expr)) = expr.body.first_mut() { - Ok(expr) - } else { - bail!("Expected SmallStatement::Expr") - } - } else { - bail!("Expected Statement::Simple") +pub(crate) fn match_statement(statement_text: &str) -> Result { + match libcst_native::parse_statement(statement_text) { + Ok(statement) => Ok(statement), + Err(_) => bail!("Failed to extract statement from source"), } } -pub(crate) fn match_import<'a, 'b>(module: &'a mut Module<'b>) -> Result<&'a mut Import<'b>> { - if let Some(Statement::Simple(expr)) = module.body.first_mut() { +pub(crate) fn match_import<'a, 'b>(statement: &'a mut Statement<'b>) -> Result<&'a mut Import<'b>> { + if let Statement::Simple(expr) = statement { if let Some(SmallStatement::Import(expr)) = expr.body.first_mut() { Ok(expr) } else { @@ -43,9 +40,9 @@ pub(crate) fn match_import<'a, 'b>(module: &'a mut Module<'b>) -> Result<&'a mut } pub(crate) fn match_import_from<'a, 'b>( - module: &'a mut Module<'b>, + statement: &'a mut Statement<'b>, ) -> Result<&'a mut ImportFrom<'b>> { - if let Some(Statement::Simple(expr)) = module.body.first_mut() { + if let Statement::Simple(expr) = statement { if let Some(SmallStatement::ImportFrom(expr)) = expr.body.first_mut() { Ok(expr) } else { @@ -66,7 +63,17 @@ pub(crate) fn match_aliases<'a, 'b>( } } -pub(crate) fn match_call<'a, 'b>(expression: &'a mut Expression<'b>) -> Result<&'a mut Call<'b>> { +pub(crate) fn match_call<'a, 'b>(expression: &'a Expression<'b>) -> Result<&'a Call<'b>> { + if let Expression::Call(call) = expression { + Ok(call) + } else { + bail!("Expected Expression::Call") + } +} + +pub(crate) fn match_call_mut<'a, 'b>( + expression: &'a mut Expression<'b>, +) -> Result<&'a mut Call<'b>> { if let Expression::Call(call) = expression { Ok(call) } else { @@ -111,3 +118,123 @@ pub(crate) fn match_simple_string<'a, 'b>( bail!("Expected Expression::SimpleString") } } + +pub(crate) fn match_formatted_string<'a, 'b>( + expression: &'a mut Expression<'b>, +) -> Result<&'a mut FormattedString<'b>> { + if let Expression::FormattedString(formatted_string) = expression { + Ok(formatted_string) + } else { + bail!("Expected Expression::FormattedString") + } +} + +pub(crate) fn match_formatted_string_expression<'a, 'b>( + formatted_string_content: &'a mut FormattedStringContent<'b>, +) -> Result<&'a mut FormattedStringExpression<'b>> { + if let FormattedStringContent::Expression(formatted_string_expression) = + formatted_string_content + { + Ok(formatted_string_expression) + } else { + bail!("Expected FormattedStringContent::Expression") + } +} + +pub(crate) fn match_name<'a, 'b>(expression: &'a Expression<'b>) -> 
Result<&'a Name<'b>> { + if let Expression::Name(name) = expression { + Ok(name) + } else { + bail!("Expected Expression::Name") + } +} + +pub(crate) fn match_arg<'a, 'b>(call: &'a Call<'b>) -> Result<&'a Arg<'b>> { + if let Some(arg) = call.args.first() { + Ok(arg) + } else { + bail!("Expected Arg") + } +} + +pub(crate) fn match_generator_exp<'a, 'b>( + expression: &'a Expression<'b>, +) -> Result<&'a GeneratorExp<'b>> { + if let Expression::GeneratorExp(generator_exp) = expression { + Ok(generator_exp) + } else { + bail!("Expected Expression::GeneratorExp") + } +} + +pub(crate) fn match_tuple<'a, 'b>(expression: &'a Expression<'b>) -> Result<&'a Tuple<'b>> { + if let Expression::Tuple(tuple) = expression { + Ok(tuple) + } else { + bail!("Expected Expression::Tuple") + } +} + +pub(crate) fn match_list_comp<'a, 'b>(expression: &'a Expression<'b>) -> Result<&'a ListComp<'b>> { + if let Expression::ListComp(list_comp) = expression { + Ok(list_comp) + } else { + bail!("Expected Expression::ListComp") + } +} + +pub(crate) fn match_lambda<'a, 'b>(expression: &'a Expression<'b>) -> Result<&'a Lambda<'b>> { + if let Expression::Lambda(lambda) = expression { + Ok(lambda) + } else { + bail!("Expected Expression::Lambda") + } +} + +pub(crate) fn match_function_def<'a, 'b>( + statement: &'a mut Statement<'b>, +) -> Result<&'a mut FunctionDef<'b>> { + if let Statement::Compound(compound) = statement { + if let CompoundStatement::FunctionDef(function_def) = compound { + Ok(function_def) + } else { + bail!("Expected CompoundStatement::FunctionDef") + } + } else { + bail!("Expected Statement::Compound") + } +} + +pub(crate) fn match_indented_block<'a, 'b>( + suite: &'a mut Suite<'b>, +) -> Result<&'a mut IndentedBlock<'b>> { + if let Suite::IndentedBlock(indented_block) = suite { + Ok(indented_block) + } else { + bail!("Expected Suite::IndentedBlock") + } +} + +pub(crate) fn match_with<'a, 'b>(statement: &'a mut Statement<'b>) -> Result<&'a mut With<'b>> { + if let Statement::Compound(compound) = statement { + if let CompoundStatement::With(with) = compound { + Ok(with) + } else { + bail!("Expected CompoundStatement::With") + } + } else { + bail!("Expected Statement::Compound") + } +} + +pub(crate) fn match_if<'a, 'b>(statement: &'a mut Statement<'b>) -> Result<&'a mut If<'b>> { + if let Statement::Compound(compound) = statement { + if let CompoundStatement::If(if_) = compound { + Ok(if_) + } else { + bail!("Expected CompoundStatement::If") + } + } else { + bail!("Expected Statement::Compound") + } +} diff --git a/crates/ruff/src/directives.rs b/crates/ruff/src/directives.rs index 5ebf5c5a5667b..c626c5d8e2d09 100644 --- a/crates/ruff/src/directives.rs +++ b/crates/ruff/src/directives.rs @@ -83,11 +83,7 @@ pub fn extract_directives( } /// Extract a mapping from logical line to noqa line. -pub fn extract_noqa_line_for( - lxr: &[LexResult], - locator: &Locator, - indexer: &Indexer, -) -> NoqaMapping { +fn extract_noqa_line_for(lxr: &[LexResult], locator: &Locator, indexer: &Indexer) -> NoqaMapping { let mut string_mappings = Vec::new(); for (tok, range) in lxr.iter().flatten() { @@ -166,7 +162,7 @@ pub fn extract_noqa_line_for( } /// Extract a set of ranges over which to disable isort. 
-pub fn extract_isort_directives(lxr: &[LexResult], locator: &Locator) -> IsortDirectives { +fn extract_isort_directives(lxr: &[LexResult], locator: &Locator) -> IsortDirectives { let mut exclusions: Vec = Vec::default(); let mut splits: Vec = Vec::default(); let mut off: Option = None; diff --git a/crates/ruff/src/docstrings/mod.rs b/crates/ruff/src/docstrings/mod.rs index 6790264b9840b..b9df42d41598b 100644 --- a/crates/ruff/src/docstrings/mod.rs +++ b/crates/ruff/src/docstrings/mod.rs @@ -57,11 +57,6 @@ impl<'a> DocstringBody<'a> { self.range().start() } - #[inline] - pub(crate) fn end(self) -> TextSize { - self.range().end() - } - pub(crate) fn range(self) -> TextRange { self.docstring.body_range + self.docstring.start() } diff --git a/crates/ruff/src/docstrings/sections.rs b/crates/ruff/src/docstrings/sections.rs index 46ad1962a679d..5850389a7f116 100644 --- a/crates/ruff/src/docstrings/sections.rs +++ b/crates/ruff/src/docstrings/sections.rs @@ -1,15 +1,17 @@ -use ruff_python_ast::newlines::{StrExt, UniversalNewlineIterator}; -use ruff_text_size::{TextLen, TextRange, TextSize}; use std::fmt::{Debug, Formatter}; use std::iter::FusedIterator; + +use ruff_text_size::{TextLen, TextRange, TextSize}; use strum_macros::EnumIter; +use ruff_python_ast::newlines::{StrExt, UniversalNewlineIterator}; +use ruff_python_ast::whitespace; + use crate::docstrings::styles::SectionStyle; use crate::docstrings::{Docstring, DocstringBody}; -use ruff_python_ast::whitespace; #[derive(EnumIter, PartialEq, Eq, Debug, Clone, Copy)] -pub enum SectionKind { +pub(crate) enum SectionKind { Args, Arguments, Attention, @@ -48,7 +50,7 @@ pub enum SectionKind { } impl SectionKind { - pub fn from_str(s: &str) -> Option { + pub(crate) fn from_str(s: &str) -> Option { match s.to_ascii_lowercase().as_str() { "args" => Some(Self::Args), "arguments" => Some(Self::Arguments), @@ -89,7 +91,7 @@ impl SectionKind { } } - pub fn as_str(self) -> &'static str { + pub(crate) fn as_str(self) -> &'static str { match self { Self::Args => "Args", Self::Arguments => "Arguments", @@ -217,7 +219,7 @@ impl Debug for SectionContexts<'_> { } } -pub struct SectionContextsIter<'a> { +pub(crate) struct SectionContextsIter<'a> { docstring_body: DocstringBody<'a>, inner: std::slice::Iter<'a, SectionContextData>, } @@ -266,28 +268,24 @@ struct SectionContextData { summary_full_end: TextSize, } -pub struct SectionContext<'a> { +pub(crate) struct SectionContext<'a> { data: &'a SectionContextData, docstring_body: DocstringBody<'a>, } impl<'a> SectionContext<'a> { - pub fn is_last(&self) -> bool { - self.range().end() == self.docstring_body.end() - } - /// The `kind` of the section, e.g. [`SectionKind::Args`] or [`SectionKind::Returns`]. - pub const fn kind(&self) -> SectionKind { + pub(crate) const fn kind(&self) -> SectionKind { self.data.kind } /// The name of the section as it appears in the docstring, e.g. "Args" or "Returns". - pub fn section_name(&self) -> &'a str { + pub(crate) fn section_name(&self) -> &'a str { &self.docstring_body.as_str()[self.data.name_range] } /// Returns the rest of the summary line after the section name. - pub fn summary_after_section_name(&self) -> &'a str { + pub(crate) fn summary_after_section_name(&self) -> &'a str { &self.summary_line()[usize::from(self.data.name_range.end() - self.data.range.start())..] 
} @@ -296,17 +294,12 @@ impl<'a> SectionContext<'a> { } /// The absolute range of the section name - pub fn section_name_range(&self) -> TextRange { + pub(crate) fn section_name_range(&self) -> TextRange { self.data.name_range + self.offset() } - /// Summary range relative to the start of the document. Includes the trailing newline. - pub fn summary_full_range(&self) -> TextRange { - self.summary_full_range_relative() + self.offset() - } - /// The absolute range of the summary line, excluding any trailing newline character. - pub fn summary_range(&self) -> TextRange { + pub(crate) fn summary_range(&self) -> TextRange { TextRange::at(self.range().start(), self.summary_line().text_len()) } @@ -321,12 +314,12 @@ impl<'a> SectionContext<'a> { } /// The absolute range of the full-section. - pub fn range(&self) -> TextRange { + pub(crate) fn range(&self) -> TextRange { self.range_relative() + self.offset() } /// Summary line without the trailing newline characters - pub fn summary_line(&self) -> &'a str { + pub(crate) fn summary_line(&self) -> &'a str { let full_summary = &self.docstring_body.as_str()[self.summary_full_range_relative()]; let mut bytes = full_summary.bytes().rev(); @@ -347,14 +340,14 @@ impl<'a> SectionContext<'a> { } /// Returns the text of the last line of the previous section or an empty string if it is the first section. - pub fn previous_line(&self) -> Option<&'a str> { + pub(crate) fn previous_line(&self) -> Option<&'a str> { let previous = &self.docstring_body.as_str()[TextRange::up_to(self.range_relative().start())]; previous.universal_newlines().last().map(|l| l.as_str()) } /// Returns the lines belonging to this section after the summary line. - pub fn following_lines(&self) -> UniversalNewlineIterator<'a> { + pub(crate) fn following_lines(&self) -> UniversalNewlineIterator<'a> { let lines = self.following_lines_str(); UniversalNewlineIterator::with_offset(lines, self.offset() + self.data.summary_full_end) } @@ -369,7 +362,7 @@ impl<'a> SectionContext<'a> { } /// Returns the absolute range of the following lines. 
- pub fn following_range(&self) -> TextRange { + pub(crate) fn following_range(&self) -> TextRange { self.following_range_relative() + self.offset() } } diff --git a/crates/ruff/src/flake8_to_ruff/converter.rs b/crates/ruff/src/flake8_to_ruff/converter.rs index 8e54ae57bcfc7..5bccf6369199d 100644 --- a/crates/ruff/src/flake8_to_ruff/converter.rs +++ b/crates/ruff/src/flake8_to_ruff/converter.rs @@ -3,16 +3,14 @@ use std::collections::{HashMap, HashSet}; use anyhow::Result; use itertools::Itertools; -use super::external_config::ExternalConfig; -use super::plugin::Plugin; -use super::{parser, plugin}; +use crate::line_width::LineLength; use crate::registry::Linter; use crate::rule_selector::RuleSelector; use crate::rules::flake8_pytest_style::types::{ ParametrizeNameType, ParametrizeValuesRowType, ParametrizeValuesType, }; use crate::rules::flake8_quotes::settings::Quote; -use crate::rules::flake8_tidy_imports::relative_imports::Strictness; +use crate::rules::flake8_tidy_imports::settings::Strictness; use crate::rules::pydocstyle::settings::Convention; use crate::rules::{ flake8_annotations, flake8_bugbear, flake8_builtins, flake8_errmsg, flake8_pytest_style, @@ -23,6 +21,10 @@ use crate::settings::pyproject::Pyproject; use crate::settings::types::PythonVersion; use crate::warn_user; +use super::external_config::ExternalConfig; +use super::plugin::Plugin; +use super::{parser, plugin}; + const DEFAULT_SELECTORS: &[RuleSelector] = &[ RuleSelector::Linter(Linter::Pyflakes), RuleSelector::Linter(Linter::Pycodestyle), @@ -119,7 +121,9 @@ pub fn convert( options.builtins = Some(parser::parse_strings(value.as_ref())); } "max-line-length" | "max_line_length" => match value.parse::() { - Ok(line_length) => options.line_length = Some(line_length), + Ok(line_length) => { + options.line_length = Some(LineLength::from(line_length)); + } Err(e) => { warn_user!("Unable to parse '{key}' property: {e}"); } @@ -402,7 +406,7 @@ pub fn convert( // Extract any settings from the existing `pyproject.toml`. 
if let Some(black) = &external_config.black { if let Some(line_length) = &black.line_length { - options.line_length = Some(*line_length); + options.line_length = Some(LineLength::from(*line_length)); } if let Some(target_version) = &black.target_version { @@ -456,11 +460,10 @@ mod tests { use pep440_rs::VersionSpecifiers; use pretty_assertions::assert_eq; - use super::super::plugin::Plugin; - use super::convert; use crate::flake8_to_ruff::converter::DEFAULT_SELECTORS; use crate::flake8_to_ruff::pep621::Project; use crate::flake8_to_ruff::ExternalConfig; + use crate::line_width::LineLength; use crate::registry::Linter; use crate::rule_selector::RuleSelector; use crate::rules::pydocstyle::settings::Convention; @@ -469,6 +472,9 @@ mod tests { use crate::settings::pyproject::Pyproject; use crate::settings::types::PythonVersion; + use super::super::plugin::Plugin; + use super::convert; + fn default_options(plugins: impl IntoIterator) -> Options { Options { ignore: Some(vec![]), @@ -508,7 +514,7 @@ mod tests { Some(vec![]), )?; let expected = Pyproject::new(Options { - line_length: Some(100), + line_length: Some(LineLength::from(100)), ..default_options([]) }); assert_eq!(actual, expected); @@ -527,7 +533,7 @@ mod tests { Some(vec![]), )?; let expected = Pyproject::new(Options { - line_length: Some(100), + line_length: Some(LineLength::from(100)), ..default_options([]) }); assert_eq!(actual, expected); diff --git a/crates/ruff/src/flake8_to_ruff/mod.rs b/crates/ruff/src/flake8_to_ruff/mod.rs index 629b4831c2d9a..1b847b97d42b4 100644 --- a/crates/ruff/src/flake8_to_ruff/mod.rs +++ b/crates/ruff/src/flake8_to_ruff/mod.rs @@ -1,3 +1,8 @@ +pub use converter::convert; +pub use external_config::ExternalConfig; +pub use plugin::Plugin; +pub use pyproject::parse; + mod black; mod converter; mod external_config; @@ -6,8 +11,3 @@ mod parser; pub mod pep621; mod plugin; mod pyproject; - -pub use converter::convert; -pub use external_config::ExternalConfig; -pub use plugin::Plugin; -pub use pyproject::parse; diff --git a/crates/ruff/src/flake8_to_ruff/parser.rs b/crates/ruff/src/flake8_to_ruff/parser.rs index 8e71b8e2ab8b5..5c305aafcde65 100644 --- a/crates/ruff/src/flake8_to_ruff/parser.rs +++ b/crates/ruff/src/flake8_to_ruff/parser.rs @@ -195,12 +195,13 @@ pub(crate) fn collect_per_file_ignores( mod tests { use anyhow::Result; - use super::{parse_files_to_codes_mapping, parse_prefix_codes, parse_strings}; use crate::codes; use crate::registry::Linter; use crate::rule_selector::RuleSelector; use crate::settings::types::PatternPrefixPair; + use super::{parse_files_to_codes_mapping, parse_prefix_codes, parse_strings}; + #[test] fn it_parses_prefix_codes() { let actual = parse_prefix_codes(""); diff --git a/crates/ruff/src/importer/insertion.rs b/crates/ruff/src/importer/insertion.rs index 7b3258d6bf1e5..ca715a1d0ddd2 100644 --- a/crates/ruff/src/importer/insertion.rs +++ b/crates/ruff/src/importer/insertion.rs @@ -1,8 +1,8 @@ -use ruff_diagnostics::Edit; use ruff_text_size::TextSize; use rustpython_parser::ast::{Ranged, Stmt}; use rustpython_parser::{lexer, Mode, Tok}; +use ruff_diagnostics::Edit; use ruff_python_ast::helpers::is_docstring_stmt; use ruff_python_ast::source_code::{Locator, Stylist}; diff --git a/crates/ruff/src/importer/mod.rs b/crates/ruff/src/importer/mod.rs index 993751448a90b..7179459c5b2ae 100644 --- a/crates/ruff/src/importer/mod.rs +++ b/crates/ruff/src/importer/mod.rs @@ -1,15 +1,16 @@ //! Add and modify import statements to make module members available during fix execution. 
-use anyhow::Result; +use anyhow::{bail, Result}; use libcst_native::{Codegen, CodegenState, ImportAlias, Name, NameOrAttribute}; use ruff_text_size::TextSize; use rustpython_parser::ast::{self, Ranged, Stmt, Suite}; use ruff_diagnostics::Edit; -use ruff_python_ast::imports::AnyImport; +use ruff_python_ast::imports::{AnyImport, Import}; use ruff_python_ast::source_code::{Locator, Stylist}; +use ruff_python_semantic::model::SemanticModel; -use crate::cst::matchers::{match_aliases, match_import_from, match_module}; +use crate::cst::matchers::{match_aliases, match_import_from, match_statement}; use crate::importer::insertion::Insertion; mod insertion; @@ -40,14 +41,6 @@ impl<'a> Importer<'a> { self.ordered_imports.push(import); } - /// Return the import statement that precedes the given position, if any. - fn preceding_import(&self, at: TextSize) -> Option<&Stmt> { - self.ordered_imports - .partition_point(|stmt| stmt.start() < at) - .checked_sub(1) - .map(|idx| self.ordered_imports[idx]) - } - /// Add an import statement to import the given module. /// /// If there are no existing imports, the new import will be added at the top @@ -66,9 +59,123 @@ impl<'a> Importer<'a> { } } + /// Generate an [`Edit`] to reference the given symbol. Returns the [`Edit`] necessary to make + /// the symbol available in the current scope along with the bound name of the symbol. + /// + /// Attempts to reuse existing imports when possible. + pub(crate) fn get_or_import_symbol( + &self, + module: &str, + member: &str, + at: TextSize, + semantic_model: &SemanticModel, + ) -> Result<(Edit, String)> { + self.get_symbol(module, member, at, semantic_model)? + .map_or_else( + || self.import_symbol(module, member, at, semantic_model), + Ok, + ) + } + + /// Return an [`Edit`] to reference an existing symbol, if it's present in the given [`SemanticModel`]. + fn get_symbol( + &self, + module: &str, + member: &str, + at: TextSize, + semantic_model: &SemanticModel, + ) -> Result<Option<(Edit, String)>> { + // If the symbol is already available in the current scope, use it. + let Some((source, binding)) = semantic_model.resolve_qualified_import_name(module, member) else { + return Ok(None); + }; + + // The exception: the symbol source (i.e., the import statement) comes after the current + // location. For example, we could be generating an edit within a function, and the import + // could be defined in the module scope, but after the function definition. In this case, + // it's unclear whether we can use the symbol (the function could be called between the + // import and the current location, and thus the symbol would not be available). It's also + // unclear whether we should add an import statement at the top of the file, since it could + // be shadowed between the import and the current location. + if source.start() > at { + bail!("Unable to use existing symbol `{binding}` due to late-import"); + } + + // We also add a no-op edit to force conflicts with any other fixes that might try to + // remove the import. Consider: + // + // ```py + // import sys + // + // quit() + // ``` + // + // Assume you omit this no-op edit. If you run Ruff with `unused-imports` and + // `sys-exit-alias` over this snippet, it will generate two fixes: (1) remove the unused + // `sys` import; and (2) replace `quit()` with `sys.exit()`, under the assumption that `sys` + // is already imported and available.
+ // + // By adding this no-op edit, we force the `unused-imports` fix to conflict with the + // `sys-exit-alias` fix, and thus will avoid applying both fixes in the same pass. + let import_edit = Edit::range_replacement( + self.locator.slice(source.range()).to_string(), + source.range(), + ); + Ok(Some((import_edit, binding))) + } + + /// Generate an [`Edit`] to reference the given symbol. Returns the [`Edit`] necessary to make + /// the symbol available in the current scope along with the bound name of the symbol. + /// + /// For example, assuming `module` is `"functools"` and `member` is `"lru_cache"`, this function + /// could return an [`Edit`] to add `import functools` to the top of the file, alongside with + /// the name on which the `lru_cache` symbol would be made available (`"functools.lru_cache"`). + fn import_symbol( + &self, + module: &str, + member: &str, + at: TextSize, + semantic_model: &SemanticModel, + ) -> Result<(Edit, String)> { + if let Some(stmt) = self.find_import_from(module, at) { + // Case 1: `from functools import lru_cache` is in scope, and we're trying to reference + // `functools.cache`; thus, we add `cache` to the import, and return `"cache"` as the + // bound name. + if semantic_model + .find_binding(member) + .map_or(true, |binding| binding.kind.is_builtin()) + { + let import_edit = self.add_member(stmt, member)?; + Ok((import_edit, member.to_string())) + } else { + bail!("Unable to insert `{member}` into scope due to name conflict") + } + } else { + // Case 2: No `functools` import is in scope; thus, we add `import functools`, and + // return `"functools.cache"` as the bound name. + if semantic_model + .find_binding(module) + .map_or(true, |binding| binding.kind.is_builtin()) + { + let import_edit = self.add_import(&AnyImport::Import(Import::module(module)), at); + Ok((import_edit, format!("{module}.{member}"))) + } else { + bail!("Unable to insert `{module}` into scope due to name conflict") + } + } + } + + /// Return the import statement that precedes the given position, if any. + fn preceding_import(&self, at: TextSize) -> Option<&Stmt> { + self.ordered_imports + .partition_point(|stmt| stmt.start() < at) + .checked_sub(1) + .map(|idx| self.ordered_imports[idx]) + } + /// Return the top-level [`Stmt`] that imports the given module using `Stmt::ImportFrom` /// preceding the given position, if any. - pub(crate) fn find_import_from(&self, module: &str, at: TextSize) -> Option<&Stmt> { + fn find_import_from(&self, module: &str, at: TextSize) -> Option<&Stmt> { let mut import_from = None; for stmt in &self.ordered_imports { if stmt.start() >= at { @@ -91,9 +198,9 @@ impl<'a> Importer<'a> { } /// Add the given member to an existing `Stmt::ImportFrom` statement. 
- pub(crate) fn add_member(&self, stmt: &Stmt, member: &str) -> Result { - let mut tree = match_module(self.locator.slice(stmt.range()))?; - let import_from = match_import_from(&mut tree)?; + fn add_member(&self, stmt: &Stmt, member: &str) -> Result { + let mut statement = match_statement(self.locator.slice(stmt.range()))?; + let import_from = match_import_from(&mut statement)?; let aliases = match_aliases(import_from)?; aliases.push(ImportAlias { name: NameOrAttribute::N(Box::new(Name { @@ -109,7 +216,7 @@ impl<'a> Importer<'a> { default_indent: self.stylist.indentation(), ..CodegenState::default() }; - tree.codegen(&mut state); + statement.codegen(&mut state); Ok(Edit::range_replacement(state.to_string(), stmt.range())) } } diff --git a/crates/ruff/src/jupyter/mod.rs b/crates/ruff/src/jupyter/mod.rs index 92ee1e8a03cfc..ce6b9ef3bcaca 100644 --- a/crates/ruff/src/jupyter/mod.rs +++ b/crates/ruff/src/jupyter/mod.rs @@ -1,7 +1,7 @@ //! Utils for reading and writing jupyter notebooks -mod notebook; -mod schema; - pub use notebook::*; pub use schema::*; + +mod notebook; +mod schema; diff --git a/crates/ruff/src/jupyter/notebook.rs b/crates/ruff/src/jupyter/notebook.rs index 85a97dd6e6b5d..81a75805d1a38 100644 --- a/crates/ruff/src/jupyter/notebook.rs +++ b/crates/ruff/src/jupyter/notebook.rs @@ -1,9 +1,9 @@ -use ruff_text_size::TextRange; use std::fs::File; use std::io::{BufReader, BufWriter}; use std::iter; use std::path::Path; +use ruff_text_size::TextRange; use serde::Serialize; use serde_json::error::Category; diff --git a/crates/ruff/src/lib.rs b/crates/ruff/src/lib.rs index e359ffdbd0d09..8c9e6772ce6ce 100644 --- a/crates/ruff/src/lib.rs +++ b/crates/ruff/src/lib.rs @@ -21,6 +21,7 @@ pub mod fs; mod importer; pub mod jupyter; mod lex; +pub mod line_width; pub mod linter; pub mod logging; pub mod message; diff --git a/crates/ruff/src/line_width.rs b/crates/ruff/src/line_width.rs new file mode 100644 index 0000000000000..8619b42aa36ac --- /dev/null +++ b/crates/ruff/src/line_width.rs @@ -0,0 +1,165 @@ +use serde::{Deserialize, Serialize}; +use unicode_width::UnicodeWidthChar; + +use ruff_macros::CacheKey; + +/// The length of a line of text that is considered too long. +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, CacheKey)] +#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] +pub struct LineLength(usize); + +impl Default for LineLength { + /// The default line length. + fn default() -> Self { + Self(88) + } +} + +impl LineLength { + pub const fn get(&self) -> usize { + self.0 + } +} + +impl From for LineLength { + fn from(value: usize) -> Self { + Self(value) + } +} + +/// A measure of the width of a line of text. +/// +/// This is used to determine if a line is too long. +/// It should be compared to a [`LineLength`]. +#[derive(Clone, Copy, Debug)] +pub struct LineWidth { + /// The width of the line. + width: usize, + /// The column of the line. + /// This is used to calculate the width of tabs. + column: usize, + /// The tab size to use when calculating the width of tabs. 
+ tab_size: TabSize, +} + +impl Default for LineWidth { + fn default() -> Self { + Self::new(TabSize::default()) + } +} + +impl PartialEq for LineWidth { + fn eq(&self, other: &Self) -> bool { + self.width == other.width + } +} + +impl Eq for LineWidth {} + +impl PartialOrd for LineWidth { + fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> { + self.width.partial_cmp(&other.width) + } +} + +impl Ord for LineWidth { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.width.cmp(&other.width) + } +} + +impl LineWidth { + pub fn get(&self) -> usize { + self.width + } + + /// Creates a new `LineWidth` with the given tab size. + pub fn new(tab_size: TabSize) -> Self { + LineWidth { + width: 0, + column: 0, + tab_size, + } + } + + fn update(mut self, chars: impl Iterator<Item = char>) -> Self { + let tab_size: usize = self.tab_size.into(); + for c in chars { + match c { + '\t' => { + let tab_offset = tab_size - (self.column % tab_size); + self.width += tab_offset; + self.column += tab_offset; + } + '\n' | '\r' => { + self.width = 0; + self.column = 0; + } + _ => { + self.width += c.width().unwrap_or(0); + self.column += 1; + } + } + } + self + } + + /// Adds the given text to the line width. + #[must_use] + pub fn add_str(self, text: &str) -> Self { + self.update(text.chars()) + } + + /// Adds the given character to the line width. + #[must_use] + pub fn add_char(self, c: char) -> Self { + self.update(std::iter::once(c)) + } + + /// Adds the given width to the line width. + /// Also adds the given width to the column. + /// It is generally better to use [`LineWidth::add_str`] or [`LineWidth::add_char`]. + /// The width and column should be the same for the corresponding text. + /// Currently, this is only used to add spaces. + #[must_use] + pub fn add_width(mut self, width: usize) -> Self { + self.width += width; + self.column += width; + self + } +} + +impl PartialEq<LineLength> for LineWidth { + fn eq(&self, other: &LineLength) -> bool { + self.width == other.0 + } +} + +impl PartialOrd<LineLength> for LineWidth { + fn partial_cmp(&self, other: &LineLength) -> Option<std::cmp::Ordering> { + self.width.partial_cmp(&other.0) + } +} + +/// The size of a tab. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize, CacheKey)] +#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] +pub struct TabSize(pub u8); + +impl Default for TabSize { + fn default() -> Self { + Self(4) + } +} + +impl From<u8> for TabSize { + fn from(tab_size: u8) -> Self { + Self(tab_size) + } +} + +impl From<TabSize> for usize { + fn from(tab_size: TabSize) -> Self { + tab_size.0 as usize + } +} diff --git a/crates/ruff/src/linter.rs b/crates/ruff/src/linter.rs index 88a6c7decefaa..79ed8fba701d4 100644 --- a/crates/ruff/src/linter.rs +++ b/crates/ruff/src/linter.rs @@ -98,7 +98,7 @@ pub fn check_path( .any(|rule_code| rule_code.lint_source().is_tokens()) { let is_stub = is_python_stub_file(path); - diagnostics.extend(check_tokens(locator, &tokens, settings, is_stub)); + diagnostics.extend(check_tokens(locator, indexer, &tokens, settings, is_stub)); } // Run the filesystem-based rules.
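The new `crates/ruff/src/line_width.rs` module above replaces raw `usize` line lengths with `LineLength`, `LineWidth`, and `TabSize`. A minimal sketch of how the pieces compose, assuming the workspace `ruff` crate is in scope (the `main` function is illustrative only): widths accumulate per character, a tab advances to the next multiple of the tab size, and a `LineWidth` compares directly against a `LineLength` limit.

```rust
use ruff::line_width::{LineLength, LineWidth, TabSize};

fn main() {
    // The default limit is 88 columns, matching `LineLength::default()` above.
    let limit = LineLength::default();

    // 'a' and 'b' occupy two columns, so the tab pads to the next multiple of
    // the tab size: 4 - (2 % 4) = 2 extra columns, for a total width of 4.
    let width = LineWidth::new(TabSize::default()).add_str("ab\t");
    assert_eq!(width.get(), 4);

    // `PartialOrd<LineLength> for LineWidth` allows comparing the measured
    // width directly against the configured limit.
    assert!(width < limit);
}
```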
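Similarly, the `Importer::get_or_import_symbol` API introduced in `crates/ruff/src/importer/mod.rs` above returns both the import `Edit` and the name under which the symbol ends up bound. The following crate-internal sketch shows the intended call shape; the wrapper function and the `sys.exit` use case are illustrative assumptions, and only the method signature comes from this diff.

```rust
use anyhow::Result;
use ruff_diagnostics::Edit;
use ruff_python_semantic::model::SemanticModel;
use ruff_text_size::{TextRange, TextSize};

use crate::importer::Importer;

/// Replace a call at `call_range` with `sys.exit()` (or `exit()` if a usable
/// `from sys import exit` is already in scope), importing `sys` if needed.
fn replace_with_sys_exit(
    importer: &Importer,
    call_range: TextRange,
    at: TextSize,
    model: &SemanticModel,
) -> Result<(Edit, Edit)> {
    // First edit: make the symbol available, either a real import or the
    // no-op "keep this import" edit described in `get_symbol` above.
    let (import_edit, binding) = importer.get_or_import_symbol("sys", "exit", at, model)?;
    // Second edit: route the original call through the bound name.
    let call_edit = Edit::range_replacement(format!("{binding}()"), call_range);
    Ok((import_edit, call_edit))
}
```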
diff --git a/crates/ruff/src/logging.rs b/crates/ruff/src/logging.rs index dbf719aa809f7..ede7c8f43bc0d 100644 --- a/crates/ruff/src/logging.rs +++ b/crates/ruff/src/logging.rs @@ -2,15 +2,17 @@ use std::fmt::{Display, Formatter, Write}; use std::path::Path; use std::sync::Mutex; -use crate::fs; use anyhow::Result; use colored::Colorize; use fern; use log::Level; use once_cell::sync::Lazy; -use ruff_python_ast::source_code::SourceCode; use rustpython_parser::{ParseError, ParseErrorType}; +use ruff_python_ast::source_code::SourceCode; + +use crate::fs; + pub(crate) static WARNINGS: Lazy>> = Lazy::new(Mutex::default); /// Warn a user once, with uniqueness determined by the given ID. diff --git a/crates/ruff/src/message/azure.rs b/crates/ruff/src/message/azure.rs index 2f2fd019db1b4..d5119faca03aa 100644 --- a/crates/ruff/src/message/azure.rs +++ b/crates/ruff/src/message/azure.rs @@ -1,7 +1,9 @@ +use std::io::Write; + +use ruff_python_ast::source_code::SourceLocation; + use crate::message::{Emitter, EmitterContext, Message}; use crate::registry::AsRule; -use ruff_python_ast::source_code::SourceLocation; -use std::io::Write; /// Generate error logging commands for Azure Pipelines format. /// See [documentation](https://learn.microsoft.com/en-us/azure/devops/pipelines/scripts/logging-commands?view=azure-devops&tabs=bash#logissue-log-an-error-or-warning) @@ -42,9 +44,10 @@ impl Emitter for AzureEmitter { #[cfg(test)] mod tests { + use insta::assert_snapshot; + use crate::message::tests::{capture_emitter_output, create_messages}; use crate::message::AzureEmitter; - use insta::assert_snapshot; #[test] fn output() { diff --git a/crates/ruff/src/message/diff.rs b/crates/ruff/src/message/diff.rs index d3c56d7d24b6b..9676e200895bf 100644 --- a/crates/ruff/src/message/diff.rs +++ b/crates/ruff/src/message/diff.rs @@ -1,11 +1,14 @@ -use crate::message::Message; +use std::fmt::{Display, Formatter}; +use std::num::NonZeroUsize; + use colored::{Color, ColoredString, Colorize, Styles}; -use ruff_diagnostics::{Applicability, Fix}; -use ruff_python_ast::source_code::{OneIndexed, SourceFile}; use ruff_text_size::{TextRange, TextSize}; use similar::{ChangeTag, TextDiff}; -use std::fmt::{Display, Formatter}; -use std::num::NonZeroUsize; + +use ruff_diagnostics::{Applicability, Fix}; +use ruff_python_ast::source_code::{OneIndexed, SourceFile}; + +use crate::message::Message; /// Renders a diff that shows the code fixes. /// diff --git a/crates/ruff/src/message/github.rs b/crates/ruff/src/message/github.rs index ad7b064f27ef9..23ddae5d6701d 100644 --- a/crates/ruff/src/message/github.rs +++ b/crates/ruff/src/message/github.rs @@ -1,8 +1,10 @@ +use std::io::Write; + +use ruff_python_ast::source_code::SourceLocation; + use crate::fs::relativize_path; use crate::message::{Emitter, EmitterContext, Message}; use crate::registry::AsRule; -use ruff_python_ast::source_code::SourceLocation; -use std::io::Write; /// Generate error workflow command in GitHub Actions format. 
/// See: [GitHub documentation](https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-an-error-message) @@ -57,9 +59,10 @@ impl Emitter for GithubEmitter { #[cfg(test)] mod tests { + use insta::assert_snapshot; + use crate::message::tests::{capture_emitter_output, create_messages}; use crate::message::GithubEmitter; - use insta::assert_snapshot; #[test] fn output() { diff --git a/crates/ruff/src/message/gitlab.rs b/crates/ruff/src/message/gitlab.rs index 355bdf0dbdea2..8538245b391c8 100644 --- a/crates/ruff/src/message/gitlab.rs +++ b/crates/ruff/src/message/gitlab.rs @@ -1,14 +1,17 @@ -use crate::fs::{relativize_path, relativize_path_to}; -use crate::message::{Emitter, EmitterContext, Message}; -use crate::registry::AsRule; -use ruff_python_ast::source_code::SourceLocation; -use serde::ser::SerializeSeq; -use serde::{Serialize, Serializer}; -use serde_json::json; use std::collections::hash_map::DefaultHasher; use std::hash::{Hash, Hasher}; use std::io::Write; +use serde::ser::SerializeSeq; +use serde::{Serialize, Serializer}; +use serde_json::json; + +use ruff_python_ast::source_code::SourceLocation; + +use crate::fs::{relativize_path, relativize_path_to}; +use crate::message::{Emitter, EmitterContext, Message}; +use crate::registry::AsRule; + /// Generate JSON with violations in GitLab CI format // https://docs.gitlab.com/ee/ci/testing/code_quality.html#implement-a-custom-tool pub struct GitlabEmitter { @@ -122,9 +125,10 @@ fn fingerprint( #[cfg(test)] mod tests { + use insta::assert_snapshot; + use crate::message::tests::{capture_emitter_output, create_messages}; use crate::message::GitlabEmitter; - use insta::assert_snapshot; #[test] fn output() { diff --git a/crates/ruff/src/message/grouped.rs b/crates/ruff/src/message/grouped.rs index 444fd5563a5c5..44dc4b39c6753 100644 --- a/crates/ruff/src/message/grouped.rs +++ b/crates/ruff/src/message/grouped.rs @@ -1,3 +1,11 @@ +use std::fmt::{Display, Formatter}; +use std::io::Write; +use std::num::NonZeroUsize; + +use colored::Colorize; + +use ruff_python_ast::source_code::OneIndexed; + use crate::fs::relativize_path; use crate::jupyter::JupyterIndex; use crate::message::diff::calculate_print_width; @@ -5,11 +13,6 @@ use crate::message::text::{MessageCodeFrame, RuleCodeAndBody}; use crate::message::{ group_messages_by_filename, Emitter, EmitterContext, Message, MessageWithLocation, }; -use colored::Colorize; -use ruff_python_ast::source_code::OneIndexed; -use std::fmt::{Display, Formatter}; -use std::io::Write; -use std::num::NonZeroUsize; #[derive(Default)] pub struct GroupedEmitter { @@ -175,9 +178,10 @@ impl std::fmt::Write for PadAdapter<'_> { #[cfg(test)] mod tests { + use insta::assert_snapshot; + use crate::message::tests::{capture_emitter_output, create_messages}; use crate::message::GroupedEmitter; - use insta::assert_snapshot; #[test] fn default() { diff --git a/crates/ruff/src/message/json.rs b/crates/ruff/src/message/json.rs index 623501bf23176..c3adda51ba3d8 100644 --- a/crates/ruff/src/message/json.rs +++ b/crates/ruff/src/message/json.rs @@ -1,11 +1,14 @@ -use crate::message::{Emitter, EmitterContext, Message}; -use crate::registry::AsRule; -use ruff_diagnostics::Edit; -use ruff_python_ast::source_code::SourceCode; +use std::io::Write; + use serde::ser::SerializeSeq; use serde::{Serialize, Serializer}; use serde_json::json; -use std::io::Write; + +use ruff_diagnostics::Edit; +use ruff_python_ast::source_code::SourceCode; + +use crate::message::{Emitter, EmitterContext, Message}; 
+use crate::registry::AsRule; #[derive(Default)] pub struct JsonEmitter; @@ -94,9 +97,10 @@ impl Serialize for ExpandedEdits<'_> { #[cfg(test)] mod tests { + use insta::assert_snapshot; + use crate::message::tests::{capture_emitter_output, create_messages}; use crate::message::JsonEmitter; - use insta::assert_snapshot; #[test] fn output() { diff --git a/crates/ruff/src/message/junit.rs b/crates/ruff/src/message/junit.rs index 745e6c6f06caf..f910b7e6ed4b0 100644 --- a/crates/ruff/src/message/junit.rs +++ b/crates/ruff/src/message/junit.rs @@ -1,11 +1,14 @@ +use std::io::Write; +use std::path::Path; + +use quick_junit::{NonSuccessKind, Report, TestCase, TestCaseStatus, TestSuite}; + +use ruff_python_ast::source_code::SourceLocation; + use crate::message::{ group_messages_by_filename, Emitter, EmitterContext, Message, MessageWithLocation, }; use crate::registry::AsRule; -use quick_junit::{NonSuccessKind, Report, TestCase, TestCaseStatus, TestSuite}; -use ruff_python_ast::source_code::SourceLocation; -use std::io::Write; -use std::path::Path; #[derive(Default)] pub struct JunitEmitter; @@ -19,49 +22,60 @@ impl Emitter for JunitEmitter { ) -> anyhow::Result<()> { let mut report = Report::new("ruff"); - for (filename, messages) in group_messages_by_filename(messages) { - let mut test_suite = TestSuite::new(filename); + if messages.is_empty() { + let mut test_suite = TestSuite::new("ruff"); test_suite .extra .insert("package".to_string(), "org.ruff".to_string()); + let mut case = TestCase::new("No errors found", TestCaseStatus::success()); + case.set_classname("ruff"); + test_suite.add_test_case(case); + report.add_test_suite(test_suite); + } else { + for (filename, messages) in group_messages_by_filename(messages) { + let mut test_suite = TestSuite::new(filename); + test_suite + .extra + .insert("package".to_string(), "org.ruff".to_string()); - for message in messages { - let MessageWithLocation { - message, - start_location, - } = message; - let mut status = TestCaseStatus::non_success(NonSuccessKind::Failure); - status.set_message(message.kind.body.clone()); - let location = if context.is_jupyter_notebook(message.filename()) { - // We can't give a reasonable location for the structured formats, - // so we show one that's clearly a fallback - SourceLocation::default() - } else { - start_location - }; + for message in messages { + let MessageWithLocation { + message, + start_location, + } = message; + let mut status = TestCaseStatus::non_success(NonSuccessKind::Failure); + status.set_message(message.kind.body.clone()); + let location = if context.is_jupyter_notebook(message.filename()) { + // We can't give a reasonable location for the structured formats, + // so we show one that's clearly a fallback + SourceLocation::default() + } else { + start_location + }; - status.set_description(format!( - "line {row}, col {col}, {body}", - row = location.row, - col = location.column, - body = message.kind.body - )); - let mut case = TestCase::new( - format!("org.ruff.{}", message.kind.rule().noqa_code()), - status, - ); - let file_path = Path::new(filename); - let file_stem = file_path.file_stem().unwrap().to_str().unwrap(); - let classname = file_path.parent().unwrap().join(file_stem); - case.set_classname(classname.to_str().unwrap()); - case.extra - .insert("line".to_string(), location.row.to_string()); - case.extra - .insert("column".to_string(), location.column.to_string()); + status.set_description(format!( + "line {row}, col {col}, {body}", + row = location.row, + col = location.column, + body = 
message.kind.body + )); + let mut case = TestCase::new( + format!("org.ruff.{}", message.kind.rule().noqa_code()), + status, + ); + let file_path = Path::new(filename); + let file_stem = file_path.file_stem().unwrap().to_str().unwrap(); + let classname = file_path.parent().unwrap().join(file_stem); + case.set_classname(classname.to_str().unwrap()); + case.extra + .insert("line".to_string(), location.row.to_string()); + case.extra + .insert("column".to_string(), location.column.to_string()); - test_suite.add_test_case(case); + test_suite.add_test_case(case); + } + report.add_test_suite(test_suite); } - report.add_test_suite(test_suite); } report.serialize(writer)?; @@ -72,9 +86,10 @@ impl Emitter for JunitEmitter { #[cfg(test)] mod tests { + use insta::assert_snapshot; + use crate::message::tests::{capture_emitter_output, create_messages}; use crate::message::JunitEmitter; - use insta::assert_snapshot; #[test] fn output() { diff --git a/crates/ruff/src/message/mod.rs b/crates/ruff/src/message/mod.rs index 82f063ad6c370..2765347e168ce 100644 --- a/crates/ruff/src/message/mod.rs +++ b/crates/ruff/src/message/mod.rs @@ -1,20 +1,12 @@ -mod azure; -mod diff; -mod github; -mod gitlab; -mod grouped; -mod json; -mod junit; -mod pylint; -mod text; - -use ruff_text_size::{TextRange, TextSize}; -use rustc_hash::FxHashMap; use std::cmp::Ordering; use std::collections::BTreeMap; use std::io::Write; use std::ops::Deref; +use ruff_text_size::{TextRange, TextSize}; +use rustc_hash::FxHashMap; + +use crate::jupyter::JupyterIndex; pub use azure::AzureEmitter; pub use github::GithubEmitter; pub use gitlab::GitlabEmitter; @@ -22,12 +14,19 @@ pub use grouped::GroupedEmitter; pub use json::JsonEmitter; pub use junit::JunitEmitter; pub use pylint::PylintEmitter; -pub use text::TextEmitter; - -use crate::jupyter::JupyterIndex; -use crate::registry::AsRule; use ruff_diagnostics::{Diagnostic, DiagnosticKind, Fix}; use ruff_python_ast::source_code::{SourceFile, SourceLocation}; +pub use text::TextEmitter; + +mod azure; +mod diff; +mod github; +mod gitlab; +mod grouped; +mod json; +mod junit; +mod pylint; +mod text; #[derive(Debug, PartialEq, Eq)] pub struct Message { @@ -76,11 +75,7 @@ impl Message { impl Ord for Message { fn cmp(&self, other: &Self) -> Ordering { - (self.filename(), self.start(), self.kind.rule()).cmp(&( - other.filename(), - other.start(), - other.kind.rule(), - )) + (&self.file, self.start()).cmp(&(&other.file, other.start())) } } @@ -152,13 +147,15 @@ impl<'a> EmitterContext<'a> { #[cfg(test)] mod tests { - use crate::message::{Emitter, EmitterContext, Message}; - use crate::rules::pyflakes::rules::{UndefinedName, UnusedImport, UnusedVariable}; - use ruff_diagnostics::{Diagnostic, Edit, Fix}; - use ruff_python_ast::source_code::SourceFileBuilder; use ruff_text_size::{TextRange, TextSize}; use rustc_hash::FxHashMap; + use ruff_diagnostics::{Diagnostic, Edit, Fix}; + use ruff_python_ast::source_code::SourceFileBuilder; + + use crate::message::{Emitter, EmitterContext, Message}; + use crate::rules::pyflakes::rules::{UndefinedName, UnusedImport, UnusedVariable}; + pub(super) fn create_messages() -> Vec { let fib = r#"import os diff --git a/crates/ruff/src/message/pylint.rs b/crates/ruff/src/message/pylint.rs index d456fa6e1083c..edede90422f16 100644 --- a/crates/ruff/src/message/pylint.rs +++ b/crates/ruff/src/message/pylint.rs @@ -1,8 +1,10 @@ +use std::io::Write; + +use ruff_python_ast::source_code::OneIndexed; + use crate::fs::relativize_path; use crate::message::{Emitter, EmitterContext, 
Message}; use crate::registry::AsRule; -use ruff_python_ast::source_code::OneIndexed; -use std::io::Write; /// Generate violations in Pylint format. /// See: [Flake8 documentation](https://flake8.pycqa.org/en/latest/internal/formatters.html#pylint-formatter) @@ -40,9 +42,10 @@ impl Emitter for PylintEmitter { #[cfg(test)] mod tests { + use insta::assert_snapshot; + use crate::message::tests::{capture_emitter_output, create_messages}; use crate::message::PylintEmitter; - use insta::assert_snapshot; #[test] fn output() { diff --git a/crates/ruff/src/message/text.rs b/crates/ruff/src/message/text.rs index 23249499ed0a7..d467926a67d61 100644 --- a/crates/ruff/src/message/text.rs +++ b/crates/ruff/src/message/text.rs @@ -1,22 +1,29 @@ -use crate::fs::relativize_path; -use crate::message::diff::Diff; -use crate::message::{Emitter, EmitterContext, Message}; -use crate::registry::AsRule; +use std::borrow::Cow; +use std::fmt::{Display, Formatter}; +use std::io::Write; + use annotate_snippets::display_list::{DisplayList, FormatOptions}; use annotate_snippets::snippet::{Annotation, AnnotationType, Slice, Snippet, SourceAnnotation}; use bitflags::bitflags; use colored::Colorize; -use ruff_python_ast::source_code::{OneIndexed, SourceLocation}; use ruff_text_size::{TextRange, TextSize}; -use std::borrow::Cow; -use std::fmt::{Display, Formatter}; -use std::io::Write; + +use ruff_python_ast::source_code::{OneIndexed, SourceLocation}; + +use crate::fs::relativize_path; +use crate::line_width::{LineWidth, TabSize}; +use crate::message::diff::Diff; +use crate::message::{Emitter, EmitterContext, Message}; +use crate::registry::AsRule; bitflags! { #[derive(Default)] struct EmitterFlags: u8 { + /// Whether to show the fix status of a diagnostic. const SHOW_FIX_STATUS = 0b0000_0001; - const SHOW_FIX = 0b0000_0010; + /// Whether to show the diff of a fix, for diagnostics that have a fix. + const SHOW_FIX_DIFF = 0b0000_0010; + /// Whether to show the source code of a diagnostic. 
const SHOW_SOURCE = 0b0000_0100; } } @@ -35,8 +42,8 @@ impl TextEmitter { } #[must_use] - pub fn with_show_fix(mut self, show_fix: bool) -> Self { - self.flags.set(EmitterFlags::SHOW_FIX, show_fix); + pub fn with_show_fix_diff(mut self, show_fix_diff: bool) -> Self { + self.flags.set(EmitterFlags::SHOW_FIX_DIFF, show_fix_diff); self } @@ -101,7 +108,7 @@ impl Emitter for TextEmitter { writeln!(writer, "{}", MessageCodeFrame { message })?; } - if self.flags.contains(EmitterFlags::SHOW_FIX) { + if self.flags.contains(EmitterFlags::SHOW_FIX_DIFF) { if let Some(diff) = Diff::from_message(message) { writeln!(writer, "{diff}")?; } @@ -234,39 +241,35 @@ impl Display for MessageCodeFrame<'_> { } fn replace_whitespace(source: &str, annotation_range: TextRange) -> SourceCode { - static TAB_SIZE: u32 = 4; // TODO(jonathan): use `pycodestyle.tab-size` + static TAB_SIZE: TabSize = TabSize(4); // TODO(jonathan): use `tab-size` let mut result = String::new(); let mut last_end = 0; let mut range = annotation_range; - let mut column = 0; - - for (index, c) in source.chars().enumerate() { - match c { - '\t' => { - let tab_width = TAB_SIZE - column % TAB_SIZE; - column += tab_width; - - if index < usize::from(annotation_range.start()) { - range += TextSize::new(tab_width - 1); - } else if index < usize::from(annotation_range.end()) { - range = range.add_end(TextSize::new(tab_width - 1)); - } + let mut line_width = LineWidth::new(TAB_SIZE); - result.push_str(&source[last_end..index]); + for (index, c) in source.char_indices() { + let old_width = line_width.get(); + line_width = line_width.add_char(c); - for _ in 0..tab_width { - result.push(' '); - } + if matches!(c, '\t') { + // SAFETY: The difference is a value in the range [1..TAB_SIZE] which is guaranteed to be less than `u32`. 
+ #[allow(clippy::cast_possible_truncation)] + let tab_width = (line_width.get() - old_width) as u32; - last_end = index + 1; + if index < usize::from(annotation_range.start()) { + range += TextSize::new(tab_width - 1); + } else if index < usize::from(annotation_range.end()) { + range = range.add_end(TextSize::new(tab_width - 1)); } - '\n' | '\r' => { - column = 0; - } - _ => { - column += 1; + + result.push_str(&source[last_end..index]); + + for _ in 0..tab_width { + result.push(' '); } + + last_end = index + 1; } } @@ -292,9 +295,10 @@ struct SourceCode<'a> { #[cfg(test)] mod tests { + use insta::assert_snapshot; + use crate::message::tests::{capture_emitter_output, create_messages}; use crate::message::TextEmitter; - use insta::assert_snapshot; #[test] fn default() { diff --git a/crates/ruff/src/noqa.rs b/crates/ruff/src/noqa.rs index 808ea4ee19126..9dbf993ae3fe6 100644 --- a/crates/ruff/src/noqa.rs +++ b/crates/ruff/src/noqa.rs @@ -24,7 +24,6 @@ static NOQA_LINE_REGEX: Lazy = Lazy::new(|| { ) .unwrap() }); -static SPLIT_COMMA_REGEX: Lazy = Lazy::new(|| Regex::new(r"[,\s]").unwrap()); #[derive(Debug)] pub(crate) enum Directive<'a> { @@ -46,12 +45,12 @@ pub(crate) fn extract_noqa_directive<'a>(range: TextRange, locator: &'a Locator) caps.name("trailing_spaces"), ) { (Some(leading_spaces), Some(noqa), Some(codes), Some(trailing_spaces)) => { - let codes: Vec<&str> = SPLIT_COMMA_REGEX - .split(codes.as_str().trim()) + let codes = codes + .as_str() + .split(|c: char| c.is_whitespace() || c == ',') .map(str::trim) .filter(|code| !code.is_empty()) - .collect(); - + .collect_vec(); let start = range.start() + TextSize::try_from(noqa.start()).unwrap(); if codes.is_empty() { #[allow(deprecated)] @@ -105,11 +104,11 @@ fn parse_file_exemption(line: &str) -> ParsedExemption { if remainder.is_empty() { return ParsedExemption::All; } else if let Some(codes) = remainder.strip_prefix(':') { - let codes: Vec<&str> = SPLIT_COMMA_REGEX - .split(codes.trim()) + let codes = codes + .split(|c: char| c.is_whitespace() || c == ',') .map(str::trim) .filter(|code| !code.is_empty()) - .collect(); + .collect_vec(); if codes.is_empty() { warn!("Expected rule codes on `noqa` directive: \"{line}\""); } diff --git a/crates/ruff/src/registry.rs b/crates/ruff/src/registry.rs index 2de02bde30020..bc7c6cdea214a 100644 --- a/crates/ruff/src/registry.rs +++ b/crates/ruff/src/registry.rs @@ -1,15 +1,15 @@ //! Registry of all [`Rule`] implementations. 
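Aside on the `noqa.rs` change above: dropping `SPLIT_COMMA_REGEX` in favor of a plain character split keeps the same behavior, since rule codes after `# noqa:` may be separated by commas, whitespace, or both. A standalone sketch of the equivalent splitting logic (the `split_codes` helper name is hypothetical; the diff inlines this logic at both call sites):

```rust
/// Split a `# noqa: ...` code list on commas and/or whitespace,
/// trimming entries and dropping empty ones, mirroring the inlined logic.
fn split_codes(codes: &str) -> Vec<&str> {
    codes
        .split(|c: char| c.is_whitespace() || c == ',')
        .map(str::trim)
        .filter(|code| !code.is_empty())
        .collect()
}

fn main() {
    assert_eq!(split_codes("E501, W291  F401"), ["E501", "W291", "F401"]);
    assert!(split_codes(" ,  ").is_empty());
}
```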
-mod rule_set; - use strum_macros::{AsRefStr, EnumIter}; use ruff_diagnostics::Violation; use ruff_macros::RuleNamespace; +pub use rule_set::{RuleSet, RuleSetIterator}; use crate::codes::{self, RuleCodePrefix}; use crate::rules; -pub use rule_set::{RuleSet, RuleSetIterator}; + +mod rule_set; ruff_macros::register_rules!( // pycodestyle errors @@ -159,7 +159,9 @@ ruff_macros::register_rules!( rules::pylint::rules::LoggingTooManyArgs, rules::pylint::rules::UnexpectedSpecialMethodSignature, rules::pylint::rules::NestedMinMax, + rules::pylint::rules::DuplicateValue, rules::pylint::rules::DuplicateBases, + rules::pylint::rules::NamedExprWithoutContext, // flake8-async rules::flake8_async::rules::BlockingHttpCallInAsyncFunction, rules::flake8_async::rules::OpenSleepOrSubprocessInAsyncFunction, @@ -228,8 +230,8 @@ ruff_macros::register_rules!( // mccabe rules::mccabe::rules::ComplexStructure, // flake8-tidy-imports - rules::flake8_tidy_imports::banned_api::BannedApi, - rules::flake8_tidy_imports::relative_imports::RelativeImports, + rules::flake8_tidy_imports::rules::BannedApi, + rules::flake8_tidy_imports::rules::RelativeImports, // flake8-return rules::flake8_return::rules::UnnecessaryReturnNone, rules::flake8_return::rules::ImplicitReturnValue, @@ -422,6 +424,7 @@ ruff_macros::register_rules!( rules::flake8_bandit::rules::HardcodedTempFile, rules::flake8_bandit::rules::HashlibInsecureHashFunction, rules::flake8_bandit::rules::Jinja2AutoescapeFalse, + rules::flake8_bandit::rules::ParamikoCall, rules::flake8_bandit::rules::LoggingConfigInsecureListen, rules::flake8_bandit::rules::RequestWithNoCertValidation, rules::flake8_bandit::rules::RequestWithoutTimeout, @@ -510,6 +513,7 @@ ruff_macros::register_rules!( rules::flake8_pyi::rules::BadVersionInfoComparison, rules::flake8_pyi::rules::DocstringInStub, rules::flake8_pyi::rules::DuplicateUnionMember, + rules::flake8_pyi::rules::EllipsisInNonEmptyClassBody, rules::flake8_pyi::rules::NonEmptyStubBody, rules::flake8_pyi::rules::PassInClassBody, rules::flake8_pyi::rules::PassStatementStubBody, @@ -808,7 +812,7 @@ pub enum Linter { Flake8UsePathlib, /// [flake8-todos](https://github.com/orsinium-labs/flake8-todos/) #[prefix = "TD"] - Flake8Todo, + Flake8Todos, /// [eradicate](https://pypi.org/project/eradicate/) #[prefix = "ERA"] Eradicate, @@ -999,6 +1003,7 @@ pub const INCOMPATIBLE_CODES: &[(Rule, Rule, &str); 2] = &[ #[cfg(test)] mod tests { use std::mem::size_of; + use strum::IntoEnumIterator; use super::{Linter, Rule, RuleNamespace}; diff --git a/crates/ruff/src/rules/eradicate/detection.rs b/crates/ruff/src/rules/eradicate/detection.rs index 71fa7a77b666e..be063c7353dcc 100644 --- a/crates/ruff/src/rules/eradicate/detection.rs +++ b/crates/ruff/src/rules/eradicate/detection.rs @@ -5,7 +5,7 @@ use rustpython_parser as parser; static ALLOWLIST_REGEX: Lazy = Lazy::new(|| { Regex::new( - r"^(?i)(?:pylint|pyright|noqa|nosec|type:\s*ignore|fmt:\s*(on|off)|isort:\s*(on|off|skip|skip_file|split|dont-add-imports(:\s*\[.*?])?)|mypy:|SPDX-License-Identifier:)" + r"^(?i)(?:pylint|pyright|noqa|nosec|region|endregion|type:\s*ignore|fmt:\s*(on|off)|isort:\s*(on|off|skip|skip_file|split|dont-add-imports(:\s*\[.*?])?)|mypy:|SPDX-License-Identifier:)" ).unwrap() }); static BRACKET_REGEX: Lazy = Lazy::new(|| Regex::new(r"^[()\[\]{}\s]+$").unwrap()); @@ -224,6 +224,11 @@ mod tests { assert!(!comment_contains_code("# noqa: A123", &[])); assert!(!comment_contains_code("# noqa:A123", &[])); assert!(!comment_contains_code("# nosec", &[])); + 
assert!(!comment_contains_code("# region", &[])); + assert!(!comment_contains_code("# endregion", &[])); + assert!(!comment_contains_code("# region.name", &[])); + assert!(!comment_contains_code("# region name", &[])); + assert!(!comment_contains_code("# region: name", &[])); assert!(!comment_contains_code("# fmt: on", &[])); assert!(!comment_contains_code("# fmt: off", &[])); assert!(!comment_contains_code("# fmt:on", &[])); diff --git a/crates/ruff/src/rules/eradicate/mod.rs b/crates/ruff/src/rules/eradicate/mod.rs index b355e2bda32ce..3f26ac808e20e 100644 --- a/crates/ruff/src/rules/eradicate/mod.rs +++ b/crates/ruff/src/rules/eradicate/mod.rs @@ -7,7 +7,6 @@ mod tests { use std::path::Path; use anyhow::Result; - use test_case::test_case; use crate::registry::Rule; diff --git a/crates/ruff/src/rules/eradicate/rules.rs b/crates/ruff/src/rules/eradicate/rules/commented_out_code.rs similarity index 56% rename from crates/ruff/src/rules/eradicate/rules.rs rename to crates/ruff/src/rules/eradicate/rules/commented_out_code.rs index 02b42b1d88aef..cc757b6c10226 100644 --- a/crates/ruff/src/rules/eradicate/rules.rs +++ b/crates/ruff/src/rules/eradicate/rules/commented_out_code.rs @@ -1,13 +1,11 @@ -use ruff_text_size::TextRange; - use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit, Fix}; use ruff_macros::{derive_message_formats, violation}; -use ruff_python_ast::source_code::Locator; +use ruff_python_ast::source_code::{Indexer, Locator}; use crate::registry::Rule; use crate::settings::Settings; -use super::detection::comment_contains_code; +use super::super::detection::comment_contains_code; /// ## What it does /// Checks for commented-out Python code. @@ -47,24 +45,28 @@ fn is_standalone_comment(line: &str) -> bool { /// ERA001 pub(crate) fn commented_out_code( + indexer: &Indexer, locator: &Locator, - range: TextRange, settings: &Settings, -) -> Option { - let line = locator.full_lines(range); +) -> Vec { + let mut diagnostics = vec![]; + + for range in indexer.comment_ranges() { + let line = locator.full_lines(*range); - // Verify that the comment is on its own line, and that it contains code. - if is_standalone_comment(line) && comment_contains_code(line, &settings.task_tags[..]) { - let mut diagnostic = Diagnostic::new(CommentedOutCode, range); + // Verify that the comment is on its own line, and that it contains code. 
+ if is_standalone_comment(line) && comment_contains_code(line, &settings.task_tags[..]) { + let mut diagnostic = Diagnostic::new(CommentedOutCode, *range); - if settings.rules.should_fix(Rule::CommentedOutCode) { - #[allow(deprecated)] - diagnostic.set_fix(Fix::unspecified(Edit::range_deletion( - locator.full_lines_range(range), - ))); + if settings.rules.should_fix(Rule::CommentedOutCode) { + #[allow(deprecated)] + diagnostic.set_fix(Fix::unspecified(Edit::range_deletion( + locator.full_lines_range(*range), + ))); + } + diagnostics.push(diagnostic); } - Some(diagnostic) - } else { - None } + + diagnostics } diff --git a/crates/ruff/src/rules/eradicate/rules/mod.rs b/crates/ruff/src/rules/eradicate/rules/mod.rs new file mode 100644 index 0000000000000..8ec37813d9f35 --- /dev/null +++ b/crates/ruff/src/rules/eradicate/rules/mod.rs @@ -0,0 +1,3 @@ +pub(crate) use commented_out_code::{commented_out_code, CommentedOutCode}; + +mod commented_out_code; diff --git a/crates/ruff/src/rules/flake8_2020/helpers.rs b/crates/ruff/src/rules/flake8_2020/helpers.rs new file mode 100644 index 0000000000000..e5bb8fd4b60c5 --- /dev/null +++ b/crates/ruff/src/rules/flake8_2020/helpers.rs @@ -0,0 +1,8 @@ +use ruff_python_semantic::model::SemanticModel; +use rustpython_parser::ast::Expr; + +pub(super) fn is_sys(model: &SemanticModel, expr: &Expr, target: &str) -> bool { + model + .resolve_call_path(expr) + .map_or(false, |call_path| call_path.as_slice() == ["sys", target]) +} diff --git a/crates/ruff/src/rules/flake8_2020/mod.rs b/crates/ruff/src/rules/flake8_2020/mod.rs index b34b124e70938..35b09c2da83ce 100644 --- a/crates/ruff/src/rules/flake8_2020/mod.rs +++ b/crates/ruff/src/rules/flake8_2020/mod.rs @@ -1,4 +1,5 @@ //! Rules from [flake8-2020](https://pypi.org/project/flake8-2020/). 
+mod helpers; pub(crate) mod rules; #[cfg(test)] @@ -6,7 +7,6 @@ mod tests { use std::path::Path; use anyhow::Result; - use test_case::test_case; use crate::registry::Rule; diff --git a/crates/ruff/src/rules/flake8_2020/rules.rs b/crates/ruff/src/rules/flake8_2020/rules/compare.rs similarity index 50% rename from crates/ruff/src/rules/flake8_2020/rules.rs rename to crates/ruff/src/rules/flake8_2020/rules/compare.rs index bd19cccd91e8f..9877be4fae83b 100644 --- a/crates/ruff/src/rules/flake8_2020/rules.rs +++ b/crates/ruff/src/rules/flake8_2020/rules/compare.rs @@ -7,25 +7,7 @@ use ruff_macros::{derive_message_formats, violation}; use crate::checkers::ast::Checker; use crate::registry::Rule; -#[violation] -pub struct SysVersionSlice3; - -impl Violation for SysVersionSlice3 { - #[derive_message_formats] - fn message(&self) -> String { - format!("`sys.version[:3]` referenced (python3.10), use `sys.version_info`") - } -} - -#[violation] -pub struct SysVersion2; - -impl Violation for SysVersion2 { - #[derive_message_formats] - fn message(&self) -> String { - format!("`sys.version[2]` referenced (python3.10), use `sys.version_info`") - } -} +use super::super::helpers::is_sys; #[violation] pub struct SysVersionCmpStr3; @@ -47,16 +29,6 @@ impl Violation for SysVersionInfo0Eq3 { } } -#[violation] -pub struct SixPY3; - -impl Violation for SixPY3 { - #[derive_message_formats] - fn message(&self) -> String { - format!("`six.PY3` referenced (python4), use `not six.PY2`") - } -} - #[violation] pub struct SysVersionInfo1CmpInt; @@ -83,16 +55,6 @@ impl Violation for SysVersionInfoMinorCmpInt { } } -#[violation] -pub struct SysVersion0; - -impl Violation for SysVersion0 { - #[derive_message_formats] - fn message(&self) -> String { - format!("`sys.version[0]` referenced (python10), use `sys.version_info`") - } -} - #[violation] pub struct SysVersionCmpStr10; @@ -103,80 +65,11 @@ impl Violation for SysVersionCmpStr10 { } } -#[violation] -pub struct SysVersionSlice1; - -impl Violation for SysVersionSlice1 { - #[derive_message_formats] - fn message(&self) -> String { - format!("`sys.version[:1]` referenced (python10), use `sys.version_info`") - } -} - -fn is_sys(checker: &Checker, expr: &Expr, target: &str) -> bool { - checker - .ctx - .resolve_call_path(expr) - .map_or(false, |call_path| call_path.as_slice() == ["sys", target]) -} - -/// YTT101, YTT102, YTT301, YTT303 -pub(crate) fn subscript(checker: &mut Checker, value: &Expr, slice: &Expr) { - if is_sys(checker, value, "version") { - match slice { - Expr::Slice(ast::ExprSlice { - lower: None, - upper: Some(upper), - step: None, - range: _, - }) => { - if let Expr::Constant(ast::ExprConstant { - value: Constant::Int(i), - .. - }) = upper.as_ref() - { - if *i == BigInt::from(1) - && checker.settings.rules.enabled(Rule::SysVersionSlice1) - { - checker - .diagnostics - .push(Diagnostic::new(SysVersionSlice1, value.range())); - } else if *i == BigInt::from(3) - && checker.settings.rules.enabled(Rule::SysVersionSlice3) - { - checker - .diagnostics - .push(Diagnostic::new(SysVersionSlice3, value.range())); - } - } - } - - Expr::Constant(ast::ExprConstant { - value: Constant::Int(i), - .. 
- }) => { - if *i == BigInt::from(2) && checker.settings.rules.enabled(Rule::SysVersion2) { - checker - .diagnostics - .push(Diagnostic::new(SysVersion2, value.range())); - } else if *i == BigInt::from(0) && checker.settings.rules.enabled(Rule::SysVersion0) - { - checker - .diagnostics - .push(Diagnostic::new(SysVersion0, value.range())); - } - } - - _ => {} - } - } -} - /// YTT103, YTT201, YTT203, YTT204, YTT302 pub(crate) fn compare(checker: &mut Checker, left: &Expr, ops: &[Cmpop], comparators: &[Expr]) { match left { Expr::Subscript(ast::ExprSubscript { value, slice, .. }) - if is_sys(checker, value, "version_info") => + if is_sys(checker.semantic_model(), value, "version_info") => { if let Expr::Constant(ast::ExprConstant { value: Constant::Int(i), @@ -192,9 +85,7 @@ pub(crate) fn compare(checker: &mut Checker, left: &Expr, ops: &[Cmpop], compara })], ) = (ops, comparators) { - if *n == BigInt::from(3) - && checker.settings.rules.enabled(Rule::SysVersionInfo0Eq3) - { + if *n == BigInt::from(3) && checker.enabled(Rule::SysVersionInfo0Eq3) { checker .diagnostics .push(Diagnostic::new(SysVersionInfo0Eq3, left.range())); @@ -209,7 +100,7 @@ pub(crate) fn compare(checker: &mut Checker, left: &Expr, ops: &[Cmpop], compara })], ) = (ops, comparators) { - if checker.settings.rules.enabled(Rule::SysVersionInfo1CmpInt) { + if checker.enabled(Rule::SysVersionInfo1CmpInt) { checker .diagnostics .push(Diagnostic::new(SysVersionInfo1CmpInt, left.range())); @@ -220,7 +111,7 @@ pub(crate) fn compare(checker: &mut Checker, left: &Expr, ops: &[Cmpop], compara } Expr::Attribute(ast::ExprAttribute { value, attr, .. }) - if is_sys(checker, value, "version_info") && attr == "minor" => + if is_sys(checker.semantic_model(), value, "version_info") && attr == "minor" => { if let ( [Cmpop::Lt | Cmpop::LtE | Cmpop::Gt | Cmpop::GtE], @@ -230,11 +121,7 @@ pub(crate) fn compare(checker: &mut Checker, left: &Expr, ops: &[Cmpop], compara })], ) = (ops, comparators) { - if checker - .settings - .rules - .enabled(Rule::SysVersionInfoMinorCmpInt) - { + if checker.enabled(Rule::SysVersionInfoMinorCmpInt) { checker .diagnostics .push(Diagnostic::new(SysVersionInfoMinorCmpInt, left.range())); @@ -245,7 +132,7 @@ pub(crate) fn compare(checker: &mut Checker, left: &Expr, ops: &[Cmpop], compara _ => {} } - if is_sys(checker, left, "version") { + if is_sys(checker.semantic_model(), left, "version") { if let ( [Cmpop::Lt | Cmpop::LtE | Cmpop::Gt | Cmpop::GtE], [Expr::Constant(ast::ExprConstant { @@ -255,12 +142,12 @@ pub(crate) fn compare(checker: &mut Checker, left: &Expr, ops: &[Cmpop], compara ) = (ops, comparators) { if s.len() == 1 { - if checker.settings.rules.enabled(Rule::SysVersionCmpStr10) { + if checker.enabled(Rule::SysVersionCmpStr10) { checker .diagnostics .push(Diagnostic::new(SysVersionCmpStr10, left.range())); } - } else if checker.settings.rules.enabled(Rule::SysVersionCmpStr3) { + } else if checker.enabled(Rule::SysVersionCmpStr3) { checker .diagnostics .push(Diagnostic::new(SysVersionCmpStr3, left.range())); @@ -268,16 +155,3 @@ pub(crate) fn compare(checker: &mut Checker, left: &Expr, ops: &[Cmpop], compara } } } - -/// YTT202 -pub(crate) fn name_or_attribute(checker: &mut Checker, expr: &Expr) { - if checker - .ctx - .resolve_call_path(expr) - .map_or(false, |call_path| call_path.as_slice() == ["six", "PY3"]) - { - checker - .diagnostics - .push(Diagnostic::new(SixPY3, expr.range())); - } -} diff --git a/crates/ruff/src/rules/flake8_2020/rules/mod.rs b/crates/ruff/src/rules/flake8_2020/rules/mod.rs new 
file mode 100644 index 0000000000000..cb77bcc0ddda0 --- /dev/null +++ b/crates/ruff/src/rules/flake8_2020/rules/mod.rs @@ -0,0 +1,12 @@ +pub(crate) use compare::{ + compare, SysVersionCmpStr10, SysVersionCmpStr3, SysVersionInfo0Eq3, SysVersionInfo1CmpInt, + SysVersionInfoMinorCmpInt, +}; +pub(crate) use name_or_attribute::{name_or_attribute, SixPY3}; +pub(crate) use subscript::{ + subscript, SysVersion0, SysVersion2, SysVersionSlice1, SysVersionSlice3, +}; + +mod compare; +mod name_or_attribute; +mod subscript; diff --git a/crates/ruff/src/rules/flake8_2020/rules/name_or_attribute.rs b/crates/ruff/src/rules/flake8_2020/rules/name_or_attribute.rs new file mode 100644 index 0000000000000..d861abd262f51 --- /dev/null +++ b/crates/ruff/src/rules/flake8_2020/rules/name_or_attribute.rs @@ -0,0 +1,29 @@ +use rustpython_parser::ast::{Expr, Ranged}; + +use ruff_diagnostics::{Diagnostic, Violation}; +use ruff_macros::{derive_message_formats, violation}; + +use crate::checkers::ast::Checker; + +#[violation] +pub struct SixPY3; + +impl Violation for SixPY3 { + #[derive_message_formats] + fn message(&self) -> String { + format!("`six.PY3` referenced (python4), use `not six.PY2`") + } +} + +/// YTT202 +pub(crate) fn name_or_attribute(checker: &mut Checker, expr: &Expr) { + if checker + .semantic_model() + .resolve_call_path(expr) + .map_or(false, |call_path| call_path.as_slice() == ["six", "PY3"]) + { + checker + .diagnostics + .push(Diagnostic::new(SixPY3, expr.range())); + } +} diff --git a/crates/ruff/src/rules/flake8_2020/rules/subscript.rs b/crates/ruff/src/rules/flake8_2020/rules/subscript.rs new file mode 100644 index 0000000000000..b55a602423a6f --- /dev/null +++ b/crates/ruff/src/rules/flake8_2020/rules/subscript.rs @@ -0,0 +1,96 @@ +use num_bigint::BigInt; +use rustpython_parser::ast::{self, Constant, Expr, Ranged}; + +use ruff_diagnostics::{Diagnostic, Violation}; +use ruff_macros::{derive_message_formats, violation}; + +use crate::checkers::ast::Checker; +use crate::registry::Rule; +use crate::rules::flake8_2020::helpers::is_sys; + +#[violation] +pub struct SysVersionSlice3; + +impl Violation for SysVersionSlice3 { + #[derive_message_formats] + fn message(&self) -> String { + format!("`sys.version[:3]` referenced (python3.10), use `sys.version_info`") + } +} + +#[violation] +pub struct SysVersion2; + +impl Violation for SysVersion2 { + #[derive_message_formats] + fn message(&self) -> String { + format!("`sys.version[2]` referenced (python3.10), use `sys.version_info`") + } +} + +#[violation] +pub struct SysVersion0; + +impl Violation for SysVersion0 { + #[derive_message_formats] + fn message(&self) -> String { + format!("`sys.version[0]` referenced (python10), use `sys.version_info`") + } +} + +#[violation] +pub struct SysVersionSlice1; + +impl Violation for SysVersionSlice1 { + #[derive_message_formats] + fn message(&self) -> String { + format!("`sys.version[:1]` referenced (python10), use `sys.version_info`") + } +} + +/// YTT101, YTT102, YTT301, YTT303 +pub(crate) fn subscript(checker: &mut Checker, value: &Expr, slice: &Expr) { + if is_sys(checker.semantic_model(), value, "version") { + match slice { + Expr::Slice(ast::ExprSlice { + lower: None, + upper: Some(upper), + step: None, + range: _, + }) => { + if let Expr::Constant(ast::ExprConstant { + value: Constant::Int(i), + .. 
+ }) = upper.as_ref() + { + if *i == BigInt::from(1) && checker.enabled(Rule::SysVersionSlice1) { + checker + .diagnostics + .push(Diagnostic::new(SysVersionSlice1, value.range())); + } else if *i == BigInt::from(3) && checker.enabled(Rule::SysVersionSlice3) { + checker + .diagnostics + .push(Diagnostic::new(SysVersionSlice3, value.range())); + } + } + } + + Expr::Constant(ast::ExprConstant { + value: Constant::Int(i), + .. + }) => { + if *i == BigInt::from(2) && checker.enabled(Rule::SysVersion2) { + checker + .diagnostics + .push(Diagnostic::new(SysVersion2, value.range())); + } else if *i == BigInt::from(0) && checker.enabled(Rule::SysVersion0) { + checker + .diagnostics + .push(Diagnostic::new(SysVersion0, value.range())); + } + } + + _ => {} + } + } +} diff --git a/crates/ruff/src/rules/flake8_annotations/helpers.rs b/crates/ruff/src/rules/flake8_annotations/helpers.rs index 6abc9efd4ca45..6813af958180a 100644 --- a/crates/ruff/src/rules/flake8_annotations/helpers.rs +++ b/crates/ruff/src/rules/flake8_annotations/helpers.rs @@ -3,8 +3,7 @@ use rustpython_parser::ast::{self, Arguments, Expr, Stmt}; use ruff_python_ast::cast; use ruff_python_semantic::analyze::visibility; use ruff_python_semantic::definition::{Definition, Member, MemberKind}; - -use crate::checkers::ast::Checker; +use ruff_python_semantic::model::SemanticModel; pub(super) fn match_function_def( stmt: &Stmt, @@ -37,14 +36,14 @@ pub(super) fn match_function_def( } /// Return the name of the function, if it's overloaded. -pub(crate) fn overloaded_name(checker: &Checker, definition: &Definition) -> Option { +pub(crate) fn overloaded_name(model: &SemanticModel, definition: &Definition) -> Option { if let Definition::Member(Member { kind: MemberKind::Function | MemberKind::NestedFunction | MemberKind::Method, stmt, .. }) = definition { - if visibility::is_overload(&checker.ctx, cast::decorator_list(stmt)) { + if visibility::is_overload(model, cast::decorator_list(stmt)) { let (name, ..) = match_function_def(stmt); Some(name.to_string()) } else { @@ -58,7 +57,7 @@ pub(crate) fn overloaded_name(checker: &Checker, definition: &Definition) -> Opt /// Return `true` if the definition is the implementation for an overloaded /// function. pub(crate) fn is_overload_impl( - checker: &Checker, + model: &SemanticModel, definition: &Definition, overloaded_name: &str, ) -> bool { @@ -68,7 +67,7 @@ pub(crate) fn is_overload_impl( .. }) = definition { - if visibility::is_overload(&checker.ctx, cast::decorator_list(stmt)) { + if visibility::is_overload(model, cast::decorator_list(stmt)) { false } else { let (name, ..) 
= match_function_def(stmt); diff --git a/crates/ruff/src/rules/flake8_annotations/mod.rs b/crates/ruff/src/rules/flake8_annotations/mod.rs index ecfab2d4be75a..9928a48fb1434 100644 --- a/crates/ruff/src/rules/flake8_annotations/mod.rs +++ b/crates/ruff/src/rules/flake8_annotations/mod.rs @@ -8,9 +8,9 @@ pub mod settings; mod tests { use std::path::Path; - use crate::assert_messages; use anyhow::Result; + use crate::assert_messages; use crate::registry::Rule; use crate::settings::Settings; use crate::test::test_path; diff --git a/crates/ruff/src/rules/flake8_annotations/rules.rs b/crates/ruff/src/rules/flake8_annotations/rules/definition.rs similarity index 89% rename from crates/ruff/src/rules/flake8_annotations/rules.rs rename to crates/ruff/src/rules/flake8_annotations/rules/definition.rs index 93a43f858e8f9..11bfd64781541 100644 --- a/crates/ruff/src/rules/flake8_annotations/rules.rs +++ b/crates/ruff/src/rules/flake8_annotations/rules/definition.rs @@ -8,13 +8,14 @@ use ruff_python_ast::{cast, helpers}; use ruff_python_semantic::analyze::visibility; use ruff_python_semantic::analyze::visibility::Visibility; use ruff_python_semantic::definition::{Definition, Member, MemberKind}; +use ruff_python_semantic::model::SemanticModel; use ruff_python_stdlib::typing::SIMPLE_MAGIC_RETURN_TYPES; use crate::checkers::ast::Checker; use crate::registry::{AsRule, Rule}; -use super::fixes; -use super::helpers::match_function_def; +use super::super::fixes; +use super::super::helpers::match_function_def; /// ## What it does /// Checks that function arguments have type annotations. @@ -430,7 +431,7 @@ fn is_none_returning(body: &[Stmt]) -> bool { /// ANN401 fn check_dynamically_typed( - checker: &Checker, + model: &SemanticModel, annotation: &Expr, func: F, diagnostics: &mut Vec, @@ -438,7 +439,7 @@ fn check_dynamically_typed( ) where F: FnOnce() -> String, { - if !is_overridden && checker.ctx.match_typing_expr(annotation, "Any") { + if !is_overridden && model.match_typing_expr(annotation, "Any") { diagnostics.push(Diagnostic::new( AnyType { name: func() }, annotation.range(), @@ -479,7 +480,7 @@ pub(crate) fn definition( // unless configured to suppress ANN* for declarations that are fully untyped. let mut diagnostics = Vec::new(); - let is_overridden = visibility::is_override(&checker.ctx, decorator_list); + let is_overridden = visibility::is_override(checker.semantic_model(), decorator_list); // ANN001, ANN401 for arg in args @@ -490,16 +491,20 @@ pub(crate) fn definition( .skip( // If this is a non-static method, skip `cls` or `self`. 
usize::from( - is_method && !visibility::is_staticmethod(&checker.ctx, cast::decorator_list(stmt)), + is_method + && !visibility::is_staticmethod( + checker.semantic_model(), + cast::decorator_list(stmt), + ), ), ) { // ANN401 for dynamically typed arguments if let Some(annotation) = &arg.annotation { has_any_typed_arg = true; - if checker.settings.rules.enabled(Rule::AnyType) { + if checker.enabled(Rule::AnyType) { check_dynamically_typed( - checker, + checker.semantic_model(), annotation, || arg.arg.to_string(), &mut diagnostics, @@ -510,11 +515,7 @@ pub(crate) fn definition( if !(checker.settings.flake8_annotations.suppress_dummy_args && checker.settings.dummy_variable_rgx.is_match(&arg.arg)) { - if checker - .settings - .rules - .enabled(Rule::MissingTypeFunctionArgument) - { + if checker.enabled(Rule::MissingTypeFunctionArgument) { diagnostics.push(Diagnostic::new( MissingTypeFunctionArgument { name: arg.arg.to_string(), @@ -531,10 +532,10 @@ pub(crate) fn definition( if let Some(expr) = &arg.annotation { has_any_typed_arg = true; if !checker.settings.flake8_annotations.allow_star_arg_any { - if checker.settings.rules.enabled(Rule::AnyType) { + if checker.enabled(Rule::AnyType) { let name = &arg.arg; check_dynamically_typed( - checker, + checker.semantic_model(), expr, || format!("*{name}"), &mut diagnostics, @@ -546,7 +547,7 @@ pub(crate) fn definition( if !(checker.settings.flake8_annotations.suppress_dummy_args && checker.settings.dummy_variable_rgx.is_match(&arg.arg)) { - if checker.settings.rules.enabled(Rule::MissingTypeArgs) { + if checker.enabled(Rule::MissingTypeArgs) { diagnostics.push(Diagnostic::new( MissingTypeArgs { name: arg.arg.to_string(), @@ -563,10 +564,10 @@ pub(crate) fn definition( if let Some(expr) = &arg.annotation { has_any_typed_arg = true; if !checker.settings.flake8_annotations.allow_star_arg_any { - if checker.settings.rules.enabled(Rule::AnyType) { + if checker.enabled(Rule::AnyType) { let name = &arg.arg; check_dynamically_typed( - checker, + checker.semantic_model(), expr, || format!("**{name}"), &mut diagnostics, @@ -578,7 +579,7 @@ pub(crate) fn definition( if !(checker.settings.flake8_annotations.suppress_dummy_args && checker.settings.dummy_variable_rgx.is_match(&arg.arg)) { - if checker.settings.rules.enabled(Rule::MissingTypeKwargs) { + if checker.enabled(Rule::MissingTypeKwargs) { diagnostics.push(Diagnostic::new( MissingTypeKwargs { name: arg.arg.to_string(), @@ -591,11 +592,14 @@ pub(crate) fn definition( } // ANN101, ANN102 - if is_method && !visibility::is_staticmethod(&checker.ctx, cast::decorator_list(stmt)) { + if is_method + && !visibility::is_staticmethod(checker.semantic_model(), cast::decorator_list(stmt)) + { if let Some(arg) = args.posonlyargs.first().or_else(|| args.args.first()) { if arg.annotation.is_none() { - if visibility::is_classmethod(&checker.ctx, cast::decorator_list(stmt)) { - if checker.settings.rules.enabled(Rule::MissingTypeCls) { + if visibility::is_classmethod(checker.semantic_model(), cast::decorator_list(stmt)) + { + if checker.enabled(Rule::MissingTypeCls) { diagnostics.push(Diagnostic::new( MissingTypeCls { name: arg.arg.to_string(), @@ -604,7 +608,7 @@ pub(crate) fn definition( )); } } else { - if checker.settings.rules.enabled(Rule::MissingTypeSelf) { + if checker.enabled(Rule::MissingTypeSelf) { diagnostics.push(Diagnostic::new( MissingTypeSelf { name: arg.arg.to_string(), @@ -622,9 +626,9 @@ pub(crate) fn definition( // ANN201, ANN202, ANN401 if let Some(expr) = &returns { has_typed_return = true; - if 
checker.settings.rules.enabled(Rule::AnyType) { + if checker.enabled(Rule::AnyType) { check_dynamically_typed( - checker, + checker.semantic_model(), expr, || name.to_string(), &mut diagnostics, @@ -636,12 +640,10 @@ pub(crate) fn definition( // (explicitly or implicitly). checker.settings.flake8_annotations.suppress_none_returning && is_none_returning(body) ) { - if is_method && visibility::is_classmethod(&checker.ctx, cast::decorator_list(stmt)) { - if checker - .settings - .rules - .enabled(Rule::MissingReturnTypeClassMethod) - { + if is_method + && visibility::is_classmethod(checker.semantic_model(), cast::decorator_list(stmt)) + { + if checker.enabled(Rule::MissingReturnTypeClassMethod) { diagnostics.push(Diagnostic::new( MissingReturnTypeClassMethod { name: name.to_string(), @@ -649,13 +651,10 @@ pub(crate) fn definition( helpers::identifier_range(stmt, checker.locator), )); } - } else if is_method && visibility::is_staticmethod(&checker.ctx, cast::decorator_list(stmt)) + } else if is_method + && visibility::is_staticmethod(checker.semantic_model(), cast::decorator_list(stmt)) { - if checker - .settings - .rules - .enabled(Rule::MissingReturnTypeStaticMethod) - { + if checker.enabled(Rule::MissingReturnTypeStaticMethod) { diagnostics.push(Diagnostic::new( MissingReturnTypeStaticMethod { name: name.to_string(), @@ -666,11 +665,7 @@ pub(crate) fn definition( } else if is_method && visibility::is_init(name) { // Allow omission of return annotation in `__init__` functions, as long as at // least one argument is typed. - if checker - .settings - .rules - .enabled(Rule::MissingReturnTypeSpecialMethod) - { + if checker.enabled(Rule::MissingReturnTypeSpecialMethod) { if !(checker.settings.flake8_annotations.mypy_init_return && has_any_typed_arg) { let mut diagnostic = Diagnostic::new( MissingReturnTypeSpecialMethod { @@ -688,11 +683,7 @@ pub(crate) fn definition( } } } else if is_method && visibility::is_magic(name) { - if checker - .settings - .rules - .enabled(Rule::MissingReturnTypeSpecialMethod) - { + if checker.enabled(Rule::MissingReturnTypeSpecialMethod) { let mut diagnostic = Diagnostic::new( MissingReturnTypeSpecialMethod { name: name.to_string(), @@ -713,11 +704,7 @@ pub(crate) fn definition( } else { match visibility { Visibility::Public => { - if checker - .settings - .rules - .enabled(Rule::MissingReturnTypeUndocumentedPublicFunction) - { + if checker.enabled(Rule::MissingReturnTypeUndocumentedPublicFunction) { diagnostics.push(Diagnostic::new( MissingReturnTypeUndocumentedPublicFunction { name: name.to_string(), @@ -727,11 +714,7 @@ pub(crate) fn definition( } } Visibility::Private => { - if checker - .settings - .rules - .enabled(Rule::MissingReturnTypePrivateFunction) - { + if checker.enabled(Rule::MissingReturnTypePrivateFunction) { diagnostics.push(Diagnostic::new( MissingReturnTypePrivateFunction { name: name.to_string(), diff --git a/crates/ruff/src/rules/flake8_annotations/rules/mod.rs b/crates/ruff/src/rules/flake8_annotations/rules/mod.rs new file mode 100644 index 0000000000000..b57c156b183dc --- /dev/null +++ b/crates/ruff/src/rules/flake8_annotations/rules/mod.rs @@ -0,0 +1,8 @@ +pub(crate) use definition::{ + definition, AnyType, MissingReturnTypeClassMethod, MissingReturnTypePrivateFunction, + MissingReturnTypeSpecialMethod, MissingReturnTypeStaticMethod, + MissingReturnTypeUndocumentedPublicFunction, MissingTypeArgs, MissingTypeCls, + MissingTypeFunctionArgument, MissingTypeKwargs, MissingTypeSelf, +}; + +mod definition; diff --git 
a/crates/ruff/src/rules/flake8_async/helpers.rs b/crates/ruff/src/rules/flake8_async/helpers.rs new file mode 100644 index 0000000000000..5bd8b0f2c5ec7 --- /dev/null +++ b/crates/ruff/src/rules/flake8_async/helpers.rs @@ -0,0 +1,18 @@ +use ruff_python_semantic::{ + model::SemanticModel, + scope::{FunctionDef, ScopeKind}, +}; + +/// Return `true` if the [`SemanticModel`] is inside an async function definition. +pub(crate) fn in_async_function(model: &SemanticModel) -> bool { + model + .scopes() + .find_map(|scope| { + if let ScopeKind::Function(FunctionDef { async_, .. }) = &scope.kind { + Some(*async_) + } else { + None + } + }) + .unwrap_or(false) +} diff --git a/crates/ruff/src/rules/flake8_async/mod.rs b/crates/ruff/src/rules/flake8_async/mod.rs index 8842526400e42..c5293dc043993 100644 --- a/crates/ruff/src/rules/flake8_async/mod.rs +++ b/crates/ruff/src/rules/flake8_async/mod.rs @@ -1,4 +1,5 @@ //! Rules from [flake8-async](https://pypi.org/project/flake8-async/). +mod helpers; pub(crate) mod rules; #[cfg(test)] diff --git a/crates/ruff/src/rules/flake8_async/rules.rs b/crates/ruff/src/rules/flake8_async/rules.rs deleted file mode 100644 index 69abff7291893..0000000000000 --- a/crates/ruff/src/rules/flake8_async/rules.rs +++ /dev/null @@ -1,225 +0,0 @@ -use rustpython_parser::ast; -use rustpython_parser::ast::{Expr, Ranged}; - -use ruff_diagnostics::{Diagnostic, Violation}; -use ruff_macros::{derive_message_formats, violation}; -use ruff_python_semantic::context::Context; -use ruff_python_semantic::scope::{FunctionDef, ScopeKind}; - -use crate::checkers::ast::Checker; - -/// ## What it does -/// Checks that async functions do not contain blocking HTTP calls. -/// -/// ## Why is this bad? -/// Blocking an async function via a blocking HTTP call will block the entire -/// event loop, preventing it from executing other tasks while waiting for the -/// HTTP response, negating the benefits of asynchronous programming. -/// -/// Instead of making a blocking HTTP call, use an asynchronous HTTP client -/// library such as `aiohttp` or `httpx`. -/// -/// ## Example -/// ```python -/// async def fetch(): -/// urllib.request.urlopen("https://example.com/foo/bar").read() -/// ``` -/// -/// Use instead: -/// ```python -/// async def fetch(): -/// async with aiohttp.ClientSession() as session: -/// async with session.get("https://example.com/foo/bar") as resp: -/// ... -/// ``` -#[violation] -pub struct BlockingHttpCallInAsyncFunction; - -impl Violation for BlockingHttpCallInAsyncFunction { - #[derive_message_formats] - fn message(&self) -> String { - format!("Async functions should not call blocking HTTP methods") - } -} - -const BLOCKING_HTTP_CALLS: &[&[&str]] = &[ - &["urllib", "request", "urlopen"], - &["httpx", "get"], - &["httpx", "post"], - &["httpx", "delete"], - &["httpx", "patch"], - &["httpx", "put"], - &["httpx", "head"], - &["httpx", "connect"], - &["httpx", "options"], - &["httpx", "trace"], - &["requests", "get"], - &["requests", "post"], - &["requests", "delete"], - &["requests", "patch"], - &["requests", "put"], - &["requests", "head"], - &["requests", "connect"], - &["requests", "options"], - &["requests", "trace"], -]; - -/// ASYNC100 -pub(crate) fn blocking_http_call(checker: &mut Checker, expr: &Expr) { - if in_async_function(&checker.ctx) { - if let Expr::Call(ast::ExprCall { func, .. 
}) = expr { - if let Some(call_path) = checker.ctx.resolve_call_path(func) { - if BLOCKING_HTTP_CALLS.contains(&call_path.as_slice()) { - checker.diagnostics.push(Diagnostic::new( - BlockingHttpCallInAsyncFunction, - func.range(), - )); - } - } - } - } -} - -/// ## What it does -/// Checks that async functions do not contain calls to `open`, `time.sleep`, -/// or `subprocess` methods. -/// -/// ## Why is this bad? -/// Blocking an async function via a blocking call will block the entire -/// event loop, preventing it from executing other tasks while waiting for the -/// call to complete, negating the benefits of asynchronous programming. -/// -/// Instead of making a blocking call, use an equivalent asynchronous library -/// or function. -/// -/// ## Example -/// ```python -/// async def foo(): -/// time.sleep(1000) -/// ``` -/// -/// Use instead: -/// ```python -/// async def foo(): -/// await asyncio.sleep(1000) -/// ``` -#[violation] -pub struct OpenSleepOrSubprocessInAsyncFunction; - -impl Violation for OpenSleepOrSubprocessInAsyncFunction { - #[derive_message_formats] - fn message(&self) -> String { - format!("Async functions should not call `open`, `time.sleep`, or `subprocess` methods") - } -} - -const OPEN_SLEEP_OR_SUBPROCESS_CALL: &[&[&str]] = &[ - &["", "open"], - &["time", "sleep"], - &["subprocess", "run"], - &["subprocess", "Popen"], - // Deprecated subprocess calls: - &["subprocess", "call"], - &["subprocess", "check_call"], - &["subprocess", "check_output"], - &["subprocess", "getoutput"], - &["subprocess", "getstatusoutput"], - &["os", "wait"], - &["os", "wait3"], - &["os", "wait4"], - &["os", "waitid"], - &["os", "waitpid"], -]; - -/// ASYNC101 -pub(crate) fn open_sleep_or_subprocess_call(checker: &mut Checker, expr: &Expr) { - if in_async_function(&checker.ctx) { - if let Expr::Call(ast::ExprCall { func, .. }) = expr { - if let Some(call_path) = checker.ctx.resolve_call_path(func) { - if OPEN_SLEEP_OR_SUBPROCESS_CALL.contains(&call_path.as_slice()) { - checker.diagnostics.push(Diagnostic::new( - OpenSleepOrSubprocessInAsyncFunction, - func.range(), - )); - } - } - } - } -} - -/// ## What it does -/// Checks that async functions do not contain calls to blocking synchronous -/// process calls via the `os` module. -/// -/// ## Why is this bad? -/// Blocking an async function via a blocking call will block the entire -/// event loop, preventing it from executing other tasks while waiting for the -/// call to complete, negating the benefits of asynchronous programming. -/// -/// Instead of making a blocking call, use an equivalent asynchronous library -/// or function. -/// -/// ## Example -/// ```python -/// async def foo(): -/// os.popen() -/// ``` -/// -/// Use instead: -/// ```python -/// def foo(): -/// os.popen() -/// ``` -#[violation] -pub struct BlockingOsCallInAsyncFunction; - -impl Violation for BlockingOsCallInAsyncFunction { - #[derive_message_formats] - fn message(&self) -> String { - format!("Async functions should not call synchronous `os` methods") - } -} - -const UNSAFE_OS_METHODS: &[&[&str]] = &[ - &["os", "popen"], - &["os", "posix_spawn"], - &["os", "posix_spawnp"], - &["os", "spawnl"], - &["os", "spawnle"], - &["os", "spawnlp"], - &["os", "spawnlpe"], - &["os", "spawnv"], - &["os", "spawnve"], - &["os", "spawnvp"], - &["os", "spawnvpe"], - &["os", "system"], -]; - -/// ASYNC102 -pub(crate) fn blocking_os_call(checker: &mut Checker, expr: &Expr) { - if in_async_function(&checker.ctx) { - if let Expr::Call(ast::ExprCall { func, .. 
}) = expr { - if let Some(call_path) = checker.ctx.resolve_call_path(func) { - if UNSAFE_OS_METHODS.contains(&call_path.as_slice()) { - checker - .diagnostics - .push(Diagnostic::new(BlockingOsCallInAsyncFunction, func.range())); - } - } - } - } -} - -/// Return `true` if the [`Context`] is inside an async function definition. -fn in_async_function(context: &Context) -> bool { - context - .scopes() - .find_map(|scope| { - if let ScopeKind::Function(FunctionDef { async_, .. }) = &scope.kind { - Some(*async_) - } else { - None - } - }) - .unwrap_or(false) -} diff --git a/crates/ruff/src/rules/flake8_async/rules/blocking_http_call.rs b/crates/ruff/src/rules/flake8_async/rules/blocking_http_call.rs new file mode 100644 index 0000000000000..457f696d4eaa3 --- /dev/null +++ b/crates/ruff/src/rules/flake8_async/rules/blocking_http_call.rs @@ -0,0 +1,83 @@ +use rustpython_parser::ast; +use rustpython_parser::ast::{Expr, Ranged}; + +use ruff_diagnostics::{Diagnostic, Violation}; +use ruff_macros::{derive_message_formats, violation}; + +use crate::checkers::ast::Checker; + +use super::super::helpers::in_async_function; + +/// ## What it does +/// Checks that async functions do not contain blocking HTTP calls. +/// +/// ## Why is this bad? +/// Blocking an async function via a blocking HTTP call will block the entire +/// event loop, preventing it from executing other tasks while waiting for the +/// HTTP response, negating the benefits of asynchronous programming. +/// +/// Instead of making a blocking HTTP call, use an asynchronous HTTP client +/// library such as `aiohttp` or `httpx`. +/// +/// ## Example +/// ```python +/// async def fetch(): +/// urllib.request.urlopen("https://example.com/foo/bar").read() +/// ``` +/// +/// Use instead: +/// ```python +/// async def fetch(): +/// async with aiohttp.ClientSession() as session: +/// async with session.get("https://example.com/foo/bar") as resp: +/// ... +/// ``` +#[violation] +pub struct BlockingHttpCallInAsyncFunction; + +impl Violation for BlockingHttpCallInAsyncFunction { + #[derive_message_formats] + fn message(&self) -> String { + format!("Async functions should not call blocking HTTP methods") + } +} + +const BLOCKING_HTTP_CALLS: &[&[&str]] = &[ + &["urllib", "request", "urlopen"], + &["httpx", "get"], + &["httpx", "post"], + &["httpx", "delete"], + &["httpx", "patch"], + &["httpx", "put"], + &["httpx", "head"], + &["httpx", "connect"], + &["httpx", "options"], + &["httpx", "trace"], + &["requests", "get"], + &["requests", "post"], + &["requests", "delete"], + &["requests", "patch"], + &["requests", "put"], + &["requests", "head"], + &["requests", "connect"], + &["requests", "options"], + &["requests", "trace"], +]; + +/// ASYNC100 +pub(crate) fn blocking_http_call(checker: &mut Checker, expr: &Expr) { + if in_async_function(checker.semantic_model()) { + if let Expr::Call(ast::ExprCall { func, .. 
}) = expr { + let call_path = checker.semantic_model().resolve_call_path(func); + let is_blocking = + call_path.map_or(false, |path| BLOCKING_HTTP_CALLS.contains(&path.as_slice())); + + if is_blocking { + checker.diagnostics.push(Diagnostic::new( + BlockingHttpCallInAsyncFunction, + func.range(), + )); + } + } + } +} diff --git a/crates/ruff/src/rules/flake8_async/rules/blocking_os_call.rs b/crates/ruff/src/rules/flake8_async/rules/blocking_os_call.rs new file mode 100644 index 0000000000000..3d0fbcf567c2e --- /dev/null +++ b/crates/ruff/src/rules/flake8_async/rules/blocking_os_call.rs @@ -0,0 +1,75 @@ +use rustpython_parser::ast; +use rustpython_parser::ast::{Expr, Ranged}; + +use ruff_diagnostics::{Diagnostic, Violation}; +use ruff_macros::{derive_message_formats, violation}; + +use crate::checkers::ast::Checker; + +use super::super::helpers::in_async_function; + +/// ## What it does +/// Checks that async functions do not contain calls to blocking synchronous +/// process calls via the `os` module. +/// +/// ## Why is this bad? +/// Blocking an async function via a blocking call will block the entire +/// event loop, preventing it from executing other tasks while waiting for the +/// call to complete, negating the benefits of asynchronous programming. +/// +/// Instead of making a blocking call, use an equivalent asynchronous library +/// or function. +/// +/// ## Example +/// ```python +/// async def foo(): +/// os.popen() +/// ``` +/// +/// Use instead: +/// ```python +/// def foo(): +/// os.popen() +/// ``` +#[violation] +pub struct BlockingOsCallInAsyncFunction; + +impl Violation for BlockingOsCallInAsyncFunction { + #[derive_message_formats] + fn message(&self) -> String { + format!("Async functions should not call synchronous `os` methods") + } +} + +const UNSAFE_OS_METHODS: &[&[&str]] = &[ + &["os", "popen"], + &["os", "posix_spawn"], + &["os", "posix_spawnp"], + &["os", "spawnl"], + &["os", "spawnle"], + &["os", "spawnlp"], + &["os", "spawnlpe"], + &["os", "spawnv"], + &["os", "spawnve"], + &["os", "spawnvp"], + &["os", "spawnvpe"], + &["os", "system"], +]; + +/// ASYNC102 +pub(crate) fn blocking_os_call(checker: &mut Checker, expr: &Expr) { + if in_async_function(checker.semantic_model()) { + if let Expr::Call(ast::ExprCall { func, .. 
}) = expr { + let is_unsafe_os_method = checker + .semantic_model() + .resolve_call_path(func) + .map_or(false, |path| UNSAFE_OS_METHODS.contains(&path.as_slice())); + + if is_unsafe_os_method { + checker + .diagnostics + .push(Diagnostic::new(BlockingOsCallInAsyncFunction, func.range())); + } + } + } +} diff --git a/crates/ruff/src/rules/flake8_async/rules/mod.rs b/crates/ruff/src/rules/flake8_async/rules/mod.rs new file mode 100644 index 0000000000000..0f6e8faaca1e6 --- /dev/null +++ b/crates/ruff/src/rules/flake8_async/rules/mod.rs @@ -0,0 +1,9 @@ +pub(crate) use blocking_http_call::{blocking_http_call, BlockingHttpCallInAsyncFunction}; +pub(crate) use blocking_os_call::{blocking_os_call, BlockingOsCallInAsyncFunction}; +pub(crate) use open_sleep_or_subprocess_call::{ + open_sleep_or_subprocess_call, OpenSleepOrSubprocessInAsyncFunction, +}; + +mod blocking_http_call; +mod blocking_os_call; +mod open_sleep_or_subprocess_call; diff --git a/crates/ruff/src/rules/flake8_async/rules/open_sleep_or_subprocess_call.rs b/crates/ruff/src/rules/flake8_async/rules/open_sleep_or_subprocess_call.rs new file mode 100644 index 0000000000000..6935a5a45be6a --- /dev/null +++ b/crates/ruff/src/rules/flake8_async/rules/open_sleep_or_subprocess_call.rs @@ -0,0 +1,81 @@ +use rustpython_parser::ast; +use rustpython_parser::ast::{Expr, Ranged}; + +use ruff_diagnostics::{Diagnostic, Violation}; +use ruff_macros::{derive_message_formats, violation}; + +use crate::checkers::ast::Checker; + +use super::super::helpers::in_async_function; + +/// ## What it does +/// Checks that async functions do not contain calls to `open`, `time.sleep`, +/// or `subprocess` methods. +/// +/// ## Why is this bad? +/// Blocking an async function via a blocking call will block the entire +/// event loop, preventing it from executing other tasks while waiting for the +/// call to complete, negating the benefits of asynchronous programming. +/// +/// Instead of making a blocking call, use an equivalent asynchronous library +/// or function. +/// +/// ## Example +/// ```python +/// async def foo(): +/// time.sleep(1000) +/// ``` +/// +/// Use instead: +/// ```python +/// async def foo(): +/// await asyncio.sleep(1000) +/// ``` +#[violation] +pub struct OpenSleepOrSubprocessInAsyncFunction; + +impl Violation for OpenSleepOrSubprocessInAsyncFunction { + #[derive_message_formats] + fn message(&self) -> String { + format!("Async functions should not call `open`, `time.sleep`, or `subprocess` methods") + } +} + +const OPEN_SLEEP_OR_SUBPROCESS_CALL: &[&[&str]] = &[ + &["", "open"], + &["time", "sleep"], + &["subprocess", "run"], + &["subprocess", "Popen"], + // Deprecated subprocess calls: + &["subprocess", "call"], + &["subprocess", "check_call"], + &["subprocess", "check_output"], + &["subprocess", "getoutput"], + &["subprocess", "getstatusoutput"], + &["os", "wait"], + &["os", "wait3"], + &["os", "wait4"], + &["os", "waitid"], + &["os", "waitpid"], +]; + +/// ASYNC101 +pub(crate) fn open_sleep_or_subprocess_call(checker: &mut Checker, expr: &Expr) { + if in_async_function(checker.semantic_model()) { + if let Expr::Call(ast::ExprCall { func, .. 
}) = expr { + let is_open_sleep_or_subprocess_call = checker + .semantic_model() + .resolve_call_path(func) + .map_or(false, |path| { + OPEN_SLEEP_OR_SUBPROCESS_CALL.contains(&path.as_slice()) + }); + + if is_open_sleep_or_subprocess_call { + checker.diagnostics.push(Diagnostic::new( + OpenSleepOrSubprocessInAsyncFunction, + func.range(), + )); + } + } + } +} diff --git a/crates/ruff/src/rules/flake8_bandit/helpers.rs b/crates/ruff/src/rules/flake8_bandit/helpers.rs index 3158586e6b828..1de1cc92e16ac 100644 --- a/crates/ruff/src/rules/flake8_bandit/helpers.rs +++ b/crates/ruff/src/rules/flake8_bandit/helpers.rs @@ -2,7 +2,7 @@ use once_cell::sync::Lazy; use regex::Regex; use rustpython_parser::ast::{self, Constant, Expr}; -use crate::checkers::ast::Checker; +use ruff_python_semantic::model::SemanticModel; static PASSWORD_CANDIDATE_REGEX: Lazy = Lazy::new(|| { Regex::new(r"(^|_)(?i)(pas+wo?r?d|pass(phrase)?|pwd|token|secrete?)($|_)").unwrap() @@ -22,26 +22,20 @@ pub(crate) fn matches_password_name(string: &str) -> bool { PASSWORD_CANDIDATE_REGEX.is_match(string) } -pub(crate) fn is_untyped_exception(type_: Option<&Expr>, checker: &Checker) -> bool { +pub(crate) fn is_untyped_exception(type_: Option<&Expr>, model: &SemanticModel) -> bool { type_.map_or(true, |type_| { if let Expr::Tuple(ast::ExprTuple { elts, .. }) = &type_ { elts.iter().any(|type_| { - checker - .ctx - .resolve_call_path(type_) - .map_or(false, |call_path| { - call_path.as_slice() == ["", "Exception"] - || call_path.as_slice() == ["", "BaseException"] - }) - }) - } else { - checker - .ctx - .resolve_call_path(type_) - .map_or(false, |call_path| { + model.resolve_call_path(type_).map_or(false, |call_path| { call_path.as_slice() == ["", "Exception"] || call_path.as_slice() == ["", "BaseException"] }) + }) + } else { + model.resolve_call_path(type_).map_or(false, |call_path| { + call_path.as_slice() == ["", "Exception"] + || call_path.as_slice() == ["", "BaseException"] + }) } }) } diff --git a/crates/ruff/src/rules/flake8_bandit/mod.rs b/crates/ruff/src/rules/flake8_bandit/mod.rs index 91cd61a150715..fc7e2b6e28fcb 100644 --- a/crates/ruff/src/rules/flake8_bandit/mod.rs +++ b/crates/ruff/src/rules/flake8_bandit/mod.rs @@ -7,11 +7,10 @@ pub mod settings; mod tests { use std::path::Path; - use crate::assert_messages; use anyhow::Result; - use test_case::test_case; + use crate::assert_messages; use crate::registry::Rule; use crate::settings::Settings; use crate::test::test_path; @@ -43,6 +42,7 @@ mod tests { #[test_case(Rule::TryExceptContinue, Path::new("S112.py"); "S112")] #[test_case(Rule::TryExceptPass, Path::new("S110.py"); "S110")] #[test_case(Rule::UnsafeYAMLLoad, Path::new("S506.py"); "S506")] + #[test_case(Rule::ParamikoCall, Path::new("S601.py"); "S601")] fn rules(rule_code: Rule, path: &Path) -> Result<()> { let snapshot = format!("{}_{}", rule_code.noqa_code(), path.to_string_lossy()); let diagnostics = test_path( diff --git a/crates/ruff/src/rules/flake8_bandit/rules/bad_file_permissions.rs b/crates/ruff/src/rules/flake8_bandit/rules/bad_file_permissions.rs index 95e0d898071db..7bcf81dff6b48 100644 --- a/crates/ruff/src/rules/flake8_bandit/rules/bad_file_permissions.rs +++ b/crates/ruff/src/rules/flake8_bandit/rules/bad_file_permissions.rs @@ -108,7 +108,7 @@ pub(crate) fn bad_file_permissions( keywords: &[Keyword], ) { if checker - .ctx + .semantic_model() .resolve_call_path(func) .map_or(false, |call_path| call_path.as_slice() == ["os", "chmod"]) { diff --git 
a/crates/ruff/src/rules/flake8_bandit/rules/hardcoded_bind_all_interfaces.rs b/crates/ruff/src/rules/flake8_bandit/rules/hardcoded_bind_all_interfaces.rs index 47057ee468263..86f68e10b8c42 100644 --- a/crates/ruff/src/rules/flake8_bandit/rules/hardcoded_bind_all_interfaces.rs +++ b/crates/ruff/src/rules/flake8_bandit/rules/hardcoded_bind_all_interfaces.rs @@ -1,6 +1,7 @@ +use ruff_text_size::TextRange; + use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; -use ruff_text_size::TextRange; #[violation] pub struct HardcodedBindAllInterfaces; diff --git a/crates/ruff/src/rules/flake8_bandit/rules/hardcoded_sql_expression.rs b/crates/ruff/src/rules/flake8_bandit/rules/hardcoded_sql_expression.rs index b0b6e60c22c0d..4595dce6c73d7 100644 --- a/crates/ruff/src/rules/flake8_bandit/rules/hardcoded_sql_expression.rs +++ b/crates/ruff/src/rules/flake8_bandit/rules/hardcoded_sql_expression.rs @@ -60,7 +60,7 @@ fn unparse_string_format_expression(checker: &mut Checker, expr: &Expr) -> Optio op: Operator::Add | Operator::Mod, .. }) => { - let Some(parent) = checker.ctx.expr_parent() else { + let Some(parent) = checker.semantic_model().expr_parent() else { if any_over_expr(expr, &has_string_literal) { return Some(checker.generator().expr(expr)); } diff --git a/crates/ruff/src/rules/flake8_bandit/rules/hashlib_insecure_hash_functions.rs b/crates/ruff/src/rules/flake8_bandit/rules/hashlib_insecure_hash_functions.rs index ff6dcd88a995c..6948f6323b310 100644 --- a/crates/ruff/src/rules/flake8_bandit/rules/hashlib_insecure_hash_functions.rs +++ b/crates/ruff/src/rules/flake8_bandit/rules/hashlib_insecure_hash_functions.rs @@ -48,16 +48,21 @@ pub(crate) fn hashlib_insecure_hash_functions( args: &[Expr], keywords: &[Keyword], ) { - if let Some(hashlib_call) = checker.ctx.resolve_call_path(func).and_then(|call_path| { - if call_path.as_slice() == ["hashlib", "new"] { - Some(HashlibCall::New) - } else { - WEAK_HASHES - .iter() - .find(|hash| call_path.as_slice() == ["hashlib", hash]) - .map(|hash| HashlibCall::WeakHash(hash)) - } - }) { + if let Some(hashlib_call) = + checker + .semantic_model() + .resolve_call_path(func) + .and_then(|call_path| { + if call_path.as_slice() == ["hashlib", "new"] { + Some(HashlibCall::New) + } else { + WEAK_HASHES + .iter() + .find(|hash| call_path.as_slice() == ["hashlib", hash]) + .map(|hash| HashlibCall::WeakHash(hash)) + } + }) + { match hashlib_call { HashlibCall::New => { let call_args = SimpleCallArgs::new(args, keywords); diff --git a/crates/ruff/src/rules/flake8_bandit/rules/jinja2_autoescape_false.rs b/crates/ruff/src/rules/flake8_bandit/rules/jinja2_autoescape_false.rs index 2855cd5022fdc..06a2e770812b8 100644 --- a/crates/ruff/src/rules/flake8_bandit/rules/jinja2_autoescape_false.rs +++ b/crates/ruff/src/rules/flake8_bandit/rules/jinja2_autoescape_false.rs @@ -37,7 +37,7 @@ pub(crate) fn jinja2_autoescape_false( keywords: &[Keyword], ) { if checker - .ctx + .semantic_model() .resolve_call_path(func) .map_or(false, |call_path| { call_path.as_slice() == ["jinja2", "Environment"] diff --git a/crates/ruff/src/rules/flake8_bandit/rules/logging_config_insecure_listen.rs b/crates/ruff/src/rules/flake8_bandit/rules/logging_config_insecure_listen.rs index 762e6093f9880..6fc645bb88119 100644 --- a/crates/ruff/src/rules/flake8_bandit/rules/logging_config_insecure_listen.rs +++ b/crates/ruff/src/rules/flake8_bandit/rules/logging_config_insecure_listen.rs @@ -24,7 +24,7 @@ pub(crate) fn logging_config_insecure_listen( keywords: 
&[Keyword], ) { if checker - .ctx + .semantic_model() .resolve_call_path(func) .map_or(false, |call_path| { call_path.as_slice() == ["logging", "config", "listen"] diff --git a/crates/ruff/src/rules/flake8_bandit/rules/mod.rs b/crates/ruff/src/rules/flake8_bandit/rules/mod.rs index a7a3f4f92e8b5..5653a946e6f31 100644 --- a/crates/ruff/src/rules/flake8_bandit/rules/mod.rs +++ b/crates/ruff/src/rules/flake8_bandit/rules/mod.rs @@ -20,6 +20,7 @@ pub(crate) use jinja2_autoescape_false::{jinja2_autoescape_false, Jinja2Autoesca pub(crate) use logging_config_insecure_listen::{ logging_config_insecure_listen, LoggingConfigInsecureListen, }; +pub(crate) use paramiko_calls::{paramiko_call, ParamikoCall}; pub(crate) use request_with_no_cert_validation::{ request_with_no_cert_validation, RequestWithNoCertValidation, }; @@ -57,6 +58,7 @@ mod hardcoded_tmp_directory; mod hashlib_insecure_hash_functions; mod jinja2_autoescape_false; mod logging_config_insecure_listen; +mod paramiko_calls; mod request_with_no_cert_validation; mod request_without_timeout; mod shell_injection; diff --git a/crates/ruff/src/rules/flake8_bandit/rules/paramiko_calls.rs b/crates/ruff/src/rules/flake8_bandit/rules/paramiko_calls.rs new file mode 100644 index 0000000000000..e340a09ca4d41 --- /dev/null +++ b/crates/ruff/src/rules/flake8_bandit/rules/paramiko_calls.rs @@ -0,0 +1,31 @@ +use rustpython_parser::ast::{Expr, Ranged}; + +use ruff_diagnostics::{Diagnostic, Violation}; +use ruff_macros::{derive_message_formats, violation}; + +use crate::checkers::ast::Checker; + +#[violation] +pub struct ParamikoCall; + +impl Violation for ParamikoCall { + #[derive_message_formats] + fn message(&self) -> String { + format!("Possible shell injection via Paramiko call; check inputs are properly sanitized") + } +} + +/// S601 +pub(crate) fn paramiko_call(checker: &mut Checker, func: &Expr) { + if checker + .semantic_model() + .resolve_call_path(func) + .map_or(false, |call_path| { + call_path.as_slice() == ["paramiko", "exec_command"] + }) + { + checker + .diagnostics + .push(Diagnostic::new(ParamikoCall, func.range())); + } +} diff --git a/crates/ruff/src/rules/flake8_bandit/rules/request_with_no_cert_validation.rs b/crates/ruff/src/rules/flake8_bandit/rules/request_with_no_cert_validation.rs index 9870928984c36..fe45fb6e769a7 100644 --- a/crates/ruff/src/rules/flake8_bandit/rules/request_with_no_cert_validation.rs +++ b/crates/ruff/src/rules/flake8_bandit/rules/request_with_no_cert_validation.rs @@ -43,17 +43,21 @@ pub(crate) fn request_with_no_cert_validation( args: &[Expr], keywords: &[Keyword], ) { - if let Some(target) = checker.ctx.resolve_call_path(func).and_then(|call_path| { - if call_path.len() == 2 { - if call_path[0] == "requests" && REQUESTS_HTTP_VERBS.contains(&call_path[1]) { - return Some("requests"); + if let Some(target) = checker + .semantic_model() + .resolve_call_path(func) + .and_then(|call_path| { + if call_path.len() == 2 { + if call_path[0] == "requests" && REQUESTS_HTTP_VERBS.contains(&call_path[1]) { + return Some("requests"); + } + if call_path[0] == "httpx" && HTTPX_METHODS.contains(&call_path[1]) { + return Some("httpx"); + } } - if call_path[0] == "httpx" && HTTPX_METHODS.contains(&call_path[1]) { - return Some("httpx"); - } - } - None - }) { + None + }) + { let call_args = SimpleCallArgs::new(args, keywords); if let Some(verify_arg) = call_args.keyword_argument("verify") { if let Expr::Constant(ast::ExprConstant { diff --git a/crates/ruff/src/rules/flake8_bandit/rules/request_without_timeout.rs 
b/crates/ruff/src/rules/flake8_bandit/rules/request_without_timeout.rs index fee08cd188292..b08edbd4faf22 100644 --- a/crates/ruff/src/rules/flake8_bandit/rules/request_without_timeout.rs +++ b/crates/ruff/src/rules/flake8_bandit/rules/request_without_timeout.rs @@ -34,7 +34,7 @@ pub(crate) fn request_without_timeout( keywords: &[Keyword], ) { if checker - .ctx + .semantic_model() .resolve_call_path(func) .map_or(false, |call_path| { HTTP_VERBS diff --git a/crates/ruff/src/rules/flake8_bandit/rules/shell_injection.rs b/crates/ruff/src/rules/flake8_bandit/rules/shell_injection.rs index b571173c19745..5be6818183686 100644 --- a/crates/ruff/src/rules/flake8_bandit/rules/shell_injection.rs +++ b/crates/ruff/src/rules/flake8_bandit/rules/shell_injection.rs @@ -7,7 +7,7 @@ use rustpython_parser::ast::{self, Constant, Expr, Keyword, Ranged}; use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; use ruff_python_ast::helpers::Truthiness; -use ruff_python_semantic::context::Context; +use ruff_python_semantic::model::SemanticModel; use crate::{ checkers::ast::Checker, registry::Rule, rules::flake8_bandit::helpers::string_literal, @@ -97,8 +97,8 @@ enum CallKind { } /// Return the [`CallKind`] of the given function call. -fn get_call_kind(func: &Expr, context: &Context) -> Option { - context +fn get_call_kind(func: &Expr, model: &SemanticModel) -> Option { + model .resolve_call_path(func) .and_then(|call_path| match call_path.as_slice() { &[module, submodule] => match module { @@ -138,12 +138,15 @@ struct ShellKeyword<'a> { } /// Return the `shell` keyword argument to the given function call, if any. -fn find_shell_keyword<'a>(ctx: &Context, keywords: &'a [Keyword]) -> Option> { +fn find_shell_keyword<'a>( + model: &SemanticModel, + keywords: &'a [Keyword], +) -> Option> { keywords .iter() .find(|keyword| keyword.arg.as_ref().map_or(false, |arg| arg == "shell")) .map(|keyword| ShellKeyword { - truthiness: Truthiness::from_expr(&keyword.value, |id| ctx.is_builtin(id)), + truthiness: Truthiness::from_expr(&keyword.value, |id| model.is_builtin(id)), keyword, }) } @@ -181,21 +184,17 @@ pub(crate) fn shell_injection( args: &[Expr], keywords: &[Keyword], ) { - let call_kind = get_call_kind(func, &checker.ctx); + let call_kind = get_call_kind(func, checker.semantic_model()); if matches!(call_kind, Some(CallKind::Subprocess)) { if let Some(arg) = args.first() { - match find_shell_keyword(&checker.ctx, keywords) { + match find_shell_keyword(checker.semantic_model(), keywords) { // S602 Some(ShellKeyword { truthiness: Truthiness::Truthy, keyword, }) => { - if checker - .settings - .rules - .enabled(Rule::SubprocessPopenWithShellEqualsTrue) - { + if checker.enabled(Rule::SubprocessPopenWithShellEqualsTrue) { checker.diagnostics.push(Diagnostic::new( SubprocessPopenWithShellEqualsTrue { seems_safe: shell_call_seems_safe(arg), @@ -209,11 +208,7 @@ pub(crate) fn shell_injection( truthiness: Truthiness::Falsey | Truthiness::Unknown, keyword, }) => { - if checker - .settings - .rules - .enabled(Rule::SubprocessWithoutShellEqualsTrue) - { + if checker.enabled(Rule::SubprocessWithoutShellEqualsTrue) { checker.diagnostics.push(Diagnostic::new( SubprocessWithoutShellEqualsTrue, keyword.range(), @@ -222,11 +217,7 @@ pub(crate) fn shell_injection( } // S603 None => { - if checker - .settings - .rules - .enabled(Rule::SubprocessWithoutShellEqualsTrue) - { + if checker.enabled(Rule::SubprocessWithoutShellEqualsTrue) { checker.diagnostics.push(Diagnostic::new( 
SubprocessWithoutShellEqualsTrue, arg.range(), @@ -238,14 +229,10 @@ pub(crate) fn shell_injection( } else if let Some(ShellKeyword { truthiness: Truthiness::Truthy, keyword, - }) = find_shell_keyword(&checker.ctx, keywords) + }) = find_shell_keyword(checker.semantic_model(), keywords) { // S604 - if checker - .settings - .rules - .enabled(Rule::CallWithShellEqualsTrue) - { + if checker.enabled(Rule::CallWithShellEqualsTrue) { checker .diagnostics .push(Diagnostic::new(CallWithShellEqualsTrue, keyword.range())); @@ -255,7 +242,7 @@ pub(crate) fn shell_injection( // S605 if matches!(call_kind, Some(CallKind::Shell)) { if let Some(arg) = args.first() { - if checker.settings.rules.enabled(Rule::StartProcessWithAShell) { + if checker.enabled(Rule::StartProcessWithAShell) { checker.diagnostics.push(Diagnostic::new( StartProcessWithAShell { seems_safe: shell_call_seems_safe(arg), @@ -268,11 +255,7 @@ pub(crate) fn shell_injection( // S606 if matches!(call_kind, Some(CallKind::NoShell)) { - if checker - .settings - .rules - .enabled(Rule::StartProcessWithNoShell) - { + if checker.enabled(Rule::StartProcessWithNoShell) { checker .diagnostics .push(Diagnostic::new(StartProcessWithNoShell, func.range())); @@ -282,11 +265,7 @@ pub(crate) fn shell_injection( // S607 if call_kind.is_some() { if let Some(arg) = args.first() { - if checker - .settings - .rules - .enabled(Rule::StartProcessWithPartialPath) - { + if checker.enabled(Rule::StartProcessWithPartialPath) { if let Some(value) = try_string_literal(arg) { if FULL_PATH_REGEX.find(value).is_none() { checker diff --git a/crates/ruff/src/rules/flake8_bandit/rules/snmp_insecure_version.rs b/crates/ruff/src/rules/flake8_bandit/rules/snmp_insecure_version.rs index 2add3b236f107..60fc3b33dda77 100644 --- a/crates/ruff/src/rules/flake8_bandit/rules/snmp_insecure_version.rs +++ b/crates/ruff/src/rules/flake8_bandit/rules/snmp_insecure_version.rs @@ -25,7 +25,7 @@ pub(crate) fn snmp_insecure_version( keywords: &[Keyword], ) { if checker - .ctx + .semantic_model() .resolve_call_path(func) .map_or(false, |call_path| { call_path.as_slice() == ["pysnmp", "hlapi", "CommunityData"] diff --git a/crates/ruff/src/rules/flake8_bandit/rules/snmp_weak_cryptography.rs b/crates/ruff/src/rules/flake8_bandit/rules/snmp_weak_cryptography.rs index 68d6e09d1abcf..313c32a187e9d 100644 --- a/crates/ruff/src/rules/flake8_bandit/rules/snmp_weak_cryptography.rs +++ b/crates/ruff/src/rules/flake8_bandit/rules/snmp_weak_cryptography.rs @@ -27,7 +27,7 @@ pub(crate) fn snmp_weak_cryptography( keywords: &[Keyword], ) { if checker - .ctx + .semantic_model() .resolve_call_path(func) .map_or(false, |call_path| { call_path.as_slice() == ["pysnmp", "hlapi", "UsmUserData"] diff --git a/crates/ruff/src/rules/flake8_bandit/rules/suspicious_function_call.rs b/crates/ruff/src/rules/flake8_bandit/rules/suspicious_function_call.rs index c0eb38e09a5df..6c6eba80feb99 100644 --- a/crates/ruff/src/rules/flake8_bandit/rules/suspicious_function_call.rs +++ b/crates/ruff/src/rules/flake8_bandit/rules/suspicious_function_call.rs @@ -470,7 +470,7 @@ pub(crate) fn suspicious_function_call(checker: &mut Checker, expr: &Expr) { return; }; - let Some(reason) = checker.ctx.resolve_call_path(func).and_then(|call_path| { + let Some(reason) = checker.semantic_model().resolve_call_path(func).and_then(|call_path| { for module in SUSPICIOUS_MEMBERS { for member in module.members { if call_path.as_slice() == *member { @@ -512,7 +512,7 @@ pub(crate) fn suspicious_function_call(checker: &mut Checker, expr: &Expr) { 
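For orientation, a small hypothetical sketch of the kinds of calls the flake8-bandit rules above keep flagging after the switch from `checker.ctx` to `checker.semantic_model()`; the inline comments name the corresponding rule codes, and the paramiko line mirrors the `S601.py` fixture added in this change.

```python
# Fixture-style snippet meant to trigger diagnostics, not to be executed as-is.
import hashlib
import subprocess

import paramiko

hashlib.md5(b"data")  # S324: insecure hash function
subprocess.run("cat /etc/shadow | grep root", shell=True)  # S602: subprocess call with shell=True
paramiko.exec_command('something; really; unsafe')  # S601: possible shell injection via Paramiko
```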
Reason::FTPLib => SuspiciousFTPLibUsage.into(), }; let diagnostic = Diagnostic::new::(diagnostic_kind, expr.range()); - if checker.settings.rules.enabled(diagnostic.kind.rule()) { + if checker.enabled(diagnostic.kind.rule()) { checker.diagnostics.push(diagnostic); } } diff --git a/crates/ruff/src/rules/flake8_bandit/rules/try_except_continue.rs b/crates/ruff/src/rules/flake8_bandit/rules/try_except_continue.rs index 4dd0c0b4a73e6..a64c33bc39bec 100644 --- a/crates/ruff/src/rules/flake8_bandit/rules/try_except_continue.rs +++ b/crates/ruff/src/rules/flake8_bandit/rules/try_except_continue.rs @@ -27,7 +27,7 @@ pub(crate) fn try_except_continue( ) { if body.len() == 1 && body[0].is_continue_stmt() - && (check_typed_exception || is_untyped_exception(type_, checker)) + && (check_typed_exception || is_untyped_exception(type_, checker.semantic_model())) { checker .diagnostics diff --git a/crates/ruff/src/rules/flake8_bandit/rules/try_except_pass.rs b/crates/ruff/src/rules/flake8_bandit/rules/try_except_pass.rs index 429f3638581ce..c740399349316 100644 --- a/crates/ruff/src/rules/flake8_bandit/rules/try_except_pass.rs +++ b/crates/ruff/src/rules/flake8_bandit/rules/try_except_pass.rs @@ -27,7 +27,7 @@ pub(crate) fn try_except_pass( ) { if body.len() == 1 && body[0].is_pass_stmt() - && (check_typed_exception || is_untyped_exception(type_, checker)) + && (check_typed_exception || is_untyped_exception(type_, checker.semantic_model())) { checker .diagnostics diff --git a/crates/ruff/src/rules/flake8_bandit/rules/unsafe_yaml_load.rs b/crates/ruff/src/rules/flake8_bandit/rules/unsafe_yaml_load.rs index f25b242026b07..e5225eb0d49c1 100644 --- a/crates/ruff/src/rules/flake8_bandit/rules/unsafe_yaml_load.rs +++ b/crates/ruff/src/rules/flake8_bandit/rules/unsafe_yaml_load.rs @@ -38,14 +38,14 @@ pub(crate) fn unsafe_yaml_load( keywords: &[Keyword], ) { if checker - .ctx + .semantic_model() .resolve_call_path(func) .map_or(false, |call_path| call_path.as_slice() == ["yaml", "load"]) { let call_args = SimpleCallArgs::new(args, keywords); if let Some(loader_arg) = call_args.argument("Loader", 1) { if !checker - .ctx + .semantic_model() .resolve_call_path(loader_arg) .map_or(false, |call_path| { call_path.as_slice() == ["yaml", "SafeLoader"] diff --git a/crates/ruff/src/rules/flake8_bandit/snapshots/ruff__rules__flake8_bandit__tests__S601_S601.py.snap b/crates/ruff/src/rules/flake8_bandit/snapshots/ruff__rules__flake8_bandit__tests__S601_S601.py.snap new file mode 100644 index 0000000000000..8e11bc8541f8a --- /dev/null +++ b/crates/ruff/src/rules/flake8_bandit/snapshots/ruff__rules__flake8_bandit__tests__S601_S601.py.snap @@ -0,0 +1,12 @@ +--- +source: crates/ruff/src/rules/flake8_bandit/mod.rs +--- +S601.py:3:1: S601 Possible shell injection via Paramiko call; check inputs are properly sanitized + | +3 | import paramiko +4 | +5 | paramiko.exec_command('something; really; unsafe') + | ^^^^^^^^^^^^^^^^^^^^^ S601 + | + + diff --git a/crates/ruff/src/rules/flake8_blind_except/mod.rs b/crates/ruff/src/rules/flake8_blind_except/mod.rs index ba972c84711c5..1ade0f4092a9f 100644 --- a/crates/ruff/src/rules/flake8_blind_except/mod.rs +++ b/crates/ruff/src/rules/flake8_blind_except/mod.rs @@ -6,7 +6,6 @@ mod tests { use std::path::Path; use anyhow::Result; - use test_case::test_case; use crate::registry::Rule; diff --git a/crates/ruff/src/rules/flake8_blind_except/rules.rs b/crates/ruff/src/rules/flake8_blind_except/rules/blind_except.rs similarity index 94% rename from crates/ruff/src/rules/flake8_blind_except/rules.rs 
rename to crates/ruff/src/rules/flake8_blind_except/rules/blind_except.rs index 6837e1d3c50e6..ef4f52524ece7 100644 --- a/crates/ruff/src/rules/flake8_blind_except/rules.rs +++ b/crates/ruff/src/rules/flake8_blind_except/rules/blind_except.rs @@ -34,7 +34,7 @@ pub(crate) fn blind_except( return; }; for exception in ["BaseException", "Exception"] { - if id == exception && checker.ctx.is_builtin(exception) { + if id == exception && checker.semantic_model().is_builtin(exception) { // If the exception is re-raised, don't flag an error. if body.iter().any(|stmt| { if let Stmt::Raise(ast::StmtRaise { exc, .. }) = stmt { @@ -58,7 +58,7 @@ pub(crate) fn blind_except( if body.iter().any(|stmt| { if let Stmt::Expr(ast::StmtExpr { value, range: _ }) = stmt { if let Expr::Call(ast::ExprCall { func, keywords, .. }) = value.as_ref() { - if logging::is_logger_candidate(&checker.ctx, func) { + if logging::is_logger_candidate(func, checker.semantic_model()) { if let Some(attribute) = func.as_attribute_expr() { let attr = attribute.attr.as_str(); if attr == "exception" { diff --git a/crates/ruff/src/rules/flake8_blind_except/rules/mod.rs b/crates/ruff/src/rules/flake8_blind_except/rules/mod.rs new file mode 100644 index 0000000000000..520b3ece06db7 --- /dev/null +++ b/crates/ruff/src/rules/flake8_blind_except/rules/mod.rs @@ -0,0 +1,3 @@ +pub(crate) use blind_except::{blind_except, BlindExcept}; + +mod blind_except; diff --git a/crates/ruff/src/rules/flake8_boolean_trap/helpers.rs b/crates/ruff/src/rules/flake8_boolean_trap/helpers.rs new file mode 100644 index 0000000000000..463ad690abde4 --- /dev/null +++ b/crates/ruff/src/rules/flake8_boolean_trap/helpers.rs @@ -0,0 +1,66 @@ +use rustpython_parser::ast::{self, Constant, Expr, Ranged}; + +use ruff_diagnostics::{Diagnostic, DiagnosticKind}; + +use crate::checkers::ast::Checker; + +pub(super) const FUNC_CALL_NAME_ALLOWLIST: &[&str] = &[ + "append", + "assertEqual", + "assertEquals", + "assertNotEqual", + "assertNotEquals", + "bytes", + "count", + "failIfEqual", + "failUnlessEqual", + "float", + "fromkeys", + "get", + "getattr", + "getboolean", + "getfloat", + "getint", + "index", + "insert", + "int", + "param", + "pop", + "remove", + "setattr", + "setdefault", + "str", +]; + +pub(super) const FUNC_DEF_NAME_ALLOWLIST: &[&str] = &["__setitem__"]; + +/// Returns `true` if an argument is allowed to use a boolean trap. To return +/// `true`, the function name must be explicitly allowed, and the argument must +/// be either the first or second argument in the call. +pub(super) fn allow_boolean_trap(func: &Expr) -> bool { + if let Expr::Attribute(ast::ExprAttribute { attr, .. }) = func { + return FUNC_CALL_NAME_ALLOWLIST.contains(&attr.as_ref()); + } + + if let Expr::Name(ast::ExprName { id, .. }) = func { + return FUNC_CALL_NAME_ALLOWLIST.contains(&id.as_ref()); + } + + false +} + +const fn is_boolean_arg(arg: &Expr) -> bool { + matches!( + &arg, + Expr::Constant(ast::ExprConstant { + value: Constant::Bool(_), + .. + }) + ) +} + +pub(super) fn add_if_boolean(checker: &mut Checker, arg: &Expr, kind: DiagnosticKind) { + if is_boolean_arg(arg) { + checker.diagnostics.push(Diagnostic::new(kind, arg.range())); + } +} diff --git a/crates/ruff/src/rules/flake8_boolean_trap/mod.rs b/crates/ruff/src/rules/flake8_boolean_trap/mod.rs index a90031bcee6a4..1e86c51cbf32c 100644 --- a/crates/ruff/src/rules/flake8_boolean_trap/mod.rs +++ b/crates/ruff/src/rules/flake8_boolean_trap/mod.rs @@ -1,4 +1,5 @@ //! 
Rules from [flake8-boolean-trap](https://pypi.org/project/flake8-boolean-trap/). +mod helpers; pub(crate) mod rules; #[cfg(test)] @@ -6,7 +7,6 @@ mod tests { use std::path::Path; use anyhow::Result; - use test_case::test_case; use crate::registry::Rule; diff --git a/crates/ruff/src/rules/flake8_boolean_trap/rules.rs b/crates/ruff/src/rules/flake8_boolean_trap/rules.rs deleted file mode 100644 index d8096410df064..0000000000000 --- a/crates/ruff/src/rules/flake8_boolean_trap/rules.rs +++ /dev/null @@ -1,176 +0,0 @@ -use rustpython_parser::ast::{self, Arguments, Constant, Expr, Ranged}; - -use ruff_diagnostics::Violation; -use ruff_diagnostics::{Diagnostic, DiagnosticKind}; -use ruff_macros::{derive_message_formats, violation}; -use ruff_python_ast::call_path::collect_call_path; - -use crate::checkers::ast::Checker; - -#[violation] -pub struct BooleanPositionalArgInFunctionDefinition; - -impl Violation for BooleanPositionalArgInFunctionDefinition { - #[derive_message_formats] - fn message(&self) -> String { - format!("Boolean positional arg in function definition") - } -} - -#[violation] -pub struct BooleanDefaultValueInFunctionDefinition; - -impl Violation for BooleanDefaultValueInFunctionDefinition { - #[derive_message_formats] - fn message(&self) -> String { - format!("Boolean default value in function definition") - } -} - -#[violation] -pub struct BooleanPositionalValueInFunctionCall; - -impl Violation for BooleanPositionalValueInFunctionCall { - #[derive_message_formats] - fn message(&self) -> String { - format!("Boolean positional value in function call") - } -} - -const FUNC_CALL_NAME_ALLOWLIST: &[&str] = &[ - "append", - "assertEqual", - "assertEquals", - "assertNotEqual", - "assertNotEquals", - "bytes", - "count", - "failIfEqual", - "failUnlessEqual", - "float", - "fromkeys", - "get", - "getattr", - "getboolean", - "getfloat", - "getint", - "index", - "insert", - "int", - "param", - "pop", - "remove", - "setattr", - "setdefault", - "str", -]; - -const FUNC_DEF_NAME_ALLOWLIST: &[&str] = &["__setitem__"]; - -/// Returns `true` if an argument is allowed to use a boolean trap. To return -/// `true`, the function name must be explicitly allowed, and the argument must -/// be either the first or second argument in the call. -fn allow_boolean_trap(func: &Expr) -> bool { - if let Expr::Attribute(ast::ExprAttribute { attr, .. }) = func { - return FUNC_CALL_NAME_ALLOWLIST.contains(&attr.as_ref()); - } - - if let Expr::Name(ast::ExprName { id, .. }) = func { - return FUNC_CALL_NAME_ALLOWLIST.contains(&id.as_ref()); - } - - false -} - -const fn is_boolean_arg(arg: &Expr) -> bool { - matches!( - &arg, - Expr::Constant(ast::ExprConstant { - value: Constant::Bool(_), - .. 
- }) - ) -} - -fn add_if_boolean(checker: &mut Checker, arg: &Expr, kind: DiagnosticKind) { - if is_boolean_arg(arg) { - checker.diagnostics.push(Diagnostic::new(kind, arg.range())); - } -} - -pub(crate) fn check_positional_boolean_in_def( - checker: &mut Checker, - name: &str, - decorator_list: &[Expr], - arguments: &Arguments, -) { - if FUNC_DEF_NAME_ALLOWLIST.contains(&name) { - return; - } - - if decorator_list.iter().any(|expr| { - collect_call_path(expr).map_or(false, |call_path| call_path.as_slice() == [name, "setter"]) - }) { - return; - } - - for arg in arguments.posonlyargs.iter().chain(arguments.args.iter()) { - if arg.annotation.is_none() { - continue; - } - let Some(expr) = &arg.annotation else { - continue; - }; - - // check for both bool (python class) and 'bool' (string annotation) - let hint = match expr.as_ref() { - Expr::Name(name) => &name.id == "bool", - Expr::Constant(ast::ExprConstant { - value: Constant::Str(value), - .. - }) => value == "bool", - _ => false, - }; - if !hint { - continue; - } - checker.diagnostics.push(Diagnostic::new( - BooleanPositionalArgInFunctionDefinition, - arg.range(), - )); - } -} - -pub(crate) fn check_boolean_default_value_in_function_definition( - checker: &mut Checker, - name: &str, - decorator_list: &[Expr], - arguments: &Arguments, -) { - if FUNC_DEF_NAME_ALLOWLIST.contains(&name) { - return; - } - - if decorator_list.iter().any(|expr| { - collect_call_path(expr).map_or(false, |call_path| call_path.as_slice() == [name, "setter"]) - }) { - return; - } - - for arg in &arguments.defaults { - add_if_boolean(checker, arg, BooleanDefaultValueInFunctionDefinition.into()); - } -} - -pub(crate) fn check_boolean_positional_value_in_function_call( - checker: &mut Checker, - args: &[Expr], - func: &Expr, -) { - if allow_boolean_trap(func) { - return; - } - for arg in args { - add_if_boolean(checker, arg, BooleanPositionalValueInFunctionCall.into()); - } -} diff --git a/crates/ruff/src/rules/flake8_boolean_trap/rules/check_boolean_default_value_in_function_definition.rs b/crates/ruff/src/rules/flake8_boolean_trap/rules/check_boolean_default_value_in_function_definition.rs new file mode 100644 index 0000000000000..610a6c8491f30 --- /dev/null +++ b/crates/ruff/src/rules/flake8_boolean_trap/rules/check_boolean_default_value_in_function_definition.rs @@ -0,0 +1,80 @@ +use rustpython_parser::ast::{Arguments, Expr}; + +use ruff_diagnostics::Violation; + +use ruff_macros::{derive_message_formats, violation}; +use ruff_python_ast::call_path::collect_call_path; + +use crate::checkers::ast::Checker; +use crate::rules::flake8_boolean_trap::helpers::add_if_boolean; + +use super::super::helpers::FUNC_DEF_NAME_ALLOWLIST; + +/// ## What it does +/// Checks for the use of booleans as default values in function definitions. +/// +/// ## Why is this bad? +/// Calling a function with a boolean default means that the keyword argument +/// can be omitted, which makes the function call ambiguous. +/// +/// Instead, define the relevant argument as keyword-only.
+/// +/// ## Example +/// ```python +/// from math import ceil, floor +/// +/// +/// def round_number(number: float, *, up: bool = True) -> int: +/// return ceil(number) if up else floor(number) +/// +/// +/// round_number(1.5) +/// round_number(1.5, up=False) +/// ``` +/// +/// Use instead: +/// ```python +/// from math import ceil, floor +/// +/// +/// def round_number(number: float, *, up: bool) -> int: +/// return ceil(number) if up else floor(number) +/// +/// +/// round_number(1.5, up=True) +/// round_number(1.5, up=False) +/// ``` +/// +/// ## References +/// - [Python documentation](https://docs.python.org/3/reference/expressions.html#calls) +/// - [_How to Avoid β€œThe Boolean Trap”_ by Adam Johnson](https://adamj.eu/tech/2021/07/10/python-type-hints-how-to-avoid-the-boolean-trap/) +#[violation] +pub struct BooleanDefaultValueInFunctionDefinition; + +impl Violation for BooleanDefaultValueInFunctionDefinition { + #[derive_message_formats] + fn message(&self) -> String { + format!("Boolean default value in function definition") + } +} + +pub(crate) fn check_boolean_default_value_in_function_definition( + checker: &mut Checker, + name: &str, + decorator_list: &[Expr], + arguments: &Arguments, +) { + if FUNC_DEF_NAME_ALLOWLIST.contains(&name) { + return; + } + + if decorator_list.iter().any(|expr| { + collect_call_path(expr).map_or(false, |call_path| call_path.as_slice() == [name, "setter"]) + }) { + return; + } + + for arg in &arguments.defaults { + add_if_boolean(checker, arg, BooleanDefaultValueInFunctionDefinition.into()); + } +} diff --git a/crates/ruff/src/rules/flake8_boolean_trap/rules/check_boolean_positional_value_in_function_call.rs b/crates/ruff/src/rules/flake8_boolean_trap/rules/check_boolean_positional_value_in_function_call.rs new file mode 100644 index 0000000000000..5b39f0f24a308 --- /dev/null +++ b/crates/ruff/src/rules/flake8_boolean_trap/rules/check_boolean_positional_value_in_function_call.rs @@ -0,0 +1,60 @@ +use rustpython_parser::ast::Expr; + +use ruff_diagnostics::Violation; + +use ruff_macros::{derive_message_formats, violation}; + +use crate::checkers::ast::Checker; +use crate::rules::flake8_boolean_trap::helpers::{add_if_boolean, allow_boolean_trap}; + +/// ## What it does +/// Checks for boolean positional arguments in function calls. +/// +/// ## Why is this bad? +/// Calling a function with boolean positional arguments is confusing as the +/// meaning of the boolean value is not clear to the caller, and to future +/// readers of the code. +/// +/// ## Example +/// ```python +/// def foo(flag: bool) -> None: +/// ... +/// +/// +/// foo(True) +/// ``` +/// +/// Use instead: +/// ```python +/// def foo(flag: bool) -> None: +/// ... 
+/// +/// +/// foo(flag=True) +/// ``` +/// +/// ## References +/// - [Python documentation](https://docs.python.org/3/reference/expressions.html#calls) +/// - [_How to Avoid β€œThe Boolean Trap”_ by Adam Johnson](https://adamj.eu/tech/2021/07/10/python-type-hints-how-to-avoid-the-boolean-trap/) +#[violation] +pub struct BooleanPositionalValueInFunctionCall; + +impl Violation for BooleanPositionalValueInFunctionCall { + #[derive_message_formats] + fn message(&self) -> String { + format!("Boolean positional value in function call") + } +} + +pub(crate) fn check_boolean_positional_value_in_function_call( + checker: &mut Checker, + args: &[Expr], + func: &Expr, +) { + if allow_boolean_trap(func) { + return; + } + for arg in args { + add_if_boolean(checker, arg, BooleanPositionalValueInFunctionCall.into()); + } +} diff --git a/crates/ruff/src/rules/flake8_boolean_trap/rules/check_positional_boolean_in_def.rs b/crates/ruff/src/rules/flake8_boolean_trap/rules/check_positional_boolean_in_def.rs new file mode 100644 index 0000000000000..3a6f2d4df36eb --- /dev/null +++ b/crates/ruff/src/rules/flake8_boolean_trap/rules/check_positional_boolean_in_def.rs @@ -0,0 +1,120 @@ +use rustpython_parser::ast::{self, Arguments, Constant, Expr, Ranged}; + +use ruff_diagnostics::Diagnostic; +use ruff_diagnostics::Violation; +use ruff_macros::{derive_message_formats, violation}; +use ruff_python_ast::call_path::collect_call_path; + +use crate::checkers::ast::Checker; +use crate::rules::flake8_boolean_trap::helpers::FUNC_DEF_NAME_ALLOWLIST; + +/// ## What it does +/// Checks for boolean positional arguments in function definitions. +/// +/// ## Why is this bad? +/// Calling a function with boolean positional arguments is confusing as the +/// meaning of the boolean value is not clear to the caller, and to future +/// readers of the code. +/// +/// The use of a boolean will also limit the function to only two possible +/// behaviors, which makes the function difficult to extend in the future. +/// +/// ## Example +/// ```python +/// from math import ceil, floor +/// +/// +/// def round_number(number: float, up: bool) -> int: +/// return ceil(number) if up else floor(number) +/// +/// +/// round_number(1.5, True) # What does `True` mean? +/// round_number(1.5, False) # What does `False` mean? +/// ``` +/// +/// Instead, refactor into separate implementations: +/// ```python +/// from math import ceil, floor +/// +/// +/// def round_up(number: float) -> int: +/// return ceil(number) +/// +/// +/// def round_down(number: float) -> int: +/// return floor(number) +/// +/// +/// round_up(1.5) +/// round_down(1.5) +/// ``` +/// +/// Or, refactor to use an `Enum`: +/// ```python +/// from enum import Enum +/// +/// +/// class RoundingMethod(Enum): +/// UP = 1 +/// DOWN = 2 +/// +/// +/// def round_number(value: float, method: RoundingMethod) -> float: +/// ... 
+/// ``` +/// +/// ## References +/// - [Python documentation](https://docs.python.org/3/reference/expressions.html#calls) +/// - [_How to Avoid β€œThe Boolean Trap”_ by Adam Johnson](https://adamj.eu/tech/2021/07/10/python-type-hints-how-to-avoid-the-boolean-trap/) +#[violation] +pub struct BooleanPositionalArgInFunctionDefinition; + +impl Violation for BooleanPositionalArgInFunctionDefinition { + #[derive_message_formats] + fn message(&self) -> String { + format!("Boolean positional arg in function definition") + } +} + +pub(crate) fn check_positional_boolean_in_def( + checker: &mut Checker, + name: &str, + decorator_list: &[Expr], + arguments: &Arguments, +) { + if FUNC_DEF_NAME_ALLOWLIST.contains(&name) { + return; + } + + if decorator_list.iter().any(|expr| { + collect_call_path(expr).map_or(false, |call_path| call_path.as_slice() == [name, "setter"]) + }) { + return; + } + + for arg in arguments.posonlyargs.iter().chain(arguments.args.iter()) { + if arg.annotation.is_none() { + continue; + } + let Some(expr) = &arg.annotation else { + continue; + }; + + // check for both bool (python class) and 'bool' (string annotation) + let hint = match expr.as_ref() { + Expr::Name(name) => &name.id == "bool", + Expr::Constant(ast::ExprConstant { + value: Constant::Str(value), + .. + }) => value == "bool", + _ => false, + }; + if !hint { + continue; + } + checker.diagnostics.push(Diagnostic::new( + BooleanPositionalArgInFunctionDefinition, + arg.range(), + )); + } +} diff --git a/crates/ruff/src/rules/flake8_boolean_trap/rules/mod.rs b/crates/ruff/src/rules/flake8_boolean_trap/rules/mod.rs new file mode 100644 index 0000000000000..a0e9b8bd66727 --- /dev/null +++ b/crates/ruff/src/rules/flake8_boolean_trap/rules/mod.rs @@ -0,0 +1,13 @@ +pub(crate) use check_boolean_default_value_in_function_definition::{ + check_boolean_default_value_in_function_definition, BooleanDefaultValueInFunctionDefinition, +}; +pub(crate) use check_boolean_positional_value_in_function_call::{ + check_boolean_positional_value_in_function_call, BooleanPositionalValueInFunctionCall, +}; +pub(crate) use check_positional_boolean_in_def::{ + check_positional_boolean_in_def, BooleanPositionalArgInFunctionDefinition, +}; + +mod check_boolean_default_value_in_function_definition; +mod check_boolean_positional_value_in_function_call; +mod check_positional_boolean_in_def; diff --git a/crates/ruff/src/rules/flake8_bugbear/mod.rs b/crates/ruff/src/rules/flake8_bugbear/mod.rs index a25369f28463b..386b591a8f3c2 100644 --- a/crates/ruff/src/rules/flake8_bugbear/mod.rs +++ b/crates/ruff/src/rules/flake8_bugbear/mod.rs @@ -6,11 +6,10 @@ pub mod settings; mod tests { use std::path::Path; - use crate::assert_messages; use anyhow::Result; - use test_case::test_case; + use crate::assert_messages; use crate::registry::Rule; use crate::settings::Settings; use crate::test::test_path; diff --git a/crates/ruff/src/rules/flake8_bugbear/rules/abstract_base_class.rs b/crates/ruff/src/rules/flake8_bugbear/rules/abstract_base_class.rs index fcc849b2b625b..8659d0718cbec 100644 --- a/crates/ruff/src/rules/flake8_bugbear/rules/abstract_base_class.rs +++ b/crates/ruff/src/rules/flake8_bugbear/rules/abstract_base_class.rs @@ -3,7 +3,7 @@ use rustpython_parser::ast::{self, Constant, Expr, Keyword, Ranged, Stmt}; use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; use ruff_python_semantic::analyze::visibility::{is_abstract, is_overload}; -use ruff_python_semantic::context::Context; +use 
ruff_python_semantic::model::SemanticModel; use crate::checkers::ast::Checker; use crate::registry::Rule; @@ -35,16 +35,16 @@ impl Violation for EmptyMethodWithoutAbstractDecorator { } } -fn is_abc_class(context: &Context, bases: &[Expr], keywords: &[Keyword]) -> bool { +fn is_abc_class(model: &SemanticModel, bases: &[Expr], keywords: &[Keyword]) -> bool { keywords.iter().any(|keyword| { keyword.arg.as_ref().map_or(false, |arg| arg == "metaclass") - && context + && model .resolve_call_path(&keyword.value) .map_or(false, |call_path| { call_path.as_slice() == ["abc", "ABCMeta"] }) }) || bases.iter().any(|base| { - context + model .resolve_call_path(base) .map_or(false, |call_path| call_path.as_slice() == ["abc", "ABC"]) }) @@ -79,7 +79,7 @@ pub(crate) fn abstract_base_class( if bases.len() + keywords.len() != 1 { return; } - if !is_abc_class(&checker.ctx, bases, keywords) { + if !is_abc_class(checker.semantic_model(), bases, keywords) { return; } @@ -108,20 +108,16 @@ pub(crate) fn abstract_base_class( continue; }; - let has_abstract_decorator = is_abstract(&checker.ctx, decorator_list); + let has_abstract_decorator = is_abstract(checker.semantic_model(), decorator_list); has_abstract_method |= has_abstract_decorator; - if !checker - .settings - .rules - .enabled(Rule::EmptyMethodWithoutAbstractDecorator) - { + if !checker.enabled(Rule::EmptyMethodWithoutAbstractDecorator) { continue; } if !has_abstract_decorator && is_empty_body(body) - && !is_overload(&checker.ctx, decorator_list) + && !is_overload(checker.semantic_model(), decorator_list) { checker.diagnostics.push(Diagnostic::new( EmptyMethodWithoutAbstractDecorator { @@ -131,11 +127,7 @@ pub(crate) fn abstract_base_class( )); } } - if checker - .settings - .rules - .enabled(Rule::AbstractBaseClassWithoutAbstractMethod) - { + if checker.enabled(Rule::AbstractBaseClassWithoutAbstractMethod) { if !has_abstract_method { checker.diagnostics.push(Diagnostic::new( AbstractBaseClassWithoutAbstractMethod { diff --git a/crates/ruff/src/rules/flake8_bugbear/rules/assert_false.rs b/crates/ruff/src/rules/flake8_bugbear/rules/assert_false.rs index de7f05ef51b28..817483b00eb1b 100644 --- a/crates/ruff/src/rules/flake8_bugbear/rules/assert_false.rs +++ b/crates/ruff/src/rules/flake8_bugbear/rules/assert_false.rs @@ -54,7 +54,7 @@ pub(crate) fn assert_false(checker: &mut Checker, stmt: &Stmt, test: &Expr, msg: let mut diagnostic = Diagnostic::new(AssertFalse, test.range()); if checker.patch(diagnostic.kind.rule()) { #[allow(deprecated)] - diagnostic.set_fix(Fix::unspecified(Edit::range_replacement( + diagnostic.set_fix(Fix::suggested(Edit::range_replacement( checker.generator().stmt(&assertion_error(msg)), stmt.range(), ))); diff --git a/crates/ruff/src/rules/flake8_bugbear/rules/assert_raises_exception.rs b/crates/ruff/src/rules/flake8_bugbear/rules/assert_raises_exception.rs index 04c6255c786cc..be7a22f54e7d4 100644 --- a/crates/ruff/src/rules/flake8_bugbear/rules/assert_raises_exception.rs +++ b/crates/ruff/src/rules/flake8_bugbear/rules/assert_raises_exception.rs @@ -66,7 +66,7 @@ pub(crate) fn assert_raises_exception(checker: &mut Checker, stmt: &Stmt, items: } if !checker - .ctx + .semantic_model() .resolve_call_path(args.first().unwrap()) .map_or(false, |call_path| call_path.as_slice() == ["", "Exception"]) { @@ -78,7 +78,7 @@ pub(crate) fn assert_raises_exception(checker: &mut Checker, stmt: &Stmt, items: { AssertionKind::AssertRaises } else if checker - .ctx + .semantic_model() .resolve_call_path(func) .map_or(false, |call_path| { 
call_path.as_slice() == ["pytest", "raises"] diff --git a/crates/ruff/src/rules/flake8_bugbear/rules/cached_instance_method.rs b/crates/ruff/src/rules/flake8_bugbear/rules/cached_instance_method.rs index 3d438f01c0cd7..4c2032ec8fa66 100644 --- a/crates/ruff/src/rules/flake8_bugbear/rules/cached_instance_method.rs +++ b/crates/ruff/src/rules/flake8_bugbear/rules/cached_instance_method.rs @@ -2,6 +2,7 @@ use rustpython_parser::ast::{self, Expr, Ranged}; use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; +use ruff_python_semantic::model::SemanticModel; use ruff_python_semantic::scope::ScopeKind; use crate::checkers::ast::Checker; @@ -18,19 +19,16 @@ impl Violation for CachedInstanceMethod { } } -fn is_cache_func(checker: &Checker, expr: &Expr) -> bool { - checker - .ctx - .resolve_call_path(expr) - .map_or(false, |call_path| { - call_path.as_slice() == ["functools", "lru_cache"] - || call_path.as_slice() == ["functools", "cache"] - }) +fn is_cache_func(model: &SemanticModel, expr: &Expr) -> bool { + model.resolve_call_path(expr).map_or(false, |call_path| { + call_path.as_slice() == ["functools", "lru_cache"] + || call_path.as_slice() == ["functools", "cache"] + }) } /// B019 pub(crate) fn cached_instance_method(checker: &mut Checker, decorator_list: &[Expr]) { - if !matches!(checker.ctx.scope().kind, ScopeKind::Class(_)) { + if !matches!(checker.semantic_model().scope().kind, ScopeKind::Class(_)) { return; } for decorator in decorator_list { @@ -44,7 +42,7 @@ pub(crate) fn cached_instance_method(checker: &mut Checker, decorator_list: &[Ex } for decorator in decorator_list { if is_cache_func( - checker, + checker.semantic_model(), match decorator { Expr::Call(ast::ExprCall { func, .. }) => func, _ => decorator, diff --git a/crates/ruff/src/rules/flake8_bugbear/rules/duplicate_exceptions.rs b/crates/ruff/src/rules/flake8_bugbear/rules/duplicate_exceptions.rs index 3a4c8c1f69ca4..95eed78d56089 100644 --- a/crates/ruff/src/rules/flake8_bugbear/rules/duplicate_exceptions.rs +++ b/crates/ruff/src/rules/flake8_bugbear/rules/duplicate_exceptions.rs @@ -75,11 +75,7 @@ fn duplicate_handler_exceptions<'a>( } } - if checker - .settings - .rules - .enabled(Rule::DuplicateHandlerException) - { + if checker.enabled(Rule::DuplicateHandlerException) { // TODO(charlie): Handle "BaseException" and redundant exception aliases. 
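// Illustrative sketch (not part of this patch) of the refactoring pattern the
// hunks above apply repeatedly: helpers borrow `&SemanticModel` instead of the
// whole `Checker`, rule gating collapses to `checker.enabled(..)`, and
// call-path checks go through `resolve_call_path`. The names `is_groupby_call`
// and `example_rule` below are placeholders for illustration only.
use ruff_python_semantic::model::SemanticModel;
use rustpython_parser::ast::Expr;

use crate::checkers::ast::Checker;
use crate::registry::Rule;

fn is_groupby_call(model: &SemanticModel, func: &Expr) -> bool {
    model.resolve_call_path(func).map_or(false, |call_path| {
        call_path.as_slice() == ["itertools", "groupby"]
    })
}

fn example_rule(checker: &mut Checker, func: &Expr) {
    if !checker.enabled(Rule::DuplicateHandlerException) {
        return;
    }
    if is_groupby_call(checker.semantic_model(), func) {
        // push a `Diagnostic` for the offending call here
    }
}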
if !duplicates.is_empty() { let mut diagnostic = Diagnostic::new( @@ -94,7 +90,7 @@ fn duplicate_handler_exceptions<'a>( ); if checker.patch(diagnostic.kind.rule()) { #[allow(deprecated)] - diagnostic.set_fix(Fix::unspecified(Edit::range_replacement( + diagnostic.set_fix(Fix::suggested(Edit::range_replacement( if unique_elts.len() == 1 { checker.generator().expr(unique_elts[0]) } else { @@ -140,11 +136,7 @@ pub(crate) fn duplicate_exceptions(checker: &mut Checker, handlers: &[Excepthand } } - if checker - .settings - .rules - .enabled(Rule::DuplicateTryBlockException) - { + if checker.enabled(Rule::DuplicateTryBlockException) { for (name, exprs) in duplicates { for expr in exprs { checker.diagnostics.push(Diagnostic::new( diff --git a/crates/ruff/src/rules/flake8_bugbear/rules/function_call_argument_default.rs b/crates/ruff/src/rules/flake8_bugbear/rules/function_call_argument_default.rs index 2dd097d3f7db7..0d013ec7d3a8e 100644 --- a/crates/ruff/src/rules/flake8_bugbear/rules/function_call_argument_default.rs +++ b/crates/ruff/src/rules/flake8_bugbear/rules/function_call_argument_default.rs @@ -4,11 +4,11 @@ use rustpython_parser::ast::{self, Arguments, Constant, Expr, Ranged}; use ruff_diagnostics::Violation; use ruff_diagnostics::{Diagnostic, DiagnosticKind}; use ruff_macros::{derive_message_formats, violation}; -use ruff_python_ast::call_path::from_qualified_name; -use ruff_python_ast::call_path::{compose_call_path, CallPath}; +use ruff_python_ast::call_path::{compose_call_path, from_qualified_name, CallPath}; use ruff_python_ast::visitor; use ruff_python_ast::visitor::Visitor; use ruff_python_semantic::analyze::typing::is_immutable_func; +use ruff_python_semantic::model::SemanticModel; use crate::checkers::ast::Checker; use crate::rules::flake8_bugbear::rules::mutable_argument_default::is_mutable_func; @@ -73,9 +73,19 @@ impl Violation for FunctionCallInDefaultArgument { } struct ArgumentDefaultVisitor<'a> { - checker: &'a Checker<'a>, - diagnostics: Vec<(DiagnosticKind, TextRange)>, + model: &'a SemanticModel<'a>, extend_immutable_calls: Vec>, + diagnostics: Vec<(DiagnosticKind, TextRange)>, +} + +impl<'a> ArgumentDefaultVisitor<'a> { + fn new(model: &'a SemanticModel<'a>, extend_immutable_calls: Vec>) -> Self { + Self { + model, + extend_immutable_calls, + diagnostics: Vec::new(), + } + } } impl<'a, 'b> Visitor<'b> for ArgumentDefaultVisitor<'b> @@ -85,8 +95,8 @@ where fn visit_expr(&mut self, expr: &'b Expr) { match expr { Expr::Call(ast::ExprCall { func, args, .. 
}) => { - if !is_mutable_func(self.checker, func) - && !is_immutable_func(&self.checker.ctx, func, &self.extend_immutable_calls) + if !is_mutable_func(self.model, func) + && !is_immutable_func(self.model, func, &self.extend_immutable_calls) && !is_nan_or_infinity(func, args) { self.diagnostics.push(( @@ -139,11 +149,8 @@ pub(crate) fn function_call_argument_default(checker: &mut Checker, arguments: & .map(|target| from_qualified_name(target)) .collect(); let diagnostics = { - let mut visitor = ArgumentDefaultVisitor { - checker, - diagnostics: vec![], - extend_immutable_calls, - }; + let mut visitor = + ArgumentDefaultVisitor::new(checker.semantic_model(), extend_immutable_calls); for expr in arguments .defaults .iter() diff --git a/crates/ruff/src/rules/flake8_bugbear/rules/getattr_with_constant.rs b/crates/ruff/src/rules/flake8_bugbear/rules/getattr_with_constant.rs index 0c150609afa29..ff57ce600fa3c 100644 --- a/crates/ruff/src/rules/flake8_bugbear/rules/getattr_with_constant.rs +++ b/crates/ruff/src/rules/flake8_bugbear/rules/getattr_with_constant.rs @@ -3,7 +3,6 @@ use rustpython_parser::ast::{self, Constant, Expr, ExprContext, Ranged}; use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit, Fix}; use ruff_macros::{derive_message_formats, violation}; - use ruff_python_stdlib::identifiers::{is_identifier, is_mangled_private}; use crate::checkers::ast::Checker; @@ -65,10 +64,8 @@ pub(crate) fn getattr_with_constant( } let mut diagnostic = Diagnostic::new(GetAttrWithConstant, expr.range()); - if checker.patch(diagnostic.kind.rule()) { - #[allow(deprecated)] - diagnostic.set_fix(Fix::unspecified(Edit::range_replacement( + diagnostic.set_fix(Fix::suggested(Edit::range_replacement( checker.generator().expr(&attribute(obj, value)), expr.range(), ))); diff --git a/crates/ruff/src/rules/flake8_bugbear/rules/mutable_argument_default.rs b/crates/ruff/src/rules/flake8_bugbear/rules/mutable_argument_default.rs index 6c680ed630814..19698f8120e5e 100644 --- a/crates/ruff/src/rules/flake8_bugbear/rules/mutable_argument_default.rs +++ b/crates/ruff/src/rules/flake8_bugbear/rules/mutable_argument_default.rs @@ -3,6 +3,7 @@ use rustpython_parser::ast::{self, Arguments, Expr, Ranged}; use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; use ruff_python_semantic::analyze::typing::is_immutable_annotation; +use ruff_python_semantic::model::SemanticModel; use crate::checkers::ast::Checker; @@ -25,18 +26,15 @@ const MUTABLE_FUNCS: &[&[&str]] = &[ &["collections", "deque"], ]; -pub(crate) fn is_mutable_func(checker: &Checker, func: &Expr) -> bool { - checker - .ctx - .resolve_call_path(func) - .map_or(false, |call_path| { - MUTABLE_FUNCS - .iter() - .any(|target| call_path.as_slice() == *target) - }) +pub(crate) fn is_mutable_func(model: &SemanticModel, func: &Expr) -> bool { + model.resolve_call_path(func).map_or(false, |call_path| { + MUTABLE_FUNCS + .iter() + .any(|target| call_path.as_slice() == *target) + }) } -fn is_mutable_expr(checker: &Checker, expr: &Expr) -> bool { +fn is_mutable_expr(model: &SemanticModel, expr: &Expr) -> bool { match expr { Expr::List(_) | Expr::Dict(_) @@ -44,7 +42,7 @@ fn is_mutable_expr(checker: &Checker, expr: &Expr) -> bool { | Expr::ListComp(_) | Expr::DictComp(_) | Expr::SetComp(_) => true, - Expr::Call(ast::ExprCall { func, .. }) => is_mutable_func(checker, func), + Expr::Call(ast::ExprCall { func, .. 
}) => is_mutable_func(model, func), _ => false, } } @@ -66,11 +64,10 @@ pub(crate) fn mutable_argument_default(checker: &mut Checker, arguments: &Argume .zip(arguments.defaults.iter().rev()), ) { - if is_mutable_expr(checker, default) - && !arg - .annotation - .as_ref() - .map_or(false, |expr| is_immutable_annotation(&checker.ctx, expr)) + if is_mutable_expr(checker.semantic_model(), default) + && !arg.annotation.as_ref().map_or(false, |expr| { + is_immutable_annotation(checker.semantic_model(), expr) + }) { checker .diagnostics diff --git a/crates/ruff/src/rules/flake8_bugbear/rules/no_explicit_stacklevel.rs b/crates/ruff/src/rules/flake8_bugbear/rules/no_explicit_stacklevel.rs index e76c085cbcf49..a7d20c2801f15 100644 --- a/crates/ruff/src/rules/flake8_bugbear/rules/no_explicit_stacklevel.rs +++ b/crates/ruff/src/rules/flake8_bugbear/rules/no_explicit_stacklevel.rs @@ -45,7 +45,7 @@ pub(crate) fn no_explicit_stacklevel( keywords: &[Keyword], ) { if !checker - .ctx + .semantic_model() .resolve_call_path(func) .map_or(false, |call_path| { call_path.as_slice() == ["warnings", "warn"] diff --git a/crates/ruff/src/rules/flake8_bugbear/rules/redundant_tuple_in_exception_handler.rs b/crates/ruff/src/rules/flake8_bugbear/rules/redundant_tuple_in_exception_handler.rs index 3781b306e904f..9b0bbe6f29f45 100644 --- a/crates/ruff/src/rules/flake8_bugbear/rules/redundant_tuple_in_exception_handler.rs +++ b/crates/ruff/src/rules/flake8_bugbear/rules/redundant_tuple_in_exception_handler.rs @@ -50,7 +50,7 @@ pub(crate) fn redundant_tuple_in_exception_handler( ); if checker.patch(diagnostic.kind.rule()) { #[allow(deprecated)] - diagnostic.set_fix(Fix::unspecified(Edit::range_replacement( + diagnostic.set_fix(Fix::automatic(Edit::range_replacement( checker.generator().expr(elt), type_.range(), ))); diff --git a/crates/ruff/src/rules/flake8_bugbear/rules/reuse_of_groupby_generator.rs b/crates/ruff/src/rules/flake8_bugbear/rules/reuse_of_groupby_generator.rs index 93c0d3c9b86c2..618af8019568b 100644 --- a/crates/ruff/src/rules/flake8_bugbear/rules/reuse_of_groupby_generator.rs +++ b/crates/ruff/src/rules/flake8_bugbear/rules/reuse_of_groupby_generator.rs @@ -342,7 +342,7 @@ pub(crate) fn reuse_of_groupby_generator( }; // Check if the function call is `itertools.groupby` if !checker - .ctx + .semantic_model() .resolve_call_path(func) .map_or(false, |call_path| { call_path.as_slice() == ["itertools", "groupby"] diff --git a/crates/ruff/src/rules/flake8_bugbear/rules/setattr_with_constant.rs b/crates/ruff/src/rules/flake8_bugbear/rules/setattr_with_constant.rs index e68d2db100d4e..9229339bf3de0 100644 --- a/crates/ruff/src/rules/flake8_bugbear/rules/setattr_with_constant.rs +++ b/crates/ruff/src/rules/flake8_bugbear/rules/setattr_with_constant.rs @@ -3,7 +3,6 @@ use rustpython_parser::ast::{self, Constant, Expr, ExprContext, Ranged, Stmt}; use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit, Fix}; use ruff_macros::{derive_message_formats, violation}; - use ruff_python_ast::source_code::Generator; use ruff_python_stdlib::identifiers::{is_identifier, is_mangled_private}; @@ -76,14 +75,12 @@ pub(crate) fn setattr_with_constant( if let Stmt::Expr(ast::StmtExpr { value: child, range: _, - }) = &checker.ctx.stmt() + }) = checker.semantic_model().stmt() { if expr == child.as_ref() { let mut diagnostic = Diagnostic::new(SetAttrWithConstant, expr.range()); - if checker.patch(diagnostic.kind.rule()) { - #[allow(deprecated)] - diagnostic.set_fix(Fix::unspecified(Edit::range_replacement( + 
diagnostic.set_fix(Fix::suggested(Edit::range_replacement( assignment(obj, name, value, checker.generator()), expr.range(), ))); diff --git a/crates/ruff/src/rules/flake8_bugbear/rules/unused_loop_control_variable.rs b/crates/ruff/src/rules/flake8_bugbear/rules/unused_loop_control_variable.rs index 42638a1b7815b..ad64d880b6925 100644 --- a/crates/ruff/src/rules/flake8_bugbear/rules/unused_loop_control_variable.rs +++ b/crates/ruff/src/rules/flake8_bugbear/rules/unused_loop_control_variable.rs @@ -31,7 +31,7 @@ use crate::checkers::ast::Checker; use crate::registry::AsRule; #[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, result_like::BoolLike)] -pub enum Certainty { +enum Certainty { Certain, Uncertain, } @@ -39,15 +39,15 @@ pub enum Certainty { #[violation] pub struct UnusedLoopControlVariable { /// The name of the loop control variable. - pub name: String, + name: String, /// The name to which the variable should be renamed, if it can be /// safely renamed. - pub rename: Option, + rename: Option, /// Whether the variable is certain to be unused in the loop body, or /// merely suspect. A variable _may_ be used, but undetectably /// so, if the loop incorporates by magic control flow (e.g., /// `locals()`). - pub certainty: Certainty, + certainty: Certainty, } impl Violation for UnusedLoopControlVariable { @@ -129,7 +129,7 @@ pub(crate) fn unused_loop_control_variable(checker: &mut Checker, target: &Expr, // Avoid fixing any variables that _may_ be used, but undetectably so. let certainty = Certainty::from(!helpers::uses_magic_variable_access(body, |id| { - checker.ctx.is_builtin(id) + checker.semantic_model().is_builtin(id) })); // Attempt to rename the variable by prepending an underscore, but avoid @@ -152,24 +152,18 @@ pub(crate) fn unused_loop_control_variable(checker: &mut Checker, target: &Expr, ); if let Some(rename) = rename { if certainty.into() && checker.patch(diagnostic.kind.rule()) { - // Find the `BindingKind::LoopVar` corresponding to the name. - let scope = checker.ctx.scope(); - let binding = scope.bindings_for_name(name).find_map(|index| { - let binding = &checker.ctx.bindings[*index]; - binding - .source - .and_then(|source| (Some(source) == checker.ctx.stmt_id).then_some(binding)) - }); - if let Some(binding) = binding { - if binding.kind.is_loop_var() { - if !binding.used() { - #[allow(deprecated)] - diagnostic.set_fix(Fix::unspecified(Edit::range_replacement( - rename, - expr.range(), - ))); - } - } + // Avoid fixing if the variable, or any future bindings to the variable, are + // used _after_ the loop. 
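// Illustrative sketch (not part of this patch) of the autofix pattern these
// hunks converge on: create the diagnostic over the offending range and, when
// patching is enabled for the rule, attach a fix with an explicit
// applicability (roughly, `Fix::suggested` for fixes that may change behavior,
// `Fix::automatic` for safe ones) instead of the deprecated `Fix::unspecified`.
use ruff_diagnostics::{Diagnostic, DiagnosticKind, Edit, Fix};
use rustpython_parser::ast::{Ranged, Stmt};

use crate::checkers::ast::Checker;
use crate::registry::AsRule;

fn report_with_fix(
    checker: &mut Checker,
    stmt: &Stmt,
    kind: DiagnosticKind,
    replacement: String,
) {
    let mut diagnostic = Diagnostic::new(kind, stmt.range());
    if checker.patch(diagnostic.kind.rule()) {
        diagnostic.set_fix(Fix::suggested(Edit::range_replacement(
            replacement,
            stmt.range(),
        )));
    }
    checker.diagnostics.push(diagnostic);
}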
+ let scope = checker.semantic_model().scope(); + if scope + .bindings_for_name(name) + .map(|binding_id| &checker.semantic_model().bindings[binding_id]) + .all(|binding| !binding.is_used()) + { + diagnostic.set_fix(Fix::suggested(Edit::range_replacement( + rename, + expr.range(), + ))); } } } diff --git a/crates/ruff/src/rules/flake8_bugbear/rules/useless_contextlib_suppress.rs b/crates/ruff/src/rules/flake8_bugbear/rules/useless_contextlib_suppress.rs index ad2061ba7f306..456d47af047c2 100644 --- a/crates/ruff/src/rules/flake8_bugbear/rules/useless_contextlib_suppress.rs +++ b/crates/ruff/src/rules/flake8_bugbear/rules/useless_contextlib_suppress.rs @@ -1,9 +1,10 @@ use rustpython_parser::ast::{Expr, Ranged}; -use crate::checkers::ast::Checker; use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; +use crate::checkers::ast::Checker; + #[violation] pub struct UselessContextlibSuppress; @@ -26,7 +27,7 @@ pub(crate) fn useless_contextlib_suppress( ) { if args.is_empty() && checker - .ctx + .semantic_model() .resolve_call_path(func) .map_or(false, |call_path| { call_path.as_slice() == ["contextlib", "suppress"] diff --git a/crates/ruff/src/rules/flake8_bugbear/rules/useless_expression.rs b/crates/ruff/src/rules/flake8_bugbear/rules/useless_expression.rs index eb845b78eb19d..00376dc8b04db 100644 --- a/crates/ruff/src/rules/flake8_bugbear/rules/useless_expression.rs +++ b/crates/ruff/src/rules/flake8_bugbear/rules/useless_expression.rs @@ -53,7 +53,7 @@ pub(crate) fn useless_expression(checker: &mut Checker, value: &Expr) { } // Ignore statements that have side effects. - if contains_effect(value, |id| checker.ctx.is_builtin(id)) { + if contains_effect(value, |id| checker.semantic_model().is_builtin(id)) { // Flag attributes as useless expressions, even if they're attached to calls or other // expressions. if matches!(value, Expr::Attribute(_)) { diff --git a/crates/ruff/src/rules/flake8_bugbear/rules/zip_without_explicit_strict.rs b/crates/ruff/src/rules/flake8_bugbear/rules/zip_without_explicit_strict.rs index 5db77e2d381fe..e1314239e081e 100644 --- a/crates/ruff/src/rules/flake8_bugbear/rules/zip_without_explicit_strict.rs +++ b/crates/ruff/src/rules/flake8_bugbear/rules/zip_without_explicit_strict.rs @@ -1,9 +1,10 @@ use rustpython_parser::ast::{self, Expr, Keyword, Ranged}; -use crate::checkers::ast::Checker; use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; +use crate::checkers::ast::Checker; + #[violation] pub struct ZipWithoutExplicitStrict; @@ -23,7 +24,7 @@ pub(crate) fn zip_without_explicit_strict( ) { if let Expr::Name(ast::ExprName { id, .. 
}) = func { if id == "zip" - && checker.ctx.is_builtin("zip") + && checker.semantic_model().is_builtin("zip") && !kwargs .iter() .any(|keyword| keyword.arg.as_ref().map_or(false, |name| name == "strict")) diff --git a/crates/ruff/src/rules/flake8_bugbear/snapshots/ruff__rules__flake8_bugbear__tests__B007_B007.py.snap b/crates/ruff/src/rules/flake8_bugbear/snapshots/ruff__rules__flake8_bugbear__tests__B007_B007.py.snap index bb9c28d9e3c9c..f263105c6083e 100644 --- a/crates/ruff/src/rules/flake8_bugbear/snapshots/ruff__rules__flake8_bugbear__tests__B007_B007.py.snap +++ b/crates/ruff/src/rules/flake8_bugbear/snapshots/ruff__rules__flake8_bugbear__tests__B007_B007.py.snap @@ -152,10 +152,10 @@ B007.py:68:14: B007 [*] Loop control variable `bar` not used within loop body 70 70 | break 71 71 | -B007.py:77:14: B007 [*] Loop control variable `bar` not used within loop body +B007.py:77:14: B007 Loop control variable `bar` not used within loop body | 77 | def f(): -78 | # Fixable. +78 | # Unfixable. 79 | for foo, bar, baz in (["1", "2", "3"],): | ^^^ B007 80 | if foo or baz: @@ -163,23 +163,24 @@ B007.py:77:14: B007 [*] Loop control variable `bar` not used within loop body | = help: Rename unused `bar` to `_bar` -β„Ή Suggested fix -74 74 | -75 75 | def f(): -76 76 | # Fixable. -77 |- for foo, bar, baz in (["1", "2", "3"],): - 77 |+ for foo, _bar, baz in (["1", "2", "3"],): -78 78 | if foo or baz: -79 79 | break -80 80 | - -B007.py:87:5: B007 Loop control variable `line_` not used within loop body - | -87 | # Unfixable due to trailing underscore (`_line_` wouldn't be considered an ignorable -88 | # variable name). -89 | for line_ in range(self.header_lines): - | ^^^^^ B007 -90 | fp.readline() +B007.py:88:14: B007 Loop control variable `bar` not used within loop body + | +88 | def f(): +89 | # Unfixable (false negative) due to usage of `bar` outside of loop. +90 | for foo, bar, baz in (["1", "2", "3"],): + | ^^^ B007 +91 | if foo or baz: +92 | break | + = help: Rename unused `bar` to `_bar` + +B007.py:98:5: B007 Loop control variable `line_` not used within loop body + | + 98 | # Unfixable due to trailing underscore (`_line_` wouldn't be considered an ignorable + 99 | # variable name). +100 | for line_ in range(self.header_lines): + | ^^^^^ B007 +101 | fp.readline() + | diff --git a/crates/ruff/src/rules/flake8_bugbear/snapshots/ruff__rules__flake8_bugbear__tests__B013_B013.py.snap b/crates/ruff/src/rules/flake8_bugbear/snapshots/ruff__rules__flake8_bugbear__tests__B013_B013.py.snap index da61eecc72a3c..698bf0d0f33dd 100644 --- a/crates/ruff/src/rules/flake8_bugbear/snapshots/ruff__rules__flake8_bugbear__tests__B013_B013.py.snap +++ b/crates/ruff/src/rules/flake8_bugbear/snapshots/ruff__rules__flake8_bugbear__tests__B013_B013.py.snap @@ -1,5 +1,6 @@ --- source: crates/ruff/src/rules/flake8_bugbear/mod.rs +assertion_line: 57 --- B013.py:3:8: B013 [*] A length-one tuple literal is redundant. Write `except ValueError` instead of `except (ValueError,)`. | @@ -12,7 +13,7 @@ B013.py:3:8: B013 [*] A length-one tuple literal is redundant. 
Write `except Val | = help: Replace with `except ValueError` -β„Ή Suggested fix +β„Ή Fix 1 1 | try: 2 2 | pass 3 |-except (ValueError,): diff --git a/crates/ruff/src/rules/flake8_builtins/helpers.rs b/crates/ruff/src/rules/flake8_builtins/helpers.rs new file mode 100644 index 0000000000000..1f1eb0f3baffd --- /dev/null +++ b/crates/ruff/src/rules/flake8_builtins/helpers.rs @@ -0,0 +1,45 @@ +use rustpython_parser::ast::{Excepthandler, Expr, Ranged, Stmt}; + +use ruff_python_ast::helpers::identifier_range; +use ruff_python_ast::source_code::Locator; +use ruff_python_stdlib::builtins::BUILTINS; +use ruff_text_size::TextRange; + +pub(super) fn shadows_builtin(name: &str, ignorelist: &[String]) -> bool { + BUILTINS.contains(&name) && ignorelist.iter().all(|ignore| ignore != name) +} + +#[derive(Debug, Copy, Clone, PartialEq)] +pub(crate) enum AnyShadowing<'a> { + Expression(&'a Expr), + Statement(&'a Stmt), + ExceptHandler(&'a Excepthandler), +} + +impl AnyShadowing<'_> { + pub(crate) fn range(self, locator: &Locator) -> TextRange { + match self { + AnyShadowing::Expression(expr) => expr.range(), + AnyShadowing::Statement(stmt) => identifier_range(stmt, locator), + AnyShadowing::ExceptHandler(handler) => handler.range(), + } + } +} + +impl<'a> From<&'a Stmt> for AnyShadowing<'a> { + fn from(value: &'a Stmt) -> Self { + AnyShadowing::Statement(value) + } +} + +impl<'a> From<&'a Expr> for AnyShadowing<'a> { + fn from(value: &'a Expr) -> Self { + AnyShadowing::Expression(value) + } +} + +impl<'a> From<&'a Excepthandler> for AnyShadowing<'a> { + fn from(value: &'a Excepthandler) -> Self { + AnyShadowing::ExceptHandler(value) + } +} diff --git a/crates/ruff/src/rules/flake8_builtins/mod.rs b/crates/ruff/src/rules/flake8_builtins/mod.rs index 5229d00171c8f..58aea5fba469e 100644 --- a/crates/ruff/src/rules/flake8_builtins/mod.rs +++ b/crates/ruff/src/rules/flake8_builtins/mod.rs @@ -1,4 +1,5 @@ //! Rules from [flake8-builtins](https://pypi.org/project/flake8-builtins/). +pub(crate) mod helpers; pub(crate) mod rules; pub mod settings; diff --git a/crates/ruff/src/rules/flake8_builtins/rules.rs b/crates/ruff/src/rules/flake8_builtins/rules.rs deleted file mode 100644 index 46193c1e37a40..0000000000000 --- a/crates/ruff/src/rules/flake8_builtins/rules.rs +++ /dev/null @@ -1,216 +0,0 @@ -use ruff_diagnostics::Diagnostic; -use ruff_diagnostics::Violation; -use ruff_macros::{derive_message_formats, violation}; -use ruff_python_stdlib::builtins::BUILTINS; -use rustpython_parser::ast::Ranged; - -use crate::checkers::ast::Checker; - -/// ## What it does -/// Checks for variable (and function) assignments that use the same name -/// as a builtin. -/// -/// ## Why is this bad? -/// Reusing a builtin name for the name of a variable increases the -/// difficulty of reading and maintaining the code, and can cause -/// non-obvious errors, as readers may mistake the variable for the -/// builtin and vice versa. -/// -/// Builtins can be marked as exceptions to this rule via the -/// [`flake8-builtins.builtins-ignorelist`] configuration option. 
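// Illustrative behavior of the new flake8_builtins `helpers.rs` above (a
// sketch, not part of this patch, written as if it sat alongside the helpers):
// a name only counts as shadowing when it is a real builtin and is not in the
// configured ignore-list.
#[cfg(test)]
mod helper_behavior {
    use super::*;

    #[test]
    fn shadows_builtin_respects_ignorelist() {
        assert!(shadows_builtin("list", &[]));
        assert!(!shadows_builtin("list", &["list".to_string()]));
        assert!(!shadows_builtin("not_a_builtin", &[]));
    }
}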
-/// -/// ## Options -/// -/// - `flake8-builtins.builtins-ignorelist` -/// -/// ## Example -/// ```python -/// def find_max(list_of_lists): -/// max = 0 -/// for flat_list in list_of_lists: -/// for value in flat_list: -/// max = max(max, value) # TypeError: 'int' object is not callable -/// return max -/// ``` -/// -/// Use instead: -/// ```python -/// def find_max(list_of_lists): -/// result = 0 -/// for flat_list in list_of_lists: -/// for value in flat_list: -/// result = max(result, value) -/// return result -/// ``` -/// -/// - [_Why is it a bad idea to name a variable `id` in Python?_](https://stackoverflow.com/questions/77552/id-is-a-bad-variable-name-in-python) -#[violation] -pub struct BuiltinVariableShadowing { - name: String, -} - -impl Violation for BuiltinVariableShadowing { - #[derive_message_formats] - fn message(&self) -> String { - let BuiltinVariableShadowing { name } = self; - format!("Variable `{name}` is shadowing a Python builtin") - } -} - -/// ## What it does -/// Checks for any function arguments that use the same name as a builtin. -/// -/// ## Why is this bad? -/// Reusing a builtin name for the name of an argument increases the -/// difficulty of reading and maintaining the code, and can cause -/// non-obvious errors, as readers may mistake the argument for the -/// builtin and vice versa. -/// -/// Builtins can be marked as exceptions to this rule via the -/// [`flake8-builtins.builtins-ignorelist`] configuration option. -/// -/// ## Options -/// -/// - `flake8-builtins.builtins-ignorelist` -/// -/// ## Example -/// ```python -/// def remove_duplicates(list, list2): -/// result = set() -/// for value in list: -/// result.add(value) -/// for value in list2: -/// result.add(value) -/// return list(result) # TypeError: 'list' object is not callable -/// ``` -/// -/// Use instead: -/// ```python -/// def remove_duplicates(list1, list2): -/// result = set() -/// for value in list1: -/// result.add(value) -/// for value in list2: -/// result.add(value) -/// return list(result) -/// ``` -/// -/// ## References -/// - [_Is it bad practice to use a built-in function name as an attribute or method identifier?_](https://stackoverflow.com/questions/9109333/is-it-bad-practice-to-use-a-built-in-function-name-as-an-attribute-or-method-ide) -/// - [_Why is it a bad idea to name a variable `id` in Python?_](https://stackoverflow.com/questions/77552/id-is-a-bad-variable-name-in-python) -#[violation] -pub struct BuiltinArgumentShadowing { - name: String, -} - -impl Violation for BuiltinArgumentShadowing { - #[derive_message_formats] - fn message(&self) -> String { - let BuiltinArgumentShadowing { name } = self; - format!("Argument `{name}` is shadowing a Python builtin") - } -} - -/// ## What it does -/// Checks for any class attributes that use the same name as a builtin. -/// -/// ## Why is this bad? -/// Reusing a builtin name for the name of an attribute increases the -/// difficulty of reading and maintaining the code, and can cause -/// non-obvious errors, as readers may mistake the attribute for the -/// builtin and vice versa. -/// -/// Builtins can be marked as exceptions to this rule via the -/// [`flake8-builtins.builtins-ignorelist`] configuration option, or -/// converted to the appropriate dunder method. 
-/// -/// ## Options -/// -/// - `flake8-builtins.builtins-ignorelist` -/// -/// ## Example -/// ```python -/// class Shadow: -/// def int(): -/// return 0 -/// ``` -/// -/// Use instead: -/// ```python -/// class Shadow: -/// def to_int(): -/// return 0 -/// ``` -/// -/// Or: -/// ```python -/// class Shadow: -/// # Callable as `int(shadow)` -/// def __int__(): -/// return 0 -/// ``` -/// -/// ## References -/// - [_Is it bad practice to use a built-in function name as an attribute or method identifier?_](https://stackoverflow.com/questions/9109333/is-it-bad-practice-to-use-a-built-in-function-name-as-an-attribute-or-method-ide) -/// - [_Why is it a bad idea to name a variable `id` in Python?_](https://stackoverflow.com/questions/77552/id-is-a-bad-variable-name-in-python) -#[violation] -pub struct BuiltinAttributeShadowing { - name: String, -} - -impl Violation for BuiltinAttributeShadowing { - #[derive_message_formats] - fn message(&self) -> String { - let BuiltinAttributeShadowing { name } = self; - format!("Class attribute `{name}` is shadowing a Python builtin") - } -} - -fn shadows_builtin(name: &str, ignorelist: &[String]) -> bool { - BUILTINS.contains(&name) && ignorelist.iter().all(|ignore| ignore != name) -} - -/// A001 -pub(crate) fn builtin_variable_shadowing(checker: &mut Checker, name: &str, attributed: &T) -where - T: Ranged, -{ - if shadows_builtin(name, &checker.settings.flake8_builtins.builtins_ignorelist) { - checker.diagnostics.push(Diagnostic::new( - BuiltinVariableShadowing { - name: name.to_string(), - }, - attributed.range(), - )); - } -} - -/// A002 -pub(crate) fn builtin_argument_shadowing(checker: &mut Checker, name: &str, attributed: &T) -where - T: Ranged, -{ - if shadows_builtin(name, &checker.settings.flake8_builtins.builtins_ignorelist) { - checker.diagnostics.push(Diagnostic::new( - BuiltinArgumentShadowing { - name: name.to_string(), - }, - attributed.range(), - )); - } -} - -/// A003 -pub(crate) fn builtin_attribute_shadowing(checker: &mut Checker, name: &str, attributed: &T) -where - T: Ranged, -{ - if shadows_builtin(name, &checker.settings.flake8_builtins.builtins_ignorelist) { - checker.diagnostics.push(Diagnostic::new( - BuiltinAttributeShadowing { - name: name.to_string(), - }, - attributed.range(), - )); - } -} diff --git a/crates/ruff/src/rules/flake8_builtins/rules/builtin_argument_shadowing.rs b/crates/ruff/src/rules/flake8_builtins/rules/builtin_argument_shadowing.rs new file mode 100644 index 0000000000000..e6aea40d3d1ec --- /dev/null +++ b/crates/ruff/src/rules/flake8_builtins/rules/builtin_argument_shadowing.rs @@ -0,0 +1,78 @@ +use rustpython_parser::ast::{Arg, Ranged}; + +use ruff_diagnostics::Diagnostic; +use ruff_diagnostics::Violation; +use ruff_macros::{derive_message_formats, violation}; + +use crate::checkers::ast::Checker; + +use super::super::helpers::shadows_builtin; + +/// ## What it does +/// Checks for any function arguments that use the same name as a builtin. +/// +/// ## Why is this bad? +/// Reusing a builtin name for the name of an argument increases the +/// difficulty of reading and maintaining the code, and can cause +/// non-obvious errors, as readers may mistake the argument for the +/// builtin and vice versa. +/// +/// Builtins can be marked as exceptions to this rule via the +/// [`flake8-builtins.builtins-ignorelist`] configuration option. 
+/// +/// ## Options +/// +/// - `flake8-builtins.builtins-ignorelist` +/// +/// ## Example +/// ```python +/// def remove_duplicates(list, list2): +/// result = set() +/// for value in list: +/// result.add(value) +/// for value in list2: +/// result.add(value) +/// return list(result) # TypeError: 'list' object is not callable +/// ``` +/// +/// Use instead: +/// ```python +/// def remove_duplicates(list1, list2): +/// result = set() +/// for value in list1: +/// result.add(value) +/// for value in list2: +/// result.add(value) +/// return list(result) +/// ``` +/// +/// ## References +/// - [_Is it bad practice to use a built-in function name as an attribute or method identifier?_](https://stackoverflow.com/questions/9109333/is-it-bad-practice-to-use-a-built-in-function-name-as-an-attribute-or-method-ide) +/// - [_Why is it a bad idea to name a variable `id` in Python?_](https://stackoverflow.com/questions/77552/id-is-a-bad-variable-name-in-python) +#[violation] +pub struct BuiltinArgumentShadowing { + name: String, +} + +impl Violation for BuiltinArgumentShadowing { + #[derive_message_formats] + fn message(&self) -> String { + let BuiltinArgumentShadowing { name } = self; + format!("Argument `{name}` is shadowing a Python builtin") + } +} + +/// A002 +pub(crate) fn builtin_argument_shadowing(checker: &mut Checker, argument: &Arg) { + if shadows_builtin( + argument.arg.as_str(), + &checker.settings.flake8_builtins.builtins_ignorelist, + ) { + checker.diagnostics.push(Diagnostic::new( + BuiltinArgumentShadowing { + name: argument.arg.to_string(), + }, + argument.range(), + )); + } +} diff --git a/crates/ruff/src/rules/flake8_builtins/rules/builtin_attribute_shadowing.rs b/crates/ruff/src/rules/flake8_builtins/rules/builtin_attribute_shadowing.rs new file mode 100644 index 0000000000000..701fa4edd3c93 --- /dev/null +++ b/crates/ruff/src/rules/flake8_builtins/rules/builtin_attribute_shadowing.rs @@ -0,0 +1,78 @@ +use ruff_diagnostics::Diagnostic; +use ruff_diagnostics::Violation; +use ruff_macros::{derive_message_formats, violation}; + +use crate::checkers::ast::Checker; + +use super::super::helpers::{shadows_builtin, AnyShadowing}; + +/// ## What it does +/// Checks for any class attributes that use the same name as a builtin. +/// +/// ## Why is this bad? +/// Reusing a builtin name for the name of an attribute increases the +/// difficulty of reading and maintaining the code, and can cause +/// non-obvious errors, as readers may mistake the attribute for the +/// builtin and vice versa. +/// +/// Builtins can be marked as exceptions to this rule via the +/// [`flake8-builtins.builtins-ignorelist`] configuration option, or +/// converted to the appropriate dunder method. 
+/// +/// ## Options +/// +/// - `flake8-builtins.builtins-ignorelist` +/// +/// ## Example +/// ```python +/// class Shadow: +/// def int(): +/// return 0 +/// ``` +/// +/// Use instead: +/// ```python +/// class Shadow: +/// def to_int(): +/// return 0 +/// ``` +/// +/// Or: +/// ```python +/// class Shadow: +/// # Callable as `int(shadow)` +/// def __int__(): +/// return 0 +/// ``` +/// +/// ## References +/// - [_Is it bad practice to use a built-in function name as an attribute or method identifier?_](https://stackoverflow.com/questions/9109333/is-it-bad-practice-to-use-a-built-in-function-name-as-an-attribute-or-method-ide) +/// - [_Why is it a bad idea to name a variable `id` in Python?_](https://stackoverflow.com/questions/77552/id-is-a-bad-variable-name-in-python) +#[violation] +pub struct BuiltinAttributeShadowing { + name: String, +} + +impl Violation for BuiltinAttributeShadowing { + #[derive_message_formats] + fn message(&self) -> String { + let BuiltinAttributeShadowing { name } = self; + format!("Class attribute `{name}` is shadowing a Python builtin") + } +} + +/// A003 +pub(crate) fn builtin_attribute_shadowing( + checker: &mut Checker, + name: &str, + shadowing: AnyShadowing, +) { + if shadows_builtin(name, &checker.settings.flake8_builtins.builtins_ignorelist) { + checker.diagnostics.push(Diagnostic::new( + BuiltinAttributeShadowing { + name: name.to_string(), + }, + shadowing.range(checker.locator), + )); + } +} diff --git a/crates/ruff/src/rules/flake8_builtins/rules/builtin_variable_shadowing.rs b/crates/ruff/src/rules/flake8_builtins/rules/builtin_variable_shadowing.rs new file mode 100644 index 0000000000000..a965af53cafb1 --- /dev/null +++ b/crates/ruff/src/rules/flake8_builtins/rules/builtin_variable_shadowing.rs @@ -0,0 +1,74 @@ +use ruff_diagnostics::Diagnostic; +use ruff_diagnostics::Violation; +use ruff_macros::{derive_message_formats, violation}; + +use crate::checkers::ast::Checker; + +use super::super::helpers::{shadows_builtin, AnyShadowing}; + +/// ## What it does +/// Checks for variable (and function) assignments that use the same name +/// as a builtin. +/// +/// ## Why is this bad? +/// Reusing a builtin name for the name of a variable increases the +/// difficulty of reading and maintaining the code, and can cause +/// non-obvious errors, as readers may mistake the variable for the +/// builtin and vice versa. +/// +/// Builtins can be marked as exceptions to this rule via the +/// [`flake8-builtins.builtins-ignorelist`] configuration option. 
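// Presumed call sites for the rewritten A001/A003 entry points (the checker
// itself is outside this diff, so these signatures of the callers are
// assumptions for illustration): the `From` impls on `AnyShadowing` let each
// caller pass whichever node kind it has in hand.
use rustpython_parser::ast::Stmt;

use crate::checkers::ast::Checker;
use crate::rules::flake8_builtins::helpers::AnyShadowing;
use crate::rules::flake8_builtins::rules::{
    builtin_attribute_shadowing, builtin_variable_shadowing,
};

fn on_class_body_stmt(checker: &mut Checker, name: &str, stmt: &Stmt) {
    builtin_attribute_shadowing(checker, name, AnyShadowing::from(stmt));
}

fn on_module_level_stmt(checker: &mut Checker, name: &str, stmt: &Stmt) {
    builtin_variable_shadowing(checker, name, stmt.into());
}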
+/// +/// ## Options +/// +/// - `flake8-builtins.builtins-ignorelist` +/// +/// ## Example +/// ```python +/// def find_max(list_of_lists): +/// max = 0 +/// for flat_list in list_of_lists: +/// for value in flat_list: +/// max = max(max, value) # TypeError: 'int' object is not callable +/// return max +/// ``` +/// +/// Use instead: +/// ```python +/// def find_max(list_of_lists): +/// result = 0 +/// for flat_list in list_of_lists: +/// for value in flat_list: +/// result = max(result, value) +/// return result +/// ``` +/// +/// - [_Why is it a bad idea to name a variable `id` in Python?_](https://stackoverflow.com/questions/77552/id-is-a-bad-variable-name-in-python) +#[violation] +pub struct BuiltinVariableShadowing { + name: String, +} + +impl Violation for BuiltinVariableShadowing { + #[derive_message_formats] + fn message(&self) -> String { + let BuiltinVariableShadowing { name } = self; + format!("Variable `{name}` is shadowing a Python builtin") + } +} + +/// A001 +pub(crate) fn builtin_variable_shadowing( + checker: &mut Checker, + name: &str, + shadowing: AnyShadowing, +) { + if shadows_builtin(name, &checker.settings.flake8_builtins.builtins_ignorelist) { + checker.diagnostics.push(Diagnostic::new( + BuiltinVariableShadowing { + name: name.to_string(), + }, + shadowing.range(checker.locator), + )); + } +} diff --git a/crates/ruff/src/rules/flake8_builtins/rules/mod.rs b/crates/ruff/src/rules/flake8_builtins/rules/mod.rs new file mode 100644 index 0000000000000..f9b8c3c3d7d48 --- /dev/null +++ b/crates/ruff/src/rules/flake8_builtins/rules/mod.rs @@ -0,0 +1,9 @@ +pub(crate) use builtin_argument_shadowing::{builtin_argument_shadowing, BuiltinArgumentShadowing}; +pub(crate) use builtin_attribute_shadowing::{ + builtin_attribute_shadowing, BuiltinAttributeShadowing, +}; +pub(crate) use builtin_variable_shadowing::{builtin_variable_shadowing, BuiltinVariableShadowing}; + +mod builtin_argument_shadowing; +mod builtin_attribute_shadowing; +mod builtin_variable_shadowing; diff --git a/crates/ruff/src/rules/flake8_builtins/rules/rules.rs b/crates/ruff/src/rules/flake8_builtins/rules/rules.rs new file mode 100644 index 0000000000000..b28b04f643122 --- /dev/null +++ b/crates/ruff/src/rules/flake8_builtins/rules/rules.rs @@ -0,0 +1,3 @@ + + + diff --git a/crates/ruff/src/rules/flake8_builtins/snapshots/ruff__rules__flake8_builtins__tests__A001_A001.py.snap b/crates/ruff/src/rules/flake8_builtins/snapshots/ruff__rules__flake8_builtins__tests__A001_A001.py.snap index 1079c55bbe471..d65e5fa47670a 100644 --- a/crates/ruff/src/rules/flake8_builtins/snapshots/ruff__rules__flake8_builtins__tests__A001_A001.py.snap +++ b/crates/ruff/src/rules/flake8_builtins/snapshots/ruff__rules__flake8_builtins__tests__A001_A001.py.snap @@ -104,26 +104,22 @@ A001.py:11:1: A001 Variable `id` is shadowing a Python builtin 15 | def bytes(): | -A001.py:13:1: A001 Variable `bytes` is shadowing a Python builtin +A001.py:13:5: A001 Variable `bytes` is shadowing a Python builtin | -13 | id = 4 -14 | -15 | / def bytes(): -16 | | pass - | |________^ A001 -17 | -18 | class slice: +13 | id = 4 +14 | +15 | def bytes(): + | ^^^^^ A001 +16 | pass | -A001.py:16:1: A001 Variable `slice` is shadowing a Python builtin +A001.py:16:7: A001 Variable `slice` is shadowing a Python builtin | -16 | pass -17 | -18 | / class slice: -19 | | pass - | |________^ A001 -20 | -21 | try: +16 | pass +17 | +18 | class slice: + | ^^^^^ A001 +19 | pass | A001.py:21:1: A001 Variable `ValueError` is shadowing a Python builtin diff --git 
a/crates/ruff/src/rules/flake8_builtins/snapshots/ruff__rules__flake8_builtins__tests__A001_A001.py_builtins_ignorelist.snap b/crates/ruff/src/rules/flake8_builtins/snapshots/ruff__rules__flake8_builtins__tests__A001_A001.py_builtins_ignorelist.snap index d3480ecbaadf1..a709ce3c92a35 100644 --- a/crates/ruff/src/rules/flake8_builtins/snapshots/ruff__rules__flake8_builtins__tests__A001_A001.py_builtins_ignorelist.snap +++ b/crates/ruff/src/rules/flake8_builtins/snapshots/ruff__rules__flake8_builtins__tests__A001_A001.py_builtins_ignorelist.snap @@ -84,26 +84,22 @@ A001.py:9:6: A001 Variable `max` is shadowing a Python builtin 13 | id = 4 | -A001.py:13:1: A001 Variable `bytes` is shadowing a Python builtin +A001.py:13:5: A001 Variable `bytes` is shadowing a Python builtin | -13 | id = 4 -14 | -15 | / def bytes(): -16 | | pass - | |________^ A001 -17 | -18 | class slice: +13 | id = 4 +14 | +15 | def bytes(): + | ^^^^^ A001 +16 | pass | -A001.py:16:1: A001 Variable `slice` is shadowing a Python builtin +A001.py:16:7: A001 Variable `slice` is shadowing a Python builtin | -16 | pass -17 | -18 | / class slice: -19 | | pass - | |________^ A001 -20 | -21 | try: +16 | pass +17 | +18 | class slice: + | ^^^^^ A001 +19 | pass | A001.py:21:1: A001 Variable `ValueError` is shadowing a Python builtin diff --git a/crates/ruff/src/rules/flake8_builtins/snapshots/ruff__rules__flake8_builtins__tests__A003_A003.py.snap b/crates/ruff/src/rules/flake8_builtins/snapshots/ruff__rules__flake8_builtins__tests__A003_A003.py.snap index 590cda5054f01..1513b901fd94f 100644 --- a/crates/ruff/src/rules/flake8_builtins/snapshots/ruff__rules__flake8_builtins__tests__A003_A003.py.snap +++ b/crates/ruff/src/rules/flake8_builtins/snapshots/ruff__rules__flake8_builtins__tests__A003_A003.py.snap @@ -29,14 +29,13 @@ A003.py:4:5: A003 Class attribute `dir` is shadowing a Python builtin 8 | def __init__(self): | -A003.py:11:5: A003 Class attribute `str` is shadowing a Python builtin +A003.py:11:9: A003 Class attribute `str` is shadowing a Python builtin | -11 | self.dir = "." -12 | -13 | def str(self): - | _____^ -14 | | pass - | |____________^ A003 +11 | self.dir = "." +12 | +13 | def str(self): + | ^^^ A003 +14 | pass | diff --git a/crates/ruff/src/rules/flake8_builtins/snapshots/ruff__rules__flake8_builtins__tests__A003_A003.py_builtins_ignorelist.snap b/crates/ruff/src/rules/flake8_builtins/snapshots/ruff__rules__flake8_builtins__tests__A003_A003.py_builtins_ignorelist.snap index 6549678fb9752..82c08824fcbc8 100644 --- a/crates/ruff/src/rules/flake8_builtins/snapshots/ruff__rules__flake8_builtins__tests__A003_A003.py_builtins_ignorelist.snap +++ b/crates/ruff/src/rules/flake8_builtins/snapshots/ruff__rules__flake8_builtins__tests__A003_A003.py_builtins_ignorelist.snap @@ -10,14 +10,13 @@ A003.py:2:5: A003 Class attribute `ImportError` is shadowing a Python builtin 5 | dir = "/" | -A003.py:11:5: A003 Class attribute `str` is shadowing a Python builtin +A003.py:11:9: A003 Class attribute `str` is shadowing a Python builtin | -11 | self.dir = "." -12 | -13 | def str(self): - | _____^ -14 | | pass - | |____________^ A003 +11 | self.dir = "." 
+12 | +13 | def str(self): + | ^^^ A003 +14 | pass | diff --git a/crates/ruff/src/rules/flake8_commas/mod.rs b/crates/ruff/src/rules/flake8_commas/mod.rs index 9736b33f84814..675c70a8cc538 100644 --- a/crates/ruff/src/rules/flake8_commas/mod.rs +++ b/crates/ruff/src/rules/flake8_commas/mod.rs @@ -6,7 +6,6 @@ mod tests { use std::path::Path; use anyhow::Result; - use test_case::test_case; use crate::registry::Rule; diff --git a/crates/ruff/src/rules/flake8_commas/rules/mod.rs b/crates/ruff/src/rules/flake8_commas/rules/mod.rs new file mode 100644 index 0000000000000..0286278d8c7c9 --- /dev/null +++ b/crates/ruff/src/rules/flake8_commas/rules/mod.rs @@ -0,0 +1,5 @@ +pub(crate) use trailing_commas::{ + trailing_commas, MissingTrailingComma, ProhibitedTrailingComma, TrailingCommaOnBareTuple, +}; + +mod trailing_commas; diff --git a/crates/ruff/src/rules/flake8_commas/rules.rs b/crates/ruff/src/rules/flake8_commas/rules/trailing_commas.rs similarity index 100% rename from crates/ruff/src/rules/flake8_commas/rules.rs rename to crates/ruff/src/rules/flake8_commas/rules/trailing_commas.rs diff --git a/crates/ruff/src/rules/flake8_comprehensions/fixes.rs b/crates/ruff/src/rules/flake8_comprehensions/fixes.rs index 24deb88e1eb92..b9054be76381d 100644 --- a/crates/ruff/src/rules/flake8_comprehensions/fixes.rs +++ b/crates/ruff/src/rules/flake8_comprehensions/fixes.rs @@ -2,33 +2,20 @@ use anyhow::{bail, Result}; use itertools::Itertools; use libcst_native::{ Arg, AssignEqual, AssignTargetExpression, Call, Codegen, CodegenState, Comment, CompFor, Dict, - DictComp, DictElement, Element, EmptyLine, Expr, Expression, GeneratorExp, LeftCurlyBrace, - LeftParen, LeftSquareBracket, List, ListComp, Name, ParenthesizableWhitespace, - ParenthesizedWhitespace, RightCurlyBrace, RightParen, RightSquareBracket, Set, SetComp, - SimpleString, SimpleWhitespace, TrailingWhitespace, Tuple, + DictComp, DictElement, Element, EmptyLine, Expression, GeneratorExp, LeftCurlyBrace, LeftParen, + LeftSquareBracket, List, ListComp, Name, ParenthesizableWhitespace, ParenthesizedWhitespace, + RightCurlyBrace, RightParen, RightSquareBracket, Set, SetComp, SimpleString, SimpleWhitespace, + TrailingWhitespace, Tuple, }; use rustpython_parser::ast::Ranged; use ruff_diagnostics::{Edit, Fix}; use ruff_python_ast::source_code::{Locator, Stylist}; -use crate::cst::matchers::{match_expr, match_module}; - -fn match_call<'a, 'b>(expr: &'a mut Expr<'b>) -> Result<&'a mut Call<'b>> { - if let Expression::Call(call) = &mut expr.value { - Ok(call) - } else { - bail!("Expected Expression::Call") - } -} - -fn match_arg<'a, 'b>(call: &'a Call<'b>) -> Result<&'a Arg<'b>> { - if let Some(arg) = call.args.first() { - Ok(arg) - } else { - bail!("Expected Arg") - } -} +use crate::cst::matchers::{ + match_arg, match_call, match_call_mut, match_expression, match_generator_exp, match_lambda, + match_list_comp, match_name, match_tuple, +}; /// (C400) Convert `list(x for x in y)` to `[x for x in y]`. 
pub(crate) fn fix_unnecessary_generator_list( @@ -38,18 +25,13 @@ pub(crate) fn fix_unnecessary_generator_list( ) -> Result { // Expr(Call(GeneratorExp)))) -> Expr(ListComp))) let module_text = locator.slice(expr.range()); - let mut tree = match_module(module_text)?; - let mut body = match_expr(&mut tree)?; - let call = match_call(body)?; + let mut tree = match_expression(module_text)?; + let call = match_call_mut(&mut tree)?; let arg = match_arg(call)?; - let Expression::GeneratorExp(generator_exp) = &arg.value else { - bail!( - "Expected Expression::GeneratorExp" - ); - }; + let generator_exp = match_generator_exp(&arg.value)?; - body.value = Expression::ListComp(Box::new(ListComp { + tree = Expression::ListComp(Box::new(ListComp { elt: generator_exp.elt.clone(), for_in: generator_exp.for_in.clone(), lbracket: LeftSquareBracket { @@ -81,18 +63,13 @@ pub(crate) fn fix_unnecessary_generator_set( ) -> Result { // Expr(Call(GeneratorExp)))) -> Expr(SetComp))) let module_text = locator.slice(expr.range()); - let mut tree = match_module(module_text)?; - let mut body = match_expr(&mut tree)?; - let call = match_call(body)?; + let mut tree = match_expression(module_text)?; + let call = match_call_mut(&mut tree)?; let arg = match_arg(call)?; - let Expression::GeneratorExp(generator_exp) = &arg.value else { - bail!( - "Expected Expression::GeneratorExp" - ); - }; + let generator_exp = match_generator_exp(&arg.value)?; - body.value = Expression::SetComp(Box::new(SetComp { + tree = Expression::SetComp(Box::new(SetComp { elt: generator_exp.elt.clone(), for_in: generator_exp.for_in.clone(), lbrace: LeftCurlyBrace { @@ -132,32 +109,18 @@ pub(crate) fn fix_unnecessary_generator_dict( parent: Option<&rustpython_parser::ast::Expr>, ) -> Result { let module_text = locator.slice(expr.range()); - let mut tree = match_module(module_text)?; - let mut body = match_expr(&mut tree)?; - let call = match_call(body)?; + let mut tree = match_expression(module_text)?; + let call = match_call_mut(&mut tree)?; let arg = match_arg(call)?; // Extract the (k, v) from `(k, v) for ...`. - let Expression::GeneratorExp(generator_exp) = &arg.value else { - bail!( - "Expected Expression::GeneratorExp" - ); - }; - let Expression::Tuple(tuple) = &generator_exp.elt.as_ref() else { - bail!("Expected Expression::Tuple"); - }; - let Some(Element::Simple { value: key, .. }) = &tuple.elements.get(0) else { - bail!( - "Expected tuple to contain a key as the first element" - ); - }; - let Some(Element::Simple { value, .. }) = &tuple.elements.get(1) else { - bail!( - "Expected tuple to contain a key as the second element" - ); + let generator_exp = match_generator_exp(&arg.value)?; + let tuple = match_tuple(&generator_exp.elt)?; + let [Element::Simple { value: key, .. }, Element::Simple { value, .. }] = &tuple.elements[..] 
else { + bail!("Expected tuple to contain two elements"); }; - body.value = Expression::DictComp(Box::new(DictComp { + tree = Expression::DictComp(Box::new(DictComp { key: Box::new(key.clone()), value: Box::new(value.clone()), for_in: generator_exp.for_in.clone(), @@ -200,16 +163,13 @@ pub(crate) fn fix_unnecessary_list_comprehension_set( // Expr(Call(ListComp)))) -> // Expr(SetComp))) let module_text = locator.slice(expr.range()); - let mut tree = match_module(module_text)?; - let mut body = match_expr(&mut tree)?; - let call = match_call(body)?; + let mut tree = match_expression(module_text)?; + let call = match_call_mut(&mut tree)?; let arg = match_arg(call)?; - let Expression::ListComp(list_comp) = &arg.value else { - bail!("Expected Expression::ListComp"); - }; + let list_comp = match_list_comp(&arg.value)?; - body.value = Expression::SetComp(Box::new(SetComp { + tree = Expression::SetComp(Box::new(SetComp { elt: list_comp.elt.clone(), for_in: list_comp.for_in.clone(), lbrace: LeftCurlyBrace { @@ -240,25 +200,20 @@ pub(crate) fn fix_unnecessary_list_comprehension_dict( expr: &rustpython_parser::ast::Expr, ) -> Result { let module_text = locator.slice(expr.range()); - let mut tree = match_module(module_text)?; - let mut body = match_expr(&mut tree)?; - let call = match_call(body)?; + let mut tree = match_expression(module_text)?; + let call = match_call_mut(&mut tree)?; let arg = match_arg(call)?; - let Expression::ListComp(list_comp) = &arg.value else { - bail!("Expected Expression::ListComp") - }; + let list_comp = match_list_comp(&arg.value)?; - let Expression::Tuple(tuple) = &*list_comp.elt else { - bail!("Expected Expression::Tuple") - }; + let tuple = match_tuple(&list_comp.elt)?; let [Element::Simple { value: key, comma: Some(comma), }, Element::Simple { value, .. }] = &tuple.elements[..] 
else { bail!("Expected tuple with two elements"); }; - body.value = Expression::DictComp(Box::new(DictComp { + tree = Expression::DictComp(Box::new(DictComp { key: Box::new(key.clone()), value: Box::new(value.clone()), for_in: list_comp.for_in.clone(), @@ -335,9 +290,8 @@ pub(crate) fn fix_unnecessary_literal_set( ) -> Result { // Expr(Call(List|Tuple)))) -> Expr(Set))) let module_text = locator.slice(expr.range()); - let mut tree = match_module(module_text)?; - let mut body = match_expr(&mut tree)?; - let mut call = match_call(body)?; + let mut tree = match_expression(module_text)?; + let mut call = match_call_mut(&mut tree)?; let arg = match_arg(call)?; let (elements, whitespace_after, whitespace_before) = match &arg.value { @@ -355,7 +309,7 @@ pub(crate) fn fix_unnecessary_literal_set( if elements.is_empty() { call.args = vec![]; } else { - body.value = Expression::Set(Box::new(Set { + tree = Expression::Set(Box::new(Set { elements, lbrace: LeftCurlyBrace { whitespace_after }, rbrace: RightCurlyBrace { whitespace_before }, @@ -382,9 +336,8 @@ pub(crate) fn fix_unnecessary_literal_dict( ) -> Result { // Expr(Call(List|Tuple)))) -> Expr(Dict))) let module_text = locator.slice(expr.range()); - let mut tree = match_module(module_text)?; - let mut body = match_expr(&mut tree)?; - let call = match_call(body)?; + let mut tree = match_expression(module_text)?; + let call = match_call_mut(&mut tree)?; let arg = match_arg(call)?; let elements = match &arg.value { @@ -421,7 +374,7 @@ pub(crate) fn fix_unnecessary_literal_dict( }) .collect::>>()?; - body.value = Expression::Dict(Box::new(Dict { + tree = Expression::Dict(Box::new(Dict { elements, lbrace: LeftCurlyBrace { whitespace_after: call.whitespace_before_args.clone(), @@ -451,12 +404,9 @@ pub(crate) fn fix_unnecessary_collection_call( ) -> Result { // Expr(Call("list" | "tuple" | "dict")))) -> Expr(List|Tuple|Dict) let module_text = locator.slice(expr.range()); - let mut tree = match_module(module_text)?; - let mut body = match_expr(&mut tree)?; - let call = match_call(body)?; - let Expression::Name(name) = &call.func.as_ref() else { - bail!("Expected Expression::Name"); - }; + let mut tree = match_expression(module_text)?; + let call = match_call_mut(&mut tree)?; + let name = match_name(&call.func)?; // Arena allocator used to create formatted strings of sufficient lifetime, // below. 
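// The `crate::cst::matchers` helpers used throughout this file
// (`match_expression`, `match_call_mut`, `match_arg`, `match_generator_exp`,
// ...) are defined outside this diff. Judging from the local
// `match_call`/`match_arg` helpers they replace, each is a small focused
// matcher roughly of this shape (an assumption, not the actual
// implementation):
use anyhow::{bail, Result};
use libcst_native::{Call, Expression};

pub(crate) fn match_call_mut<'a, 'b>(
    expression: &'a mut Expression<'b>,
) -> Result<&'a mut Call<'b>> {
    if let Expression::Call(call) = expression {
        Ok(call)
    } else {
        bail!("Expected Expression::Call")
    }
}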
@@ -464,14 +414,14 @@ pub(crate) fn fix_unnecessary_collection_call( match name.value { "tuple" => { - body.value = Expression::Tuple(Box::new(Tuple { + tree = Expression::Tuple(Box::new(Tuple { elements: vec![], lpar: vec![LeftParen::default()], rpar: vec![RightParen::default()], })); } "list" => { - body.value = Expression::List(Box::new(List { + tree = Expression::List(Box::new(List { elements: vec![], lbracket: LeftSquareBracket::default(), rbracket: RightSquareBracket::default(), @@ -481,7 +431,7 @@ pub(crate) fn fix_unnecessary_collection_call( } "dict" => { if call.args.is_empty() { - body.value = Expression::Dict(Box::new(Dict { + tree = Expression::Dict(Box::new(Dict { elements: vec![], lbrace: LeftCurlyBrace::default(), rbrace: RightCurlyBrace::default(), @@ -522,7 +472,7 @@ pub(crate) fn fix_unnecessary_collection_call( }) .collect(); - body.value = Expression::Dict(Box::new(Dict { + tree = Expression::Dict(Box::new(Dict { elements, lbrace: LeftCurlyBrace { whitespace_after: call.whitespace_before_args.clone(), @@ -562,9 +512,8 @@ pub(crate) fn fix_unnecessary_literal_within_tuple_call( expr: &rustpython_parser::ast::Expr, ) -> Result { let module_text = locator.slice(expr.range()); - let mut tree = match_module(module_text)?; - let mut body = match_expr(&mut tree)?; - let call = match_call(body)?; + let mut tree = match_expression(module_text)?; + let call = match_call_mut(&mut tree)?; let arg = match_arg(call)?; let (elements, whitespace_after, whitespace_before) = match &arg.value { Expression::Tuple(inner) => ( @@ -590,7 +539,7 @@ pub(crate) fn fix_unnecessary_literal_within_tuple_call( } }; - body.value = Expression::Tuple(Box::new(Tuple { + tree = Expression::Tuple(Box::new(Tuple { elements: elements.clone(), lpar: vec![LeftParen { whitespace_after: whitespace_after.clone(), @@ -617,9 +566,8 @@ pub(crate) fn fix_unnecessary_literal_within_list_call( expr: &rustpython_parser::ast::Expr, ) -> Result { let module_text = locator.slice(expr.range()); - let mut tree = match_module(module_text)?; - let mut body = match_expr(&mut tree)?; - let call = match_call(body)?; + let mut tree = match_expression(module_text)?; + let call = match_call_mut(&mut tree)?; let arg = match_arg(call)?; let (elements, whitespace_after, whitespace_before) = match &arg.value { Expression::Tuple(inner) => ( @@ -645,7 +593,7 @@ pub(crate) fn fix_unnecessary_literal_within_list_call( } }; - body.value = Expression::List(Box::new(List { + tree = Expression::List(Box::new(List { elements: elements.clone(), lbracket: LeftSquareBracket { whitespace_after: whitespace_after.clone(), @@ -675,12 +623,11 @@ pub(crate) fn fix_unnecessary_list_call( ) -> Result { // Expr(Call(List|Tuple)))) -> Expr(List|Tuple))) let module_text = locator.slice(expr.range()); - let mut tree = match_module(module_text)?; - let mut body = match_expr(&mut tree)?; - let call = match_call(body)?; + let mut tree = match_expression(module_text)?; + let call = match_call_mut(&mut tree)?; let arg = match_arg(call)?; - body.value = arg.value.clone(); + tree = arg.value.clone(); let mut state = CodegenState { default_newline: &stylist.line_ending(), @@ -701,17 +648,10 @@ pub(crate) fn fix_unnecessary_call_around_sorted( expr: &rustpython_parser::ast::Expr, ) -> Result { let module_text = locator.slice(expr.range()); - let mut tree = match_module(module_text)?; - let mut body = match_expr(&mut tree)?; - let outer_call = match_call(body)?; + let mut tree = match_expression(module_text)?; + let outer_call = match_call_mut(&mut tree)?; let 
inner_call = match &outer_call.args[..] { - [arg] => { - if let Expression::Call(call) = &arg.value { - call - } else { - bail!("Expected Expression::Call "); - } - } + [arg] => match_call(&arg.value)?, _ => { bail!("Expected one argument in outer function call"); } @@ -719,7 +659,7 @@ pub(crate) fn fix_unnecessary_call_around_sorted( if let Expression::Name(outer_name) = &*outer_call.func { if outer_name.value == "list" { - body.value = Expression::Call(inner_call.clone()); + tree = Expression::Call(Box::new((*inner_call).clone())); } else { // If the `reverse` argument is used let args = if inner_call.args.iter().any(|arg| { @@ -796,7 +736,7 @@ pub(crate) fn fix_unnecessary_call_around_sorted( args }; - body.value = Expression::Call(Box::new(Call { + tree = Expression::Call(Box::new(Call { func: inner_call.func.clone(), args, lpar: inner_call.lpar.clone(), @@ -824,15 +764,12 @@ pub(crate) fn fix_unnecessary_double_cast_or_process( expr: &rustpython_parser::ast::Expr, ) -> Result { let module_text = locator.slice(expr.range()); - let mut tree = match_module(module_text)?; - let body = match_expr(&mut tree)?; - let mut outer_call = match_call(body)?; + let mut tree = match_expression(module_text)?; + let mut outer_call = match_call_mut(&mut tree)?; outer_call.args = match outer_call.args.split_first() { Some((first, rest)) => { - let Expression::Call(inner_call) = &first.value else { - bail!("Expected Expression::Call "); - }; + let inner_call = match_call(&first.value)?; if let Some(iterable) = inner_call.args.first() { let mut args = vec![iterable.clone()]; args.extend_from_slice(rest); @@ -861,12 +798,11 @@ pub(crate) fn fix_unnecessary_comprehension( expr: &rustpython_parser::ast::Expr, ) -> Result { let module_text = locator.slice(expr.range()); - let mut tree = match_module(module_text)?; - let mut body = match_expr(&mut tree)?; + let mut tree = match_expression(module_text)?; - match &body.value { + match &tree { Expression::ListComp(inner) => { - body.value = Expression::Call(Box::new(Call { + tree = Expression::Call(Box::new(Call { func: Box::new(Expression::Name(Box::new(Name { value: "list", lpar: vec![], @@ -888,7 +824,7 @@ pub(crate) fn fix_unnecessary_comprehension( })); } Expression::SetComp(inner) => { - body.value = Expression::Call(Box::new(Call { + tree = Expression::Call(Box::new(Call { func: Box::new(Expression::Name(Box::new(Name { value: "set", lpar: vec![], @@ -910,7 +846,7 @@ pub(crate) fn fix_unnecessary_comprehension( })); } Expression::DictComp(inner) => { - body.value = Expression::Call(Box::new(Call { + tree = Expression::Call(Box::new(Call { func: Box::new(Expression::Name(Box::new(Name { value: "dict", lpar: vec![], @@ -955,9 +891,8 @@ pub(crate) fn fix_unnecessary_map( kind: &str, ) -> Result { let module_text = locator.slice(expr.range()); - let mut tree = match_module(module_text)?; - let mut body = match_expr(&mut tree)?; - let call = match_call(body)?; + let mut tree = match_expression(module_text)?; + let call = match_call_mut(&mut tree)?; let arg = match_arg(call)?; let (args, lambda_func) = match &arg.value { @@ -976,9 +911,7 @@ pub(crate) fn fix_unnecessary_map( } }; - let Expression::Lambda(func_body) = &lambda_func else { - bail!("Expected a lambda") - }; + let func_body = match_lambda(&lambda_func)?; if args.len() == 2 { if func_body.params.params.iter().any(|f| f.default.is_some()) { @@ -1017,7 +950,7 @@ pub(crate) fn fix_unnecessary_map( match kind { "generator" => { - body.value = Expression::GeneratorExp(Box::new(GeneratorExp { + tree = 
Expression::GeneratorExp(Box::new(GeneratorExp { elt: func_body.body.clone(), for_in: compfor, lpar: vec![LeftParen::default()], @@ -1025,7 +958,7 @@ pub(crate) fn fix_unnecessary_map( })); } "list" => { - body.value = Expression::ListComp(Box::new(ListComp { + tree = Expression::ListComp(Box::new(ListComp { elt: func_body.body.clone(), for_in: compfor, lbracket: LeftSquareBracket::default(), @@ -1035,7 +968,7 @@ pub(crate) fn fix_unnecessary_map( })); } "set" => { - body.value = Expression::SetComp(Box::new(SetComp { + tree = Expression::SetComp(Box::new(SetComp { elt: func_body.body.clone(), for_in: compfor, lpar: vec![], @@ -1066,7 +999,7 @@ pub(crate) fn fix_unnecessary_map( bail!("Expected tuple for dict comprehension") }; - body.value = Expression::DictComp(Box::new(DictComp { + tree = Expression::DictComp(Box::new(DictComp { for_in: compfor, lpar: vec![], rpar: vec![], @@ -1115,12 +1048,11 @@ pub(crate) fn fix_unnecessary_literal_within_dict_call( expr: &rustpython_parser::ast::Expr, ) -> Result { let module_text = locator.slice(expr.range()); - let mut tree = match_module(module_text)?; - let mut body = match_expr(&mut tree)?; - let call = match_call(body)?; + let mut tree = match_expression(module_text)?; + let call = match_call_mut(&mut tree)?; let arg = match_arg(call)?; - body.value = arg.value.clone(); + tree = arg.value.clone(); let mut state = CodegenState { default_newline: &stylist.line_ending(), @@ -1140,9 +1072,8 @@ pub(crate) fn fix_unnecessary_comprehension_any_all( ) -> Result { // Expr(ListComp) -> Expr(GeneratorExp) let module_text = locator.slice(expr.range()); - let mut tree = match_module(module_text)?; - let body = match_expr(&mut tree)?; - let call = match_call(body)?; + let mut tree = match_expression(module_text)?; + let call = match_call_mut(&mut tree)?; let Expression::ListComp(list_comp) = &call.args[0].value else { bail!( diff --git a/crates/ruff/src/rules/flake8_comprehensions/mod.rs b/crates/ruff/src/rules/flake8_comprehensions/mod.rs index 02bd63ddb2d89..9806e41659839 100644 --- a/crates/ruff/src/rules/flake8_comprehensions/mod.rs +++ b/crates/ruff/src/rules/flake8_comprehensions/mod.rs @@ -7,11 +7,10 @@ pub mod settings; mod tests { use std::path::Path; - use crate::assert_messages; use anyhow::Result; - use test_case::test_case; + use crate::assert_messages; use crate::registry::Rule; use crate::settings::Settings; use crate::test::test_path; diff --git a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_call_around_sorted.rs b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_call_around_sorted.rs index 4d1514cf9e882..f94fb7e0bff77 100644 --- a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_call_around_sorted.rs +++ b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_call_around_sorted.rs @@ -1,10 +1,11 @@ use rustpython_parser::ast::{self, Expr, Ranged}; +use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic}; +use ruff_macros::{derive_message_formats, violation}; + use crate::checkers::ast::Checker; use crate::registry::AsRule; use crate::rules::flake8_comprehensions::fixes; -use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic}; -use ruff_macros::{derive_message_formats, violation}; use super::helpers; @@ -74,7 +75,7 @@ pub(crate) fn unnecessary_call_around_sorted( if inner != "sorted" { return; } - if !checker.ctx.is_builtin(inner) || !checker.ctx.is_builtin(outer) { + if !checker.semantic_model().is_builtin(inner) || !checker.semantic_model().is_builtin(outer) { return; } let 
mut diagnostic = Diagnostic::new( diff --git a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_collection_call.rs b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_collection_call.rs index 1789608900d13..e559d50bb3a56 100644 --- a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_collection_call.rs +++ b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_collection_call.rs @@ -79,7 +79,7 @@ pub(crate) fn unnecessary_collection_call( } _ => return, }; - if !checker.ctx.is_builtin(id) { + if !checker.semantic_model().is_builtin(id) { return; } let mut diagnostic = Diagnostic::new( diff --git a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_comprehension.rs b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_comprehension.rs index 24dbdcfbf5c12..498e86edc417b 100644 --- a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_comprehension.rs +++ b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_comprehension.rs @@ -56,7 +56,7 @@ fn add_diagnostic(checker: &mut Checker, expr: &Expr) { Expr::DictComp(_) => "dict", _ => return, }; - if !checker.ctx.is_builtin(id) { + if !checker.semantic_model().is_builtin(id) { return; } let mut diagnostic = Diagnostic::new( diff --git a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_comprehension_any_all.rs b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_comprehension_any_all.rs index 0eb7b10d43737..c3aac344dc3dd 100644 --- a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_comprehension_any_all.rs +++ b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_comprehension_any_all.rs @@ -76,7 +76,7 @@ pub(crate) fn unnecessary_comprehension_any_all( if is_async_generator(elt) { return; } - if !checker.ctx.is_builtin(id) { + if !checker.semantic_model().is_builtin(id) { return; } let mut diagnostic = Diagnostic::new(UnnecessaryComprehensionAnyAll, args[0].range()); diff --git a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_double_cast_or_process.rs b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_double_cast_or_process.rs index ee0767b05624d..96cec22cf021a 100644 --- a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_double_cast_or_process.rs +++ b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_double_cast_or_process.rs @@ -90,7 +90,7 @@ pub(crate) fn unnecessary_double_cast_or_process( let Some(inner) = helpers::expr_name(func) else { return; }; - if !checker.ctx.is_builtin(inner) || !checker.ctx.is_builtin(outer) { + if !checker.semantic_model().is_builtin(inner) || !checker.semantic_model().is_builtin(outer) { return; } diff --git a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_generator_list.rs b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_generator_list.rs index db6a5bcf9f771..4cdd85512dddc 100644 --- a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_generator_list.rs +++ b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_generator_list.rs @@ -52,7 +52,7 @@ pub(crate) fn unnecessary_generator_list( let Some(argument) = helpers::exactly_one_argument_with_matching_function("list", func, args, keywords) else { return; }; - if !checker.ctx.is_builtin("list") { + if !checker.semantic_model().is_builtin("list") { return; } if let Expr::GeneratorExp(_) = argument { diff --git a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_generator_set.rs 
b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_generator_set.rs index eb80f2b5fce8d..7f7cf83be2453 100644 --- a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_generator_set.rs +++ b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_generator_set.rs @@ -53,7 +53,7 @@ pub(crate) fn unnecessary_generator_set( let Some(argument) = helpers::exactly_one_argument_with_matching_function("set", func, args, keywords) else { return; }; - if !checker.ctx.is_builtin("set") { + if !checker.semantic_model().is_builtin("set") { return; } if let Expr::GeneratorExp(_) = argument { diff --git a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_list_call.rs b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_list_call.rs index dd3a7c3cc5ae8..597253875b661 100644 --- a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_list_call.rs +++ b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_list_call.rs @@ -1,10 +1,11 @@ use rustpython_parser::ast::{Expr, Ranged}; +use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic}; +use ruff_macros::{derive_message_formats, violation}; + use crate::checkers::ast::Checker; use crate::registry::AsRule; use crate::rules::flake8_comprehensions::fixes; -use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic}; -use ruff_macros::{derive_message_formats, violation}; use super::helpers; @@ -47,7 +48,7 @@ pub(crate) fn unnecessary_list_call( let Some(argument) = helpers::first_argument_with_matching_function("list", func, args) else { return; }; - if !checker.ctx.is_builtin("list") { + if !checker.semantic_model().is_builtin("list") { return; } if !argument.is_list_comp_expr() { diff --git a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_list_comprehension_dict.rs b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_list_comprehension_dict.rs index 12e9ee790c8a9..9c6f87873bb4b 100644 --- a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_list_comprehension_dict.rs +++ b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_list_comprehension_dict.rs @@ -1,10 +1,11 @@ use rustpython_parser::ast::{self, Expr, Keyword, Ranged}; +use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic}; +use ruff_macros::{derive_message_formats, violation}; + use crate::checkers::ast::Checker; use crate::registry::AsRule; use crate::rules::flake8_comprehensions::fixes; -use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic}; -use ruff_macros::{derive_message_formats, violation}; use super::helpers; @@ -49,7 +50,7 @@ pub(crate) fn unnecessary_list_comprehension_dict( let Some(argument) = helpers::exactly_one_argument_with_matching_function("dict", func, args, keywords) else { return; }; - if !checker.ctx.is_builtin("dict") { + if !checker.semantic_model().is_builtin("dict") { return; } let Expr::ListComp(ast::ExprListComp { elt, .. 
}) = argument else { diff --git a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_list_comprehension_set.rs b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_list_comprehension_set.rs index 49076501f453e..b6dc24823ca19 100644 --- a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_list_comprehension_set.rs +++ b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_list_comprehension_set.rs @@ -50,7 +50,7 @@ pub(crate) fn unnecessary_list_comprehension_set( let Some(argument) = helpers::exactly_one_argument_with_matching_function("set", func, args, keywords) else { return; }; - if !checker.ctx.is_builtin("set") { + if !checker.semantic_model().is_builtin("set") { return; } if argument.is_list_comp_expr() { diff --git a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_literal_dict.rs b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_literal_dict.rs index 0d1890a502de3..5ba588870b423 100644 --- a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_literal_dict.rs +++ b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_literal_dict.rs @@ -1,10 +1,11 @@ use rustpython_parser::ast::{self, Expr, Keyword, Ranged}; +use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic}; +use ruff_macros::{derive_message_formats, violation}; + use crate::checkers::ast::Checker; use crate::registry::AsRule; use crate::rules::flake8_comprehensions::fixes; -use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic}; -use ruff_macros::{derive_message_formats, violation}; use super::helpers; @@ -56,7 +57,7 @@ pub(crate) fn unnecessary_literal_dict( let Some(argument) = helpers::exactly_one_argument_with_matching_function("dict", func, args, keywords) else { return; }; - if !checker.ctx.is_builtin("dict") { + if !checker.semantic_model().is_builtin("dict") { return; } let (kind, elts) = match argument { diff --git a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_literal_set.rs b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_literal_set.rs index 2d58455da0199..86c126dd5a7b2 100644 --- a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_literal_set.rs +++ b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_literal_set.rs @@ -1,10 +1,11 @@ use rustpython_parser::ast::{Expr, Keyword, Ranged}; +use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic}; +use ruff_macros::{derive_message_formats, violation}; + use crate::checkers::ast::Checker; use crate::registry::AsRule; use crate::rules::flake8_comprehensions::fixes; -use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic}; -use ruff_macros::{derive_message_formats, violation}; use super::helpers; @@ -57,7 +58,7 @@ pub(crate) fn unnecessary_literal_set( let Some(argument) = helpers::exactly_one_argument_with_matching_function("set", func, args, keywords) else { return; }; - if !checker.ctx.is_builtin("set") { + if !checker.semantic_model().is_builtin("set") { return; } let kind = match argument { diff --git a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_literal_within_dict_call.rs b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_literal_within_dict_call.rs index f48ec9c9fb68e..cc9ba2d5ce47b 100644 --- a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_literal_within_dict_call.rs +++ b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_literal_within_dict_call.rs @@ -1,6 +1,7 @@ -use rustpython_parser::ast::{Expr, Keyword, Ranged}; use std::fmt; 
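Every rule file in this stretch carries the same guard, now phrased as `checker.semantic_model().is_builtin(...)`: the diagnostic is only raised when the called name still resolves to the Python builtin. A small, hypothetical Python case showing what the guard protects against:

```python
def build(set):            # "set" is shadowed by a parameter here,
    return set([1, 2, 3])  # so this call must not be rewritten to {1, 2, 3}


print(build(frozenset))    # frozenset({1, 2, 3}) -- a set literal would change the result
```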
+use rustpython_parser::ast::{Expr, Keyword, Ranged}; + use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic}; use ruff_macros::{derive_message_formats, violation}; @@ -75,7 +76,7 @@ pub(crate) fn unnecessary_literal_within_dict_call( let Some(argument) = helpers::first_argument_with_matching_function("dict", func, args) else { return; }; - if !checker.ctx.is_builtin("dict") { + if !checker.semantic_model().is_builtin("dict") { return; } let argument_kind = match argument { diff --git a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_literal_within_list_call.rs b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_literal_within_list_call.rs index b4f99d719107b..d1c240d55fa4e 100644 --- a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_literal_within_list_call.rs +++ b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_literal_within_list_call.rs @@ -1,10 +1,11 @@ use rustpython_parser::ast::{Expr, Keyword, Ranged}; +use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic}; +use ruff_macros::{derive_message_formats, violation}; + use crate::checkers::ast::Checker; use crate::registry::AsRule; use crate::rules::flake8_comprehensions::fixes; -use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic}; -use ruff_macros::{derive_message_formats, violation}; use super::helpers; @@ -78,7 +79,7 @@ pub(crate) fn unnecessary_literal_within_list_call( let Some(argument) = helpers::first_argument_with_matching_function("list", func, args) else { return; }; - if !checker.ctx.is_builtin("list") { + if !checker.semantic_model().is_builtin("list") { return; } let argument_kind = match argument { diff --git a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_literal_within_tuple_call.rs b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_literal_within_tuple_call.rs index f569a34e005a4..cd61c1943f97a 100644 --- a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_literal_within_tuple_call.rs +++ b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_literal_within_tuple_call.rs @@ -1,10 +1,11 @@ use rustpython_parser::ast::{Expr, Keyword, Ranged}; +use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic}; +use ruff_macros::{derive_message_formats, violation}; + use crate::checkers::ast::Checker; use crate::registry::AsRule; use crate::rules::flake8_comprehensions::fixes; -use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic}; -use ruff_macros::{derive_message_formats, violation}; use super::helpers; @@ -79,7 +80,7 @@ pub(crate) fn unnecessary_literal_within_tuple_call( let Some(argument) = helpers::first_argument_with_matching_function("tuple", func, args) else { return; }; - if !checker.ctx.is_builtin("tuple") { + if !checker.semantic_model().is_builtin("tuple") { return; } let argument_kind = match argument { diff --git a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_map.rs b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_map.rs index e6cf53f68d1be..086a3137a4a19 100644 --- a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_map.rs +++ b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_map.rs @@ -88,7 +88,7 @@ pub(crate) fn unnecessary_map( }; match id { "map" => { - if !checker.ctx.is_builtin(id) { + if !checker.semantic_model().is_builtin(id) { return; } @@ -119,7 +119,7 @@ pub(crate) fn unnecessary_map( } } "list" | "set" => { - if !checker.ctx.is_builtin(id) { + if !checker.semantic_model().is_builtin(id) { return; } @@ 
-149,7 +149,7 @@ pub(crate) fn unnecessary_map( } } "dict" => { - if !checker.ctx.is_builtin(id) { + if !checker.semantic_model().is_builtin(id) { return; } diff --git a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_subscript_reversal.rs b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_subscript_reversal.rs index a27628d77449e..8659025978038 100644 --- a/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_subscript_reversal.rs +++ b/crates/ruff/src/rules/flake8_comprehensions/rules/unnecessary_subscript_reversal.rs @@ -1,10 +1,11 @@ use num_bigint::BigInt; use rustpython_parser::ast::{self, Constant, Expr, Ranged, Unaryop}; -use crate::checkers::ast::Checker; use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; +use crate::checkers::ast::Checker; + use super::helpers; /// ## What it does @@ -57,7 +58,7 @@ pub(crate) fn unnecessary_subscript_reversal( if !(id == "set" || id == "sorted" || id == "reversed") { return; } - if !checker.ctx.is_builtin(id) { + if !checker.semantic_model().is_builtin(id) { return; } let Expr::Subscript(ast::ExprSubscript { slice, .. }) = first_arg else { diff --git a/crates/ruff/src/rules/flake8_datetimez/mod.rs b/crates/ruff/src/rules/flake8_datetimez/mod.rs index d2d22010eba85..b4d19816da621 100644 --- a/crates/ruff/src/rules/flake8_datetimez/mod.rs +++ b/crates/ruff/src/rules/flake8_datetimez/mod.rs @@ -6,7 +6,6 @@ mod tests { use std::path::Path; use anyhow::Result; - use test_case::test_case; use crate::registry::Rule; diff --git a/crates/ruff/src/rules/flake8_datetimez/rules.rs b/crates/ruff/src/rules/flake8_datetimez/rules.rs deleted file mode 100644 index de3faa826d2b2..0000000000000 --- a/crates/ruff/src/rules/flake8_datetimez/rules.rs +++ /dev/null @@ -1,403 +0,0 @@ -use ruff_text_size::TextRange; -use rustpython_parser::ast::{self, Constant, Expr, Keyword}; - -use ruff_diagnostics::{Diagnostic, Violation}; -use ruff_macros::{derive_message_formats, violation}; -use ruff_python_ast::helpers::{has_non_none_keyword, is_const_none}; - -use crate::checkers::ast::Checker; - -#[violation] -pub struct CallDatetimeWithoutTzinfo; - -impl Violation for CallDatetimeWithoutTzinfo { - #[derive_message_formats] - fn message(&self) -> String { - format!("The use of `datetime.datetime()` without `tzinfo` argument is not allowed") - } -} - -#[violation] -pub struct CallDatetimeToday; - -impl Violation for CallDatetimeToday { - #[derive_message_formats] - fn message(&self) -> String { - format!( - "The use of `datetime.datetime.today()` is not allowed, use \ - `datetime.datetime.now(tz=)` instead" - ) - } -} - -#[violation] -pub struct CallDatetimeUtcnow; - -impl Violation for CallDatetimeUtcnow { - #[derive_message_formats] - fn message(&self) -> String { - format!( - "The use of `datetime.datetime.utcnow()` is not allowed, use \ - `datetime.datetime.now(tz=)` instead" - ) - } -} - -#[violation] -pub struct CallDatetimeUtcfromtimestamp; - -impl Violation for CallDatetimeUtcfromtimestamp { - #[derive_message_formats] - fn message(&self) -> String { - format!( - "The use of `datetime.datetime.utcfromtimestamp()` is not allowed, use \ - `datetime.datetime.fromtimestamp(ts, tz=)` instead" - ) - } -} - -#[violation] -pub struct CallDatetimeNowWithoutTzinfo; - -impl Violation for CallDatetimeNowWithoutTzinfo { - #[derive_message_formats] - fn message(&self) -> String { - format!("The use of `datetime.datetime.now()` without `tz` argument is not allowed") - } -} - -#[violation] -pub struct 
CallDatetimeFromtimestamp; - -impl Violation for CallDatetimeFromtimestamp { - #[derive_message_formats] - fn message(&self) -> String { - format!( - "The use of `datetime.datetime.fromtimestamp()` without `tz` argument is not allowed" - ) - } -} - -#[violation] -pub struct CallDatetimeStrptimeWithoutZone; - -impl Violation for CallDatetimeStrptimeWithoutZone { - #[derive_message_formats] - fn message(&self) -> String { - format!( - "The use of `datetime.datetime.strptime()` without %z must be followed by \ - `.replace(tzinfo=)` or `.astimezone()`" - ) - } -} - -#[violation] -pub struct CallDateToday; - -impl Violation for CallDateToday { - #[derive_message_formats] - fn message(&self) -> String { - format!( - "The use of `datetime.date.today()` is not allowed, use \ - `datetime.datetime.now(tz=).date()` instead" - ) - } -} - -#[violation] -pub struct CallDateFromtimestamp; - -impl Violation for CallDateFromtimestamp { - #[derive_message_formats] - fn message(&self) -> String { - format!( - "The use of `datetime.date.fromtimestamp()` is not allowed, use \ - `datetime.datetime.fromtimestamp(ts, tz=).date()` instead" - ) - } -} - -pub(crate) fn call_datetime_without_tzinfo( - checker: &mut Checker, - func: &Expr, - args: &[Expr], - keywords: &[Keyword], - location: TextRange, -) { - if !checker - .ctx - .resolve_call_path(func) - .map_or(false, |call_path| { - call_path.as_slice() == ["datetime", "datetime"] - }) - { - return; - } - - // No positional arg: keyword is missing or constant None. - if args.len() < 8 && !has_non_none_keyword(keywords, "tzinfo") { - checker - .diagnostics - .push(Diagnostic::new(CallDatetimeWithoutTzinfo, location)); - return; - } - - // Positional arg: is constant None. - if args.len() >= 8 && is_const_none(&args[7]) { - checker - .diagnostics - .push(Diagnostic::new(CallDatetimeWithoutTzinfo, location)); - } -} - -/// Checks for `datetime.datetime.today()`. (DTZ002) -/// -/// ## Why is this bad? -/// -/// It uses the system local timezone. -/// Use `datetime.datetime.now(tz=)` instead. -pub(crate) fn call_datetime_today(checker: &mut Checker, func: &Expr, location: TextRange) { - if checker - .ctx - .resolve_call_path(func) - .map_or(false, |call_path| { - call_path.as_slice() == ["datetime", "datetime", "today"] - }) - { - checker - .diagnostics - .push(Diagnostic::new(CallDatetimeToday, location)); - } -} - -/// Checks for `datetime.datetime.today()`. (DTZ003) -/// -/// ## Why is this bad? -/// -/// Because naive `datetime` objects are treated by many `datetime` methods as -/// local times, it is preferred to use aware datetimes to represent times in -/// UTC. As such, the recommended way to create an object representing the -/// current time in UTC is by calling `datetime.now(timezone.utc)`. -pub(crate) fn call_datetime_utcnow(checker: &mut Checker, func: &Expr, location: TextRange) { - if checker - .ctx - .resolve_call_path(func) - .map_or(false, |call_path| { - call_path.as_slice() == ["datetime", "datetime", "utcnow"] - }) - { - checker - .diagnostics - .push(Diagnostic::new(CallDatetimeUtcnow, location)); - } -} - -/// Checks for `datetime.datetime.utcfromtimestamp()`. (DTZ004) -/// -/// ## Why is this bad? -/// -/// Because naive `datetime` objects are treated by many `datetime` methods as -/// local times, it is preferred to use aware datetimes to represent times in -/// UTC. As such, the recommended way to create an object representing a -/// specific timestamp in UTC is by calling `datetime.fromtimestamp(timestamp, -/// tz=timezone.utc)`. 
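The deleted doc comments above (they reappear verbatim in the new per-rule files further down) explain DTZ003 and DTZ004 in terms of naive versus aware datetimes. A short Python rendering of the flagged calls and the replacements those docs recommend (the timestamp value is arbitrary):

```python
from datetime import datetime, timezone

ts = 1_700_000_000

# Flagged: both return naive datetimes, which many datetime methods treat as local time.
naive_now = datetime.utcnow()
naive_from_ts = datetime.utcfromtimestamp(ts)

# Recommended: explicitly timezone-aware equivalents.
aware_now = datetime.now(timezone.utc)
aware_from_ts = datetime.fromtimestamp(ts, tz=timezone.utc)

assert naive_now.tzinfo is None and naive_from_ts.tzinfo is None
assert aware_now.tzinfo is timezone.utc and aware_from_ts.tzinfo is timezone.utc
```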
-pub(crate) fn call_datetime_utcfromtimestamp( - checker: &mut Checker, - func: &Expr, - location: TextRange, -) { - if checker - .ctx - .resolve_call_path(func) - .map_or(false, |call_path| { - call_path.as_slice() == ["datetime", "datetime", "utcfromtimestamp"] - }) - { - checker - .diagnostics - .push(Diagnostic::new(CallDatetimeUtcfromtimestamp, location)); - } -} - -/// DTZ005 -pub(crate) fn call_datetime_now_without_tzinfo( - checker: &mut Checker, - func: &Expr, - args: &[Expr], - keywords: &[Keyword], - location: TextRange, -) { - if !checker - .ctx - .resolve_call_path(func) - .map_or(false, |call_path| { - call_path.as_slice() == ["datetime", "datetime", "now"] - }) - { - return; - } - - // no args / no args unqualified - if args.is_empty() && keywords.is_empty() { - checker - .diagnostics - .push(Diagnostic::new(CallDatetimeNowWithoutTzinfo, location)); - return; - } - - // none args - if !args.is_empty() && is_const_none(&args[0]) { - checker - .diagnostics - .push(Diagnostic::new(CallDatetimeNowWithoutTzinfo, location)); - return; - } - - // wrong keywords / none keyword - if !keywords.is_empty() && !has_non_none_keyword(keywords, "tz") { - checker - .diagnostics - .push(Diagnostic::new(CallDatetimeNowWithoutTzinfo, location)); - } -} - -/// DTZ006 -pub(crate) fn call_datetime_fromtimestamp( - checker: &mut Checker, - func: &Expr, - args: &[Expr], - keywords: &[Keyword], - location: TextRange, -) { - if !checker - .ctx - .resolve_call_path(func) - .map_or(false, |call_path| { - call_path.as_slice() == ["datetime", "datetime", "fromtimestamp"] - }) - { - return; - } - - // no args / no args unqualified - if args.len() < 2 && keywords.is_empty() { - checker - .diagnostics - .push(Diagnostic::new(CallDatetimeFromtimestamp, location)); - return; - } - - // none args - if args.len() > 1 && is_const_none(&args[1]) { - checker - .diagnostics - .push(Diagnostic::new(CallDatetimeFromtimestamp, location)); - return; - } - - // wrong keywords / none keyword - if !keywords.is_empty() && !has_non_none_keyword(keywords, "tz") { - checker - .diagnostics - .push(Diagnostic::new(CallDatetimeFromtimestamp, location)); - } -} - -/// DTZ007 -pub(crate) fn call_datetime_strptime_without_zone( - checker: &mut Checker, - func: &Expr, - args: &[Expr], - location: TextRange, -) { - if !checker - .ctx - .resolve_call_path(func) - .map_or(false, |call_path| { - call_path.as_slice() == ["datetime", "datetime", "strptime"] - }) - { - return; - } - - // Does the `strptime` call contain a format string with a timezone specifier? - if let Some(Expr::Constant(ast::ExprConstant { - value: Constant::Str(format), - kind: None, - range: _, - })) = args.get(1).as_ref() - { - if format.contains("%z") { - return; - } - }; - - let (Some(grandparent), Some(parent)) = (checker.ctx.expr_grandparent(), checker.ctx.expr_parent()) else { - checker.diagnostics.push(Diagnostic::new( - CallDatetimeStrptimeWithoutZone, - location, - )); - return; - }; - - if let Expr::Call(ast::ExprCall { keywords, .. }) = grandparent { - if let Expr::Attribute(ast::ExprAttribute { attr, .. }) = parent { - let attr = attr.as_str(); - // Ex) `datetime.strptime(...).astimezone()` - if attr == "astimezone" { - return; - } - - // Ex) `datetime.strptime(...).replace(tzinfo=UTC)` - if attr == "replace" { - if has_non_none_keyword(keywords, "tzinfo") { - return; - } - } - } - } - - checker - .diagnostics - .push(Diagnostic::new(CallDatetimeStrptimeWithoutZone, location)); -} - -/// Checks for `datetime.date.today()`. 
(DTZ011) -/// -/// ## Why is this bad? -/// -/// It uses the system local timezone. -/// Use `datetime.datetime.now(tz=).date()` instead. -pub(crate) fn call_date_today(checker: &mut Checker, func: &Expr, location: TextRange) { - if checker - .ctx - .resolve_call_path(func) - .map_or(false, |call_path| { - call_path.as_slice() == ["datetime", "date", "today"] - }) - { - checker - .diagnostics - .push(Diagnostic::new(CallDateToday, location)); - } -} - -/// Checks for `datetime.date.fromtimestamp()`. (DTZ012) -/// -/// ## Why is this bad? -/// -/// It uses the system local timezone. -/// Use `datetime.datetime.fromtimestamp(, tz=).date()` instead. -pub(crate) fn call_date_fromtimestamp(checker: &mut Checker, func: &Expr, location: TextRange) { - if checker - .ctx - .resolve_call_path(func) - .map_or(false, |call_path| { - call_path.as_slice() == ["datetime", "date", "fromtimestamp"] - }) - { - checker - .diagnostics - .push(Diagnostic::new(CallDateFromtimestamp, location)); - } -} diff --git a/crates/ruff/src/rules/flake8_datetimez/rules/call_date_fromtimestamp.rs b/crates/ruff/src/rules/flake8_datetimez/rules/call_date_fromtimestamp.rs new file mode 100644 index 0000000000000..30298012163f7 --- /dev/null +++ b/crates/ruff/src/rules/flake8_datetimez/rules/call_date_fromtimestamp.rs @@ -0,0 +1,40 @@ +use ruff_text_size::TextRange; +use rustpython_parser::ast::Expr; + +use ruff_diagnostics::{Diagnostic, Violation}; +use ruff_macros::{derive_message_formats, violation}; + +use crate::checkers::ast::Checker; + +#[violation] +pub struct CallDateFromtimestamp; + +impl Violation for CallDateFromtimestamp { + #[derive_message_formats] + fn message(&self) -> String { + format!( + "The use of `datetime.date.fromtimestamp()` is not allowed, use \ + `datetime.datetime.fromtimestamp(ts, tz=).date()` instead" + ) + } +} + +/// Checks for `datetime.date.fromtimestamp()`. (DTZ012) +/// +/// ## Why is this bad? +/// +/// It uses the system local timezone. +/// Use `datetime.datetime.fromtimestamp(, tz=).date()` instead. +pub(crate) fn call_date_fromtimestamp(checker: &mut Checker, func: &Expr, location: TextRange) { + if checker + .semantic_model() + .resolve_call_path(func) + .map_or(false, |call_path| { + call_path.as_slice() == ["datetime", "date", "fromtimestamp"] + }) + { + checker + .diagnostics + .push(Diagnostic::new(CallDateFromtimestamp, location)); + } +} diff --git a/crates/ruff/src/rules/flake8_datetimez/rules/call_date_today.rs b/crates/ruff/src/rules/flake8_datetimez/rules/call_date_today.rs new file mode 100644 index 0000000000000..461fa067618f3 --- /dev/null +++ b/crates/ruff/src/rules/flake8_datetimez/rules/call_date_today.rs @@ -0,0 +1,40 @@ +use ruff_text_size::TextRange; +use rustpython_parser::ast::Expr; + +use ruff_diagnostics::{Diagnostic, Violation}; +use ruff_macros::{derive_message_formats, violation}; + +use crate::checkers::ast::Checker; + +#[violation] +pub struct CallDateToday; + +impl Violation for CallDateToday { + #[derive_message_formats] + fn message(&self) -> String { + format!( + "The use of `datetime.date.today()` is not allowed, use \ + `datetime.datetime.now(tz=).date()` instead" + ) + } +} + +/// Checks for `datetime.date.today()`. (DTZ011) +/// +/// ## Why is this bad? +/// +/// It uses the system local timezone. +/// Use `datetime.datetime.now(tz=).date()` instead. 
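As with the other DTZ rules, the doc comment above frames the problem as an implicit dependence on the system local timezone. For illustration, the flagged call and one possible replacement (UTC is only an example choice; the rule leaves the timezone to the caller):

```python
from datetime import date, datetime, timezone

# Flagged (DTZ011): the result silently depends on the machine's local timezone.
local_today = date.today()

# Replacement suggested by the doc comment, with the timezone made explicit.
utc_today = datetime.now(tz=timezone.utc).date()

print(local_today, utc_today)  # the two can differ near midnight, which is the point of the rule
```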
+pub(crate) fn call_date_today(checker: &mut Checker, func: &Expr, location: TextRange) { + if checker + .semantic_model() + .resolve_call_path(func) + .map_or(false, |call_path| { + call_path.as_slice() == ["datetime", "date", "today"] + }) + { + checker + .diagnostics + .push(Diagnostic::new(CallDateToday, location)); + } +} diff --git a/crates/ruff/src/rules/flake8_datetimez/rules/call_datetime_fromtimestamp.rs b/crates/ruff/src/rules/flake8_datetimez/rules/call_datetime_fromtimestamp.rs new file mode 100644 index 0000000000000..4ff08ba885dee --- /dev/null +++ b/crates/ruff/src/rules/flake8_datetimez/rules/call_datetime_fromtimestamp.rs @@ -0,0 +1,62 @@ +use ruff_text_size::TextRange; +use rustpython_parser::ast::{Expr, Keyword}; + +use ruff_diagnostics::{Diagnostic, Violation}; +use ruff_macros::{derive_message_formats, violation}; +use ruff_python_ast::helpers::{has_non_none_keyword, is_const_none}; + +use crate::checkers::ast::Checker; + +#[violation] +pub struct CallDatetimeFromtimestamp; + +impl Violation for CallDatetimeFromtimestamp { + #[derive_message_formats] + fn message(&self) -> String { + format!( + "The use of `datetime.datetime.fromtimestamp()` without `tz` argument is not allowed" + ) + } +} + +/// DTZ006 +pub(crate) fn call_datetime_fromtimestamp( + checker: &mut Checker, + func: &Expr, + args: &[Expr], + keywords: &[Keyword], + location: TextRange, +) { + if !checker + .semantic_model() + .resolve_call_path(func) + .map_or(false, |call_path| { + call_path.as_slice() == ["datetime", "datetime", "fromtimestamp"] + }) + { + return; + } + + // no args / no args unqualified + if args.len() < 2 && keywords.is_empty() { + checker + .diagnostics + .push(Diagnostic::new(CallDatetimeFromtimestamp, location)); + return; + } + + // none args + if args.len() > 1 && is_const_none(&args[1]) { + checker + .diagnostics + .push(Diagnostic::new(CallDatetimeFromtimestamp, location)); + return; + } + + // wrong keywords / none keyword + if !keywords.is_empty() && !has_non_none_keyword(keywords, "tz") { + checker + .diagnostics + .push(Diagnostic::new(CallDatetimeFromtimestamp, location)); + } +} diff --git a/crates/ruff/src/rules/flake8_datetimez/rules/call_datetime_now_without_tzinfo.rs b/crates/ruff/src/rules/flake8_datetimez/rules/call_datetime_now_without_tzinfo.rs new file mode 100644 index 0000000000000..46202fb6a2ba6 --- /dev/null +++ b/crates/ruff/src/rules/flake8_datetimez/rules/call_datetime_now_without_tzinfo.rs @@ -0,0 +1,60 @@ +use ruff_text_size::TextRange; +use rustpython_parser::ast::{Expr, Keyword}; + +use ruff_diagnostics::{Diagnostic, Violation}; +use ruff_macros::{derive_message_formats, violation}; +use ruff_python_ast::helpers::{has_non_none_keyword, is_const_none}; + +use crate::checkers::ast::Checker; + +#[violation] +pub struct CallDatetimeNowWithoutTzinfo; + +impl Violation for CallDatetimeNowWithoutTzinfo { + #[derive_message_formats] + fn message(&self) -> String { + format!("The use of `datetime.datetime.now()` without `tz` argument is not allowed") + } +} + +/// DTZ005 +pub(crate) fn call_datetime_now_without_tzinfo( + checker: &mut Checker, + func: &Expr, + args: &[Expr], + keywords: &[Keyword], + location: TextRange, +) { + if !checker + .semantic_model() + .resolve_call_path(func) + .map_or(false, |call_path| { + call_path.as_slice() == ["datetime", "datetime", "now"] + }) + { + return; + } + + // no args / no args unqualified + if args.is_empty() && keywords.is_empty() { + checker + .diagnostics + .push(Diagnostic::new(CallDatetimeNowWithoutTzinfo, 
location)); + return; + } + + // none args + if !args.is_empty() && is_const_none(&args[0]) { + checker + .diagnostics + .push(Diagnostic::new(CallDatetimeNowWithoutTzinfo, location)); + return; + } + + // wrong keywords / none keyword + if !keywords.is_empty() && !has_non_none_keyword(keywords, "tz") { + checker + .diagnostics + .push(Diagnostic::new(CallDatetimeNowWithoutTzinfo, location)); + } +} diff --git a/crates/ruff/src/rules/flake8_datetimez/rules/call_datetime_strptime_without_zone.rs b/crates/ruff/src/rules/flake8_datetimez/rules/call_datetime_strptime_without_zone.rs new file mode 100644 index 0000000000000..2cc4fb0b996a6 --- /dev/null +++ b/crates/ruff/src/rules/flake8_datetimez/rules/call_datetime_strptime_without_zone.rs @@ -0,0 +1,80 @@ +use ruff_text_size::TextRange; +use rustpython_parser::ast::{self, Constant, Expr}; + +use ruff_diagnostics::{Diagnostic, Violation}; +use ruff_macros::{derive_message_formats, violation}; +use ruff_python_ast::helpers::has_non_none_keyword; + +use crate::checkers::ast::Checker; + +#[violation] +pub struct CallDatetimeStrptimeWithoutZone; + +impl Violation for CallDatetimeStrptimeWithoutZone { + #[derive_message_formats] + fn message(&self) -> String { + format!( + "The use of `datetime.datetime.strptime()` without %z must be followed by \ + `.replace(tzinfo=)` or `.astimezone()`" + ) + } +} + +/// DTZ007 +pub(crate) fn call_datetime_strptime_without_zone( + checker: &mut Checker, + func: &Expr, + args: &[Expr], + location: TextRange, +) { + if !checker + .semantic_model() + .resolve_call_path(func) + .map_or(false, |call_path| { + call_path.as_slice() == ["datetime", "datetime", "strptime"] + }) + { + return; + } + + // Does the `strptime` call contain a format string with a timezone specifier? + if let Some(Expr::Constant(ast::ExprConstant { + value: Constant::Str(format), + kind: None, + range: _, + })) = args.get(1).as_ref() + { + if format.contains("%z") { + return; + } + }; + + let (Some(grandparent), Some(parent)) = (checker.semantic_model().expr_grandparent(), checker.semantic_model().expr_parent()) else { + checker.diagnostics.push(Diagnostic::new( + CallDatetimeStrptimeWithoutZone, + location, + )); + return; + }; + + if let Expr::Call(ast::ExprCall { keywords, .. }) = grandparent { + if let Expr::Attribute(ast::ExprAttribute { attr, .. 
}) = parent { + let attr = attr.as_str(); + // Ex) `datetime.strptime(...).astimezone()` + if attr == "astimezone" { + return; + } + + // Ex) `datetime.strptime(...).replace(tzinfo=UTC)` + if attr == "replace" { + if has_non_none_keyword(keywords, "tzinfo") { + return; + } + } + } + } + + checker + .diagnostics + .push(Diagnostic::new(CallDatetimeStrptimeWithoutZone, location)); +} diff --git a/crates/ruff/src/rules/flake8_datetimez/rules/call_datetime_today.rs b/crates/ruff/src/rules/flake8_datetimez/rules/call_datetime_today.rs new file mode 100644 index 0000000000000..5c42c7c176b48 --- /dev/null +++ b/crates/ruff/src/rules/flake8_datetimez/rules/call_datetime_today.rs @@ -0,0 +1,40 @@ +use ruff_text_size::TextRange; +use rustpython_parser::ast::Expr; + +use ruff_diagnostics::{Diagnostic, Violation}; +use ruff_macros::{derive_message_formats, violation}; + +use crate::checkers::ast::Checker; + +#[violation] +pub struct CallDatetimeToday; + +impl Violation for CallDatetimeToday { + #[derive_message_formats] + fn message(&self) -> String { + format!( + "The use of `datetime.datetime.today()` is not allowed, use \ + `datetime.datetime.now(tz=)` instead" + ) + } +} + +/// Checks for `datetime.datetime.today()`. (DTZ002) +/// +/// ## Why is this bad? +/// +/// It uses the system local timezone. +/// Use `datetime.datetime.now(tz=)` instead. +pub(crate) fn call_datetime_today(checker: &mut Checker, func: &Expr, location: TextRange) { + if checker + .semantic_model() + .resolve_call_path(func) + .map_or(false, |call_path| { + call_path.as_slice() == ["datetime", "datetime", "today"] + }) + { + checker + .diagnostics + .push(Diagnostic::new(CallDatetimeToday, location)); + } +} diff --git a/crates/ruff/src/rules/flake8_datetimez/rules/call_datetime_utcfromtimestamp.rs b/crates/ruff/src/rules/flake8_datetimez/rules/call_datetime_utcfromtimestamp.rs new file mode 100644 index 0000000000000..ffa73e5416b4f --- /dev/null +++ b/crates/ruff/src/rules/flake8_datetimez/rules/call_datetime_utcfromtimestamp.rs @@ -0,0 +1,47 @@ +use ruff_text_size::TextRange; +use rustpython_parser::ast::Expr; + +use ruff_diagnostics::{Diagnostic, Violation}; +use ruff_macros::{derive_message_formats, violation}; + +use crate::checkers::ast::Checker; + +#[violation] +pub struct CallDatetimeUtcfromtimestamp; + +impl Violation for CallDatetimeUtcfromtimestamp { + #[derive_message_formats] + fn message(&self) -> String { + format!( + "The use of `datetime.datetime.utcfromtimestamp()` is not allowed, use \ + `datetime.datetime.fromtimestamp(ts, tz=)` instead" + ) + } +} + +/// Checks for `datetime.datetime.utcfromtimestamp()`. (DTZ004) +/// +/// ## Why is this bad? +/// +/// Because naive `datetime` objects are treated by many `datetime` methods as +/// local times, it is preferred to use aware datetimes to represent times in +/// UTC. As such, the recommended way to create an object representing a +/// specific timestamp in UTC is by calling `datetime.fromtimestamp(timestamp, +/// tz=timezone.utc)`. 
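Looking back at the DTZ007 logic reproduced just above (`call_datetime_strptime_without_zone`): the rule is satisfied either by a `%z` directive in the format string or by an immediate `.replace(tzinfo=...)` / `.astimezone()` on the parsed value. A Python sketch of the flagged and accepted forms:

```python
from datetime import datetime, timezone

raw = "2023-05-01 12:00:00"

# Flagged: no %z in the format and no immediate .replace(tzinfo=...) / .astimezone().
naive = datetime.strptime(raw, "%Y-%m-%d %H:%M:%S")

# Accepted forms, mirroring the early returns in the rule.
with_offset = datetime.strptime(raw + "+0000", "%Y-%m-%d %H:%M:%S%z")
replaced = datetime.strptime(raw, "%Y-%m-%d %H:%M:%S").replace(tzinfo=timezone.utc)
localized = datetime.strptime(raw, "%Y-%m-%d %H:%M:%S").astimezone()

assert naive.tzinfo is None
assert all(d.tzinfo is not None for d in (with_offset, replaced, localized))
```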
+pub(crate) fn call_datetime_utcfromtimestamp( + checker: &mut Checker, + func: &Expr, + location: TextRange, +) { + if checker + .semantic_model() + .resolve_call_path(func) + .map_or(false, |call_path| { + call_path.as_slice() == ["datetime", "datetime", "utcfromtimestamp"] + }) + { + checker + .diagnostics + .push(Diagnostic::new(CallDatetimeUtcfromtimestamp, location)); + } +} diff --git a/crates/ruff/src/rules/flake8_datetimez/rules/call_datetime_utcnow.rs b/crates/ruff/src/rules/flake8_datetimez/rules/call_datetime_utcnow.rs new file mode 100644 index 0000000000000..33d89f867334c --- /dev/null +++ b/crates/ruff/src/rules/flake8_datetimez/rules/call_datetime_utcnow.rs @@ -0,0 +1,42 @@ +use ruff_text_size::TextRange; +use rustpython_parser::ast::Expr; + +use ruff_diagnostics::{Diagnostic, Violation}; +use ruff_macros::{derive_message_formats, violation}; + +use crate::checkers::ast::Checker; + +#[violation] +pub struct CallDatetimeUtcnow; + +impl Violation for CallDatetimeUtcnow { + #[derive_message_formats] + fn message(&self) -> String { + format!( + "The use of `datetime.datetime.utcnow()` is not allowed, use \ + `datetime.datetime.now(tz=)` instead" + ) + } +} + +/// Checks for `datetime.datetime.today()`. (DTZ003) +/// +/// ## Why is this bad? +/// +/// Because naive `datetime` objects are treated by many `datetime` methods as +/// local times, it is preferred to use aware datetimes to represent times in +/// UTC. As such, the recommended way to create an object representing the +/// current time in UTC is by calling `datetime.now(timezone.utc)`. +pub(crate) fn call_datetime_utcnow(checker: &mut Checker, func: &Expr, location: TextRange) { + if checker + .semantic_model() + .resolve_call_path(func) + .map_or(false, |call_path| { + call_path.as_slice() == ["datetime", "datetime", "utcnow"] + }) + { + checker + .diagnostics + .push(Diagnostic::new(CallDatetimeUtcnow, location)); + } +} diff --git a/crates/ruff/src/rules/flake8_datetimez/rules/call_datetime_without_tzinfo.rs b/crates/ruff/src/rules/flake8_datetimez/rules/call_datetime_without_tzinfo.rs new file mode 100644 index 0000000000000..af7eabf837a9c --- /dev/null +++ b/crates/ruff/src/rules/flake8_datetimez/rules/call_datetime_without_tzinfo.rs @@ -0,0 +1,51 @@ +use ruff_text_size::TextRange; +use rustpython_parser::ast::{Expr, Keyword}; + +use ruff_diagnostics::{Diagnostic, Violation}; +use ruff_macros::{derive_message_formats, violation}; +use ruff_python_ast::helpers::{has_non_none_keyword, is_const_none}; + +use crate::checkers::ast::Checker; + +#[violation] +pub struct CallDatetimeWithoutTzinfo; + +impl Violation for CallDatetimeWithoutTzinfo { + #[derive_message_formats] + fn message(&self) -> String { + format!("The use of `datetime.datetime()` without `tzinfo` argument is not allowed") + } +} + +pub(crate) fn call_datetime_without_tzinfo( + checker: &mut Checker, + func: &Expr, + args: &[Expr], + keywords: &[Keyword], + location: TextRange, +) { + if !checker + .semantic_model() + .resolve_call_path(func) + .map_or(false, |call_path| { + call_path.as_slice() == ["datetime", "datetime"] + }) + { + return; + } + + // No positional arg: keyword is missing or constant None. + if args.len() < 8 && !has_non_none_keyword(keywords, "tzinfo") { + checker + .diagnostics + .push(Diagnostic::new(CallDatetimeWithoutTzinfo, location)); + return; + } + + // Positional arg: is constant None. 
+ if args.len() >= 8 && is_const_none(&args[7]) { + checker + .diagnostics + .push(Diagnostic::new(CallDatetimeWithoutTzinfo, location)); + } +} diff --git a/crates/ruff/src/rules/flake8_datetimez/rules/mod.rs b/crates/ruff/src/rules/flake8_datetimez/rules/mod.rs new file mode 100644 index 0000000000000..8cfa09cdccf30 --- /dev/null +++ b/crates/ruff/src/rules/flake8_datetimez/rules/mod.rs @@ -0,0 +1,29 @@ +pub(crate) use call_date_fromtimestamp::{call_date_fromtimestamp, CallDateFromtimestamp}; +pub(crate) use call_date_today::{call_date_today, CallDateToday}; +pub(crate) use call_datetime_fromtimestamp::{ + call_datetime_fromtimestamp, CallDatetimeFromtimestamp, +}; +pub(crate) use call_datetime_now_without_tzinfo::{ + call_datetime_now_without_tzinfo, CallDatetimeNowWithoutTzinfo, +}; +pub(crate) use call_datetime_strptime_without_zone::{ + call_datetime_strptime_without_zone, CallDatetimeStrptimeWithoutZone, +}; +pub(crate) use call_datetime_today::{call_datetime_today, CallDatetimeToday}; +pub(crate) use call_datetime_utcfromtimestamp::{ + call_datetime_utcfromtimestamp, CallDatetimeUtcfromtimestamp, +}; +pub(crate) use call_datetime_utcnow::{call_datetime_utcnow, CallDatetimeUtcnow}; +pub(crate) use call_datetime_without_tzinfo::{ + call_datetime_without_tzinfo, CallDatetimeWithoutTzinfo, +}; + +mod call_date_fromtimestamp; +mod call_date_today; +mod call_datetime_fromtimestamp; +mod call_datetime_now_without_tzinfo; +mod call_datetime_strptime_without_zone; +mod call_datetime_today; +mod call_datetime_utcfromtimestamp; +mod call_datetime_utcnow; +mod call_datetime_without_tzinfo; diff --git a/crates/ruff/src/rules/flake8_debugger/mod.rs b/crates/ruff/src/rules/flake8_debugger/mod.rs index 799d434b6c514..29a40913ca6d0 100644 --- a/crates/ruff/src/rules/flake8_debugger/mod.rs +++ b/crates/ruff/src/rules/flake8_debugger/mod.rs @@ -7,7 +7,6 @@ mod tests { use std::path::Path; use anyhow::Result; - use test_case::test_case; use crate::registry::Rule; diff --git a/crates/ruff/src/rules/flake8_debugger/rules.rs b/crates/ruff/src/rules/flake8_debugger/rules/debugger.rs similarity index 91% rename from crates/ruff/src/rules/flake8_debugger/rules.rs rename to crates/ruff/src/rules/flake8_debugger/rules/debugger.rs index 22036b1942d5d..0dfdc9ce83629 100644 --- a/crates/ruff/src/rules/flake8_debugger/rules.rs +++ b/crates/ruff/src/rules/flake8_debugger/rules/debugger.rs @@ -1,11 +1,12 @@ use rustpython_parser::ast::{Expr, Ranged, Stmt}; -use crate::checkers::ast::Checker; -use crate::rules::flake8_debugger::types::DebuggerUsingType; use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; use ruff_python_ast::call_path::{format_call_path, from_unqualified_name, CallPath}; +use crate::checkers::ast::Checker; +use crate::rules::flake8_debugger::types::DebuggerUsingType; + #[violation] pub struct Debugger { using_type: DebuggerUsingType, @@ -42,11 +43,15 @@ const DEBUGGERS: &[&[&str]] = &[ /// Checks for the presence of a debugger call. 
pub(crate) fn debugger_call(checker: &mut Checker, expr: &Expr, func: &Expr) { - if let Some(target) = checker.ctx.resolve_call_path(func).and_then(|call_path| { - DEBUGGERS - .iter() - .find(|target| call_path.as_slice() == **target) - }) { + if let Some(target) = checker + .semantic_model() + .resolve_call_path(func) + .and_then(|call_path| { + DEBUGGERS + .iter() + .find(|target| call_path.as_slice() == **target) + }) + { checker.diagnostics.push(Diagnostic::new( Debugger { using_type: DebuggerUsingType::Call(format_call_path(target)), diff --git a/crates/ruff/src/rules/flake8_debugger/rules/mod.rs b/crates/ruff/src/rules/flake8_debugger/rules/mod.rs new file mode 100644 index 0000000000000..1dda738e6a842 --- /dev/null +++ b/crates/ruff/src/rules/flake8_debugger/rules/mod.rs @@ -0,0 +1,3 @@ +pub(crate) use debugger::{debugger_call, debugger_import, Debugger}; + +mod debugger; diff --git a/crates/ruff/src/rules/flake8_django/mod.rs b/crates/ruff/src/rules/flake8_django/mod.rs index 321373db80e6f..ec6ad0f098d2b 100644 --- a/crates/ruff/src/rules/flake8_django/mod.rs +++ b/crates/ruff/src/rules/flake8_django/mod.rs @@ -6,7 +6,6 @@ mod tests { use std::path::Path; use anyhow::Result; - use test_case::test_case; use crate::registry::Rule; diff --git a/crates/ruff/src/rules/flake8_django/rules/all_with_model_form.rs b/crates/ruff/src/rules/flake8_django/rules/all_with_model_form.rs index 410f83fbfa135..da019f071aa32 100644 --- a/crates/ruff/src/rules/flake8_django/rules/all_with_model_form.rs +++ b/crates/ruff/src/rules/flake8_django/rules/all_with_model_form.rs @@ -52,7 +52,10 @@ pub(crate) fn all_with_model_form( bases: &[Expr], body: &[Stmt], ) -> Option { - if !bases.iter().any(|base| is_model_form(&checker.ctx, base)) { + if !bases + .iter() + .any(|base| is_model_form(checker.semantic_model(), base)) + { return None; } for element in body.iter() { diff --git a/crates/ruff/src/rules/flake8_django/rules/exclude_with_model_form.rs b/crates/ruff/src/rules/flake8_django/rules/exclude_with_model_form.rs index bc3ae08909d95..af229a6a326a0 100644 --- a/crates/ruff/src/rules/flake8_django/rules/exclude_with_model_form.rs +++ b/crates/ruff/src/rules/flake8_django/rules/exclude_with_model_form.rs @@ -50,7 +50,10 @@ pub(crate) fn exclude_with_model_form( bases: &[Expr], body: &[Stmt], ) -> Option { - if !bases.iter().any(|base| is_model_form(&checker.ctx, base)) { + if !bases + .iter() + .any(|base| is_model_form(checker.semantic_model(), base)) + { return None; } for element in body.iter() { diff --git a/crates/ruff/src/rules/flake8_django/rules/helpers.rs b/crates/ruff/src/rules/flake8_django/rules/helpers.rs index ac73f2143f811..3d0f72e10eb66 100644 --- a/crates/ruff/src/rules/flake8_django/rules/helpers.rs +++ b/crates/ruff/src/rules/flake8_django/rules/helpers.rs @@ -1,25 +1,25 @@ use rustpython_parser::ast::Expr; -use ruff_python_semantic::context::Context; +use ruff_python_semantic::model::SemanticModel; /// Return `true` if a Python class appears to be a Django model, based on its base classes. -pub(crate) fn is_model(context: &Context, base: &Expr) -> bool { - context.resolve_call_path(base).map_or(false, |call_path| { +pub(crate) fn is_model(model: &SemanticModel, base: &Expr) -> bool { + model.resolve_call_path(base).map_or(false, |call_path| { call_path.as_slice() == ["django", "db", "models", "Model"] }) } /// Return `true` if a Python class appears to be a Django model form, based on its base classes. 
-pub(crate) fn is_model_form(context: &Context, base: &Expr) -> bool { - context.resolve_call_path(base).map_or(false, |call_path| { +pub(crate) fn is_model_form(model: &SemanticModel, base: &Expr) -> bool { + model.resolve_call_path(base).map_or(false, |call_path| { call_path.as_slice() == ["django", "forms", "ModelForm"] || call_path.as_slice() == ["django", "forms", "models", "ModelForm"] }) } /// Return `true` if the expression is constructor for a Django model field. -pub(crate) fn is_model_field(context: &Context, expr: &Expr) -> bool { - context.resolve_call_path(expr).map_or(false, |call_path| { +pub(crate) fn is_model_field(model: &SemanticModel, expr: &Expr) -> bool { + model.resolve_call_path(expr).map_or(false, |call_path| { call_path .as_slice() .starts_with(&["django", "db", "models"]) @@ -27,8 +27,11 @@ pub(crate) fn is_model_field(context: &Context, expr: &Expr) -> bool { } /// Return the name of the field type, if the expression is constructor for a Django model field. -pub(crate) fn get_model_field_name<'a>(context: &'a Context, expr: &'a Expr) -> Option<&'a str> { - context.resolve_call_path(expr).and_then(|call_path| { +pub(crate) fn get_model_field_name<'a>( + model: &'a SemanticModel, + expr: &'a Expr, +) -> Option<&'a str> { + model.resolve_call_path(expr).and_then(|call_path| { let call_path = call_path.as_slice(); if !call_path.starts_with(&["django", "db", "models"]) { return None; diff --git a/crates/ruff/src/rules/flake8_django/rules/locals_in_render_function.rs b/crates/ruff/src/rules/flake8_django/rules/locals_in_render_function.rs index 5e3aa9a3091a9..469ff81eb61f3 100644 --- a/crates/ruff/src/rules/flake8_django/rules/locals_in_render_function.rs +++ b/crates/ruff/src/rules/flake8_django/rules/locals_in_render_function.rs @@ -2,6 +2,7 @@ use rustpython_parser::ast::{self, Expr, Keyword, Ranged}; use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; +use ruff_python_semantic::model::SemanticModel; use crate::checkers::ast::Checker; @@ -50,7 +51,7 @@ pub(crate) fn locals_in_render_function( keywords: &[Keyword], ) { if !checker - .ctx + .semantic_model() .resolve_call_path(func) .map_or(false, |call_path| { call_path.as_slice() == ["django", "shortcuts", "render"] @@ -60,7 +61,7 @@ pub(crate) fn locals_in_render_function( } let locals = if args.len() >= 3 { - if !is_locals_call(checker, &args[2]) { + if !is_locals_call(checker.semantic_model(), &args[2]) { return; } &args[2] @@ -68,7 +69,7 @@ pub(crate) fn locals_in_render_function( .iter() .find(|keyword| keyword.arg.as_ref().map_or(false, |arg| arg == "context")) { - if !is_locals_call(checker, &keyword.value) { + if !is_locals_call(checker.semantic_model(), &keyword.value) { return; } &keyword.value @@ -82,12 +83,11 @@ pub(crate) fn locals_in_render_function( )); } -fn is_locals_call(checker: &Checker, expr: &Expr) -> bool { +fn is_locals_call(model: &SemanticModel, expr: &Expr) -> bool { let Expr::Call(ast::ExprCall { func, .. 
}) = expr else { return false }; - checker - .ctx + model .resolve_call_path(func) .map_or(false, |call_path| call_path.as_slice() == ["", "locals"]) } diff --git a/crates/ruff/src/rules/flake8_django/rules/model_without_dunder_str.rs b/crates/ruff/src/rules/flake8_django/rules/model_without_dunder_str.rs index 69853ac571143..5e07cbfa53ccc 100644 --- a/crates/ruff/src/rules/flake8_django/rules/model_without_dunder_str.rs +++ b/crates/ruff/src/rules/flake8_django/rules/model_without_dunder_str.rs @@ -2,6 +2,7 @@ use rustpython_parser::ast::{self, Constant, Expr, Ranged, Stmt}; use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; +use ruff_python_semantic::model::SemanticModel; use crate::checkers::ast::Checker; @@ -55,7 +56,7 @@ pub(crate) fn model_without_dunder_str( body: &[Stmt], class_location: &Stmt, ) -> Option { - if !checker_applies(checker, bases, body) { + if !checker_applies(checker.semantic_model(), bases, body) { return None; } if !has_dunder_method(body) { @@ -79,12 +80,12 @@ fn has_dunder_method(body: &[Stmt]) -> bool { }) } -fn checker_applies(checker: &Checker, bases: &[Expr], body: &[Stmt]) -> bool { +fn checker_applies(model: &SemanticModel, bases: &[Expr], body: &[Stmt]) -> bool { for base in bases.iter() { if is_model_abstract(body) { continue; } - if helpers::is_model(&checker.ctx, base) { + if helpers::is_model(model, base) { return true; } } diff --git a/crates/ruff/src/rules/flake8_django/rules/nullable_model_string_field.rs b/crates/ruff/src/rules/flake8_django/rules/nullable_model_string_field.rs index e73d323785062..d8ccedb65b551 100644 --- a/crates/ruff/src/rules/flake8_django/rules/nullable_model_string_field.rs +++ b/crates/ruff/src/rules/flake8_django/rules/nullable_model_string_field.rs @@ -84,7 +84,7 @@ fn is_nullable_field<'a>(checker: &'a Checker, value: &'a Expr) -> Option<&'a st return None; }; - let Some(valid_field_name) = helpers::get_model_field_name(&checker.ctx, func) else { + let Some(valid_field_name) = helpers::get_model_field_name(checker.semantic_model(), func) else { return None; }; diff --git a/crates/ruff/src/rules/flake8_django/rules/unordered_body_content_in_model.rs b/crates/ruff/src/rules/flake8_django/rules/unordered_body_content_in_model.rs index e8a18c27cbcb2..857ebc96ee9ff 100644 --- a/crates/ruff/src/rules/flake8_django/rules/unordered_body_content_in_model.rs +++ b/crates/ruff/src/rules/flake8_django/rules/unordered_body_content_in_model.rs @@ -4,6 +4,7 @@ use rustpython_parser::ast::{self, Expr, Ranged, Stmt}; use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; +use ruff_python_semantic::model::SemanticModel; use crate::checkers::ast::Checker; @@ -99,11 +100,11 @@ impl fmt::Display for ContentType { } } -fn get_element_type(checker: &Checker, element: &Stmt) -> Option { +fn get_element_type(model: &SemanticModel, element: &Stmt) -> Option { match element { Stmt::Assign(ast::StmtAssign { targets, value, .. }) => { if let Expr::Call(ast::ExprCall { func, .. 
}) = value.as_ref() { - if helpers::is_model_field(&checker.ctx, func) { + if helpers::is_model_field(model, func) { return Some(ContentType::FieldDeclaration); } } @@ -144,13 +145,13 @@ pub(crate) fn unordered_body_content_in_model( ) { if !bases .iter() - .any(|base| helpers::is_model(&checker.ctx, base)) + .any(|base| helpers::is_model(checker.semantic_model(), base)) { return; } let mut elements_type_found = Vec::new(); for element in body.iter() { - let Some(current_element_type) = get_element_type(checker, element) else { + let Some(current_element_type) = get_element_type(checker.semantic_model(), element) else { continue; }; let Some(&element_type) = elements_type_found diff --git a/crates/ruff/src/rules/flake8_errmsg/rules/mod.rs b/crates/ruff/src/rules/flake8_errmsg/rules/mod.rs new file mode 100644 index 0000000000000..8740e30113256 --- /dev/null +++ b/crates/ruff/src/rules/flake8_errmsg/rules/mod.rs @@ -0,0 +1,5 @@ +pub(crate) use string_in_exception::{ + string_in_exception, DotFormatInException, FStringInException, RawStringInException, +}; + +mod string_in_exception; diff --git a/crates/ruff/src/rules/flake8_errmsg/rules.rs b/crates/ruff/src/rules/flake8_errmsg/rules/string_in_exception.rs similarity index 95% rename from crates/ruff/src/rules/flake8_errmsg/rules.rs rename to crates/ruff/src/rules/flake8_errmsg/rules/string_in_exception.rs index 4d749806b6046..11ed03951155a 100644 --- a/crates/ruff/src/rules/flake8_errmsg/rules.rs +++ b/crates/ruff/src/rules/flake8_errmsg/rules/string_in_exception.rs @@ -3,7 +3,6 @@ use rustpython_parser::ast::{self, Constant, Expr, ExprContext, Ranged, Stmt}; use ruff_diagnostics::{AutofixKind, Diagnostic, Edit, Fix, Violation}; use ruff_macros::{derive_message_formats, violation}; - use ruff_python_ast::source_code::{Generator, Stylist}; use ruff_python_ast::whitespace; @@ -230,11 +229,11 @@ pub(crate) fn string_in_exception(checker: &mut Checker, stmt: &Stmt, exc: &Expr value: Constant::Str(string), .. }) => { - if checker.settings.rules.enabled(Rule::RawStringInException) { + if checker.enabled(Rule::RawStringInException) { if string.len() > checker.settings.flake8_errmsg.max_string_length { let indentation = whitespace::indentation(checker.locator, stmt) .and_then(|indentation| { - if checker.ctx.find_binding("msg").is_none() { + if checker.semantic_model().find_binding("msg").is_none() { Some(indentation) } else { None @@ -259,10 +258,10 @@ pub(crate) fn string_in_exception(checker: &mut Checker, stmt: &Stmt, exc: &Expr } // Check for f-strings. Expr::JoinedStr(_) => { - if checker.settings.rules.enabled(Rule::FStringInException) { + if checker.enabled(Rule::FStringInException) { let indentation = whitespace::indentation(checker.locator, stmt).and_then( |indentation| { - if checker.ctx.find_binding("msg").is_none() { + if checker.semantic_model().find_binding("msg").is_none() { Some(indentation) } else { None @@ -286,14 +285,14 @@ pub(crate) fn string_in_exception(checker: &mut Checker, stmt: &Stmt, exc: &Expr } // Check for .format() calls. Expr::Call(ast::ExprCall { func, .. }) => { - if checker.settings.rules.enabled(Rule::DotFormatInException) { + if checker.enabled(Rule::DotFormatInException) { if let Expr::Attribute(ast::ExprAttribute { value, attr, .. 
}) = func.as_ref() { if attr == "format" && value.is_constant_expr() { let indentation = whitespace::indentation(checker.locator, stmt) .and_then(|indentation| { - if checker.ctx.find_binding("msg").is_none() { + if checker.semantic_model().find_binding("msg").is_none() { Some(indentation) } else { None diff --git a/crates/ruff/src/rules/flake8_executable/helpers.rs b/crates/ruff/src/rules/flake8_executable/helpers.rs index 70592af16e7e1..f52e746bd0e61 100644 --- a/crates/ruff/src/rules/flake8_executable/helpers.rs +++ b/crates/ruff/src/rules/flake8_executable/helpers.rs @@ -51,12 +51,12 @@ pub(crate) fn is_executable(filepath: &Path) -> Result { #[cfg(test)] mod tests { + use ruff_text_size::TextSize; + use crate::rules::flake8_executable::helpers::{ extract_shebang, ShebangDirective, SHEBANG_REGEX, }; - use ruff_text_size::TextSize; - #[test] fn shebang_regex() { // Positive cases diff --git a/crates/ruff/src/rules/flake8_executable/mod.rs b/crates/ruff/src/rules/flake8_executable/mod.rs index f37d8f799349e..a38ffbac24d48 100644 --- a/crates/ruff/src/rules/flake8_executable/mod.rs +++ b/crates/ruff/src/rules/flake8_executable/mod.rs @@ -8,7 +8,6 @@ mod tests { use std::path::Path; use anyhow::Result; - use test_case::test_case; use crate::registry::Rule; diff --git a/crates/ruff/src/rules/flake8_executable/rules/shebang_missing.rs b/crates/ruff/src/rules/flake8_executable/rules/shebang_missing.rs index 4466127f9c681..ea33e57d0c9cc 100644 --- a/crates/ruff/src/rules/flake8_executable/rules/shebang_missing.rs +++ b/crates/ruff/src/rules/flake8_executable/rules/shebang_missing.rs @@ -1,8 +1,9 @@ #![allow(unused_imports)] -use ruff_text_size::TextRange; use std::path::Path; +use ruff_text_size::TextRange; + use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; diff --git a/crates/ruff/src/rules/flake8_executable/rules/shebang_not_executable.rs b/crates/ruff/src/rules/flake8_executable/rules/shebang_not_executable.rs index f68ffa5aafa12..6cfcc13887e70 100644 --- a/crates/ruff/src/rules/flake8_executable/rules/shebang_not_executable.rs +++ b/crates/ruff/src/rules/flake8_executable/rules/shebang_not_executable.rs @@ -1,8 +1,9 @@ #![allow(unused_imports)] -use ruff_text_size::{TextLen, TextRange, TextSize}; use std::path::Path; +use ruff_text_size::{TextLen, TextRange, TextSize}; + use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; diff --git a/crates/ruff/src/rules/flake8_future_annotations/rules.rs b/crates/ruff/src/rules/flake8_future_annotations/rules/missing_future_annotations.rs similarity index 91% rename from crates/ruff/src/rules/flake8_future_annotations/rules.rs rename to crates/ruff/src/rules/flake8_future_annotations/rules/missing_future_annotations.rs index 4828164b3af29..28aebb80743f6 100644 --- a/crates/ruff/src/rules/flake8_future_annotations/rules.rs +++ b/crates/ruff/src/rules/flake8_future_annotations/rules/missing_future_annotations.rs @@ -67,11 +67,14 @@ impl Violation for MissingFutureAnnotationsImport { /// FA100 pub(crate) fn missing_future_annotations(checker: &mut Checker, expr: &Expr) { - if let Some(binding) = checker.ctx.resolve_call_path(expr) { + let name = checker + .semantic_model() + .resolve_call_path(expr) + .map(|binding| format_call_path(&binding)); + + if let Some(name) = name { checker.diagnostics.push(Diagnostic::new( - MissingFutureAnnotationsImport { - name: format_call_path(&binding), - }, + MissingFutureAnnotationsImport { name }, expr.range(), )); } diff 
--git a/crates/ruff/src/rules/flake8_future_annotations/rules/mod.rs b/crates/ruff/src/rules/flake8_future_annotations/rules/mod.rs new file mode 100644 index 0000000000000..5a845f4b8a4d9 --- /dev/null +++ b/crates/ruff/src/rules/flake8_future_annotations/rules/mod.rs @@ -0,0 +1,5 @@ +pub(crate) use missing_future_annotations::{ + missing_future_annotations, MissingFutureAnnotationsImport, +}; + +mod missing_future_annotations; diff --git a/crates/ruff/src/rules/flake8_gettext/mod.rs b/crates/ruff/src/rules/flake8_gettext/mod.rs index bc150742908b8..f7d71451076cc 100644 --- a/crates/ruff/src/rules/flake8_gettext/mod.rs +++ b/crates/ruff/src/rules/flake8_gettext/mod.rs @@ -7,7 +7,6 @@ mod tests { use std::path::Path; use anyhow::Result; - use test_case::test_case; use crate::registry::Rule; diff --git a/crates/ruff/src/rules/flake8_gettext/rules.rs b/crates/ruff/src/rules/flake8_gettext/rules.rs deleted file mode 100644 index a9df874c7715a..0000000000000 --- a/crates/ruff/src/rules/flake8_gettext/rules.rs +++ /dev/null @@ -1,87 +0,0 @@ -use rustpython_parser::ast::{self, Constant, Expr, Operator, Ranged}; - -use ruff_diagnostics::{Diagnostic, Violation}; -use ruff_macros::{derive_message_formats, violation}; - -#[violation] -pub struct FStringInGetTextFuncCall; - -impl Violation for FStringInGetTextFuncCall { - #[derive_message_formats] - fn message(&self) -> String { - format!("f-string is resolved before function call; consider `_(\"string %s\") % arg`") - } -} - -#[violation] -pub struct FormatInGetTextFuncCall; - -impl Violation for FormatInGetTextFuncCall { - #[derive_message_formats] - fn message(&self) -> String { - format!("`format` method argument is resolved before function call; consider `_(\"string %s\") % arg`") - } -} -#[violation] -pub struct PrintfInGetTextFuncCall; - -impl Violation for PrintfInGetTextFuncCall { - #[derive_message_formats] - fn message(&self) -> String { - format!("printf-style format is resolved before function call; consider `_(\"string %s\") % arg`") - } -} - -/// Returns true if the [`Expr`] is an internationalization function call. -pub(crate) fn is_gettext_func_call(func: &Expr, functions_names: &[String]) -> bool { - if let Expr::Name(ast::ExprName { id, .. }) = func { - functions_names.contains(id.as_ref()) - } else { - false - } -} - -/// INT001 -pub(crate) fn f_string_in_gettext_func_call(args: &[Expr]) -> Option { - if let Some(first) = args.first() { - if first.is_joined_str_expr() { - return Some(Diagnostic::new(FStringInGetTextFuncCall {}, first.range())); - } - } - None -} - -/// INT002 -pub(crate) fn format_in_gettext_func_call(args: &[Expr]) -> Option { - if let Some(first) = args.first() { - if let Expr::Call(ast::ExprCall { func, .. }) = &first { - if let Expr::Attribute(ast::ExprAttribute { attr, .. }) = func.as_ref() { - if attr == "format" { - return Some(Diagnostic::new(FormatInGetTextFuncCall {}, first.range())); - } - } - } - } - None -} - -/// INT003 -pub(crate) fn printf_in_gettext_func_call(args: &[Expr]) -> Option { - if let Some(first) = args.first() { - if let Expr::BinOp(ast::ExprBinOp { - op: Operator::Mod { .. }, - left, - .. - }) = &first - { - if let Expr::Constant(ast::ExprConstant { - value: Constant::Str(_), - .. 
- }) = left.as_ref() - { - return Some(Diagnostic::new(PrintfInGetTextFuncCall {}, first.range())); - } - } - } - None -} diff --git a/crates/ruff/src/rules/flake8_gettext/rules/f_string_in_gettext_func_call.rs b/crates/ruff/src/rules/flake8_gettext/rules/f_string_in_gettext_func_call.rs new file mode 100644 index 0000000000000..0f6b8c6a6021f --- /dev/null +++ b/crates/ruff/src/rules/flake8_gettext/rules/f_string_in_gettext_func_call.rs @@ -0,0 +1,24 @@ +use rustpython_parser::ast::{Expr, Ranged}; + +use ruff_diagnostics::{Diagnostic, Violation}; +use ruff_macros::{derive_message_formats, violation}; + +#[violation] +pub struct FStringInGetTextFuncCall; + +impl Violation for FStringInGetTextFuncCall { + #[derive_message_formats] + fn message(&self) -> String { + format!("f-string is resolved before function call; consider `_(\"string %s\") % arg`") + } +} + +/// INT001 +pub(crate) fn f_string_in_gettext_func_call(args: &[Expr]) -> Option { + if let Some(first) = args.first() { + if first.is_joined_str_expr() { + return Some(Diagnostic::new(FStringInGetTextFuncCall {}, first.range())); + } + } + None +} diff --git a/crates/ruff/src/rules/flake8_gettext/rules/format_in_gettext_func_call.rs b/crates/ruff/src/rules/flake8_gettext/rules/format_in_gettext_func_call.rs new file mode 100644 index 0000000000000..ec159d03373a1 --- /dev/null +++ b/crates/ruff/src/rules/flake8_gettext/rules/format_in_gettext_func_call.rs @@ -0,0 +1,28 @@ +use rustpython_parser::ast::{self, Expr, Ranged}; + +use ruff_diagnostics::{Diagnostic, Violation}; +use ruff_macros::{derive_message_formats, violation}; + +#[violation] +pub struct FormatInGetTextFuncCall; + +impl Violation for FormatInGetTextFuncCall { + #[derive_message_formats] + fn message(&self) -> String { + format!("`format` method argument is resolved before function call; consider `_(\"string %s\") % arg`") + } +} + +/// INT002 +pub(crate) fn format_in_gettext_func_call(args: &[Expr]) -> Option { + if let Some(first) = args.first() { + if let Expr::Call(ast::ExprCall { func, .. }) = &first { + if let Expr::Attribute(ast::ExprAttribute { attr, .. }) = func.as_ref() { + if attr == "format" { + return Some(Diagnostic::new(FormatInGetTextFuncCall {}, first.range())); + } + } + } + } + None +} diff --git a/crates/ruff/src/rules/flake8_gettext/rules/is_gettext_func_call.rs b/crates/ruff/src/rules/flake8_gettext/rules/is_gettext_func_call.rs new file mode 100644 index 0000000000000..0e539ead9a4dd --- /dev/null +++ b/crates/ruff/src/rules/flake8_gettext/rules/is_gettext_func_call.rs @@ -0,0 +1,10 @@ +use rustpython_parser::ast::{self, Expr}; + +/// Returns true if the [`Expr`] is an internationalization function call. +pub(crate) fn is_gettext_func_call(func: &Expr, functions_names: &[String]) -> bool { + if let Expr::Name(ast::ExprName { id, .. 
}) = func { + functions_names.contains(id.as_ref()) + } else { + false + } +} diff --git a/crates/ruff/src/rules/flake8_gettext/rules/mod.rs b/crates/ruff/src/rules/flake8_gettext/rules/mod.rs new file mode 100644 index 0000000000000..1a80938fd12a1 --- /dev/null +++ b/crates/ruff/src/rules/flake8_gettext/rules/mod.rs @@ -0,0 +1,15 @@ +pub(crate) use f_string_in_gettext_func_call::{ + f_string_in_gettext_func_call, FStringInGetTextFuncCall, +}; +pub(crate) use format_in_gettext_func_call::{ + format_in_gettext_func_call, FormatInGetTextFuncCall, +}; +pub(crate) use is_gettext_func_call::is_gettext_func_call; +pub(crate) use printf_in_gettext_func_call::{ + printf_in_gettext_func_call, PrintfInGetTextFuncCall, +}; + +mod f_string_in_gettext_func_call; +mod format_in_gettext_func_call; +mod is_gettext_func_call; +mod printf_in_gettext_func_call; diff --git a/crates/ruff/src/rules/flake8_gettext/rules/printf_in_gettext_func_call.rs b/crates/ruff/src/rules/flake8_gettext/rules/printf_in_gettext_func_call.rs new file mode 100644 index 0000000000000..088eaa60f83f9 --- /dev/null +++ b/crates/ruff/src/rules/flake8_gettext/rules/printf_in_gettext_func_call.rs @@ -0,0 +1,35 @@ +use rustpython_parser::ast::{self, Constant, Expr, Operator, Ranged}; + +use ruff_diagnostics::{Diagnostic, Violation}; +use ruff_macros::{derive_message_formats, violation}; + +#[violation] +pub struct PrintfInGetTextFuncCall; + +impl Violation for PrintfInGetTextFuncCall { + #[derive_message_formats] + fn message(&self) -> String { + format!("printf-style format is resolved before function call; consider `_(\"string %s\") % arg`") + } +} + +/// INT003 +pub(crate) fn printf_in_gettext_func_call(args: &[Expr]) -> Option { + if let Some(first) = args.first() { + if let Expr::BinOp(ast::ExprBinOp { + op: Operator::Mod { .. }, + left, + .. + }) = &first + { + if let Expr::Constant(ast::ExprConstant { + value: Constant::Str(_), + .. + }) = left.as_ref() + { + return Some(Diagnostic::new(PrintfInGetTextFuncCall {}, first.range())); + } + } + } + None +} diff --git a/crates/ruff/src/rules/flake8_implicit_str_concat/mod.rs b/crates/ruff/src/rules/flake8_implicit_str_concat/mod.rs index 4dd5497135611..d5ddd4b37df24 100644 --- a/crates/ruff/src/rules/flake8_implicit_str_concat/mod.rs +++ b/crates/ruff/src/rules/flake8_implicit_str_concat/mod.rs @@ -7,7 +7,6 @@ mod tests { use std::path::Path; use anyhow::Result; - use test_case::test_case; use crate::registry::Rule; diff --git a/crates/ruff/src/rules/flake8_implicit_str_concat/rules/explicit.rs b/crates/ruff/src/rules/flake8_implicit_str_concat/rules/explicit.rs new file mode 100644 index 0000000000000..e6b706519ef7a --- /dev/null +++ b/crates/ruff/src/rules/flake8_implicit_str_concat/rules/explicit.rs @@ -0,0 +1,70 @@ +use rustpython_parser::ast::{self, Constant, Expr, Operator, Ranged}; + +use ruff_diagnostics::{Diagnostic, Violation}; +use ruff_macros::{derive_message_formats, violation}; + +/// ## What it does +/// Checks for string literals that are explicitly concatenated (using the +/// `+` operator). +/// +/// ## Why is this bad? +/// For string literals that wrap across multiple lines, implicit string +/// concatenation within parentheses is preferred over explicit +/// concatenation using the `+` operator, as the former is more readable. 
+/// +/// ## Example +/// ```python +/// z = ( +/// "The quick brown fox jumps over the lazy " +/// + "dog" +/// ) +/// ``` +/// +/// Use instead: +/// ```python +/// z = ( +/// "The quick brown fox jumps over the lazy " +/// "dog" +/// ) +/// ``` +#[violation] +pub struct ExplicitStringConcatenation; + +impl Violation for ExplicitStringConcatenation { + #[derive_message_formats] + fn message(&self) -> String { + format!("Explicitly concatenated string should be implicitly concatenated") + } +} + +/// ISC003 +pub(crate) fn explicit(expr: &Expr) -> Option { + if let Expr::BinOp(ast::ExprBinOp { + left, + op, + right, + range: _, + }) = expr + { + if matches!(op, Operator::Add) { + if matches!( + left.as_ref(), + Expr::JoinedStr(_) + | Expr::Constant(ast::ExprConstant { + value: Constant::Str(..) | Constant::Bytes(..), + .. + }) + ) && matches!( + right.as_ref(), + Expr::JoinedStr(_) + | Expr::Constant(ast::ExprConstant { + value: Constant::Str(..) | Constant::Bytes(..), + .. + }) + ) { + return Some(Diagnostic::new(ExplicitStringConcatenation, expr.range())); + } + } + } + None +} diff --git a/crates/ruff/src/rules/flake8_implicit_str_concat/rules.rs b/crates/ruff/src/rules/flake8_implicit_str_concat/rules/implicit.rs similarity index 66% rename from crates/ruff/src/rules/flake8_implicit_str_concat/rules.rs rename to crates/ruff/src/rules/flake8_implicit_str_concat/rules/implicit.rs index 49350098c0c5b..43dac7b3e59f7 100644 --- a/crates/ruff/src/rules/flake8_implicit_str_concat/rules.rs +++ b/crates/ruff/src/rules/flake8_implicit_str_concat/rules/implicit.rs @@ -1,6 +1,5 @@ use itertools::Itertools; use ruff_text_size::TextRange; -use rustpython_parser::ast::{self, Constant, Expr, Operator, Ranged}; use rustpython_parser::lexer::LexResult; use rustpython_parser::Tok; @@ -84,40 +83,6 @@ impl Violation for MultiLineImplicitStringConcatenation { } } -/// ## What it does -/// Checks for string literals that are explicitly concatenated (using the -/// `+` operator). -/// -/// ## Why is this bad? -/// For string literals that wrap across multiple lines, implicit string -/// concatenation within parentheses is preferred over explicit -/// concatenation using the `+` operator, as the former is more readable. -/// -/// ## Example -/// ```python -/// z = ( -/// "The quick brown fox jumps over the lazy " -/// + "dog" -/// ) -/// ``` -/// -/// Use instead: -/// ```python -/// z = ( -/// "The quick brown fox jumps over the lazy " -/// "dog" -/// ) -/// ``` -#[violation] -pub struct ExplicitStringConcatenation; - -impl Violation for ExplicitStringConcatenation { - #[derive_message_formats] - fn message(&self) -> String { - format!("Explicitly concatenated string should be implicitly concatenated") - } -} - /// ISC001, ISC002 pub(crate) fn implicit( tokens: &[LexResult], @@ -150,35 +115,3 @@ pub(crate) fn implicit( } diagnostics } - -/// ISC003 -pub(crate) fn explicit(expr: &Expr) -> Option { - if let Expr::BinOp(ast::ExprBinOp { - left, - op, - right, - range: _, - }) = expr - { - if matches!(op, Operator::Add) { - if matches!( - left.as_ref(), - Expr::JoinedStr(_) - | Expr::Constant(ast::ExprConstant { - value: Constant::Str(..) | Constant::Bytes(..), - .. - }) - ) && matches!( - right.as_ref(), - Expr::JoinedStr(_) - | Expr::Constant(ast::ExprConstant { - value: Constant::Str(..) | Constant::Bytes(..), - .. 
- }) - ) { - return Some(Diagnostic::new(ExplicitStringConcatenation, expr.range())); - } - } - } - None -} diff --git a/crates/ruff/src/rules/flake8_implicit_str_concat/rules/mod.rs b/crates/ruff/src/rules/flake8_implicit_str_concat/rules/mod.rs new file mode 100644 index 0000000000000..605341dc27cb8 --- /dev/null +++ b/crates/ruff/src/rules/flake8_implicit_str_concat/rules/mod.rs @@ -0,0 +1,7 @@ +pub(crate) use explicit::{explicit, ExplicitStringConcatenation}; +pub(crate) use implicit::{ + implicit, MultiLineImplicitStringConcatenation, SingleLineImplicitStringConcatenation, +}; + +mod explicit; +mod implicit; diff --git a/crates/ruff/src/rules/flake8_import_conventions/mod.rs b/crates/ruff/src/rules/flake8_import_conventions/mod.rs index bd07037e43cd4..3df13cbea42ef 100644 --- a/crates/ruff/src/rules/flake8_import_conventions/mod.rs +++ b/crates/ruff/src/rules/flake8_import_conventions/mod.rs @@ -6,11 +6,10 @@ pub mod settings; mod tests { use std::path::Path; - use crate::assert_messages; use anyhow::Result; - use rustc_hash::{FxHashMap, FxHashSet}; + use crate::assert_messages; use crate::registry::Rule; use crate::settings::Settings; use crate::test::test_path; diff --git a/crates/ruff/src/rules/flake8_logging_format/rules.rs b/crates/ruff/src/rules/flake8_logging_format/rules/logging_call.rs similarity index 87% rename from crates/ruff/src/rules/flake8_logging_format/rules.rs rename to crates/ruff/src/rules/flake8_logging_format/rules/logging_call.rs index b30870b3e37d1..410eb43b84b61 100644 --- a/crates/ruff/src/rules/flake8_logging_format/rules.rs +++ b/crates/ruff/src/rules/flake8_logging_format/rules/logging_call.rs @@ -44,14 +44,14 @@ fn check_msg(checker: &mut Checker, msg: &Expr) { // Check for string concatenation and percent format. Expr::BinOp(ast::ExprBinOp { op, .. }) => match op { Operator::Add => { - if checker.settings.rules.enabled(Rule::LoggingStringConcat) { + if checker.enabled(Rule::LoggingStringConcat) { checker .diagnostics .push(Diagnostic::new(LoggingStringConcat, msg.range())); } } Operator::Mod => { - if checker.settings.rules.enabled(Rule::LoggingPercentFormat) { + if checker.enabled(Rule::LoggingPercentFormat) { checker .diagnostics .push(Diagnostic::new(LoggingPercentFormat, msg.range())); @@ -61,7 +61,7 @@ fn check_msg(checker: &mut Checker, msg: &Expr) { }, // Check for f-strings. Expr::JoinedStr(_) => { - if checker.settings.rules.enabled(Rule::LoggingFString) { + if checker.enabled(Rule::LoggingFString) { checker .diagnostics .push(Diagnostic::new(LoggingFString, msg.range())); @@ -69,7 +69,7 @@ fn check_msg(checker: &mut Checker, msg: &Expr) { } // Check for .format() calls. Expr::Call(ast::ExprCall { func, .. }) => { - if checker.settings.rules.enabled(Rule::LoggingStringFormat) { + if checker.enabled(Rule::LoggingStringFormat) { if let Expr::Attribute(ast::ExprAttribute { value, attr, .. }) = func.as_ref() { if attr == "format" && value.is_constant_expr() { checker @@ -106,7 +106,7 @@ fn check_log_record_attr_clash(checker: &mut Checker, extra: &Keyword) { } Expr::Call(ast::ExprCall { func, keywords, .. 
}) => { if checker - .ctx + .semantic_model() .resolve_call_path(func) .map_or(false, |call_path| call_path.as_slice() == ["", "dict"]) { @@ -151,7 +151,7 @@ pub(crate) fn logging_call( args: &[Expr], keywords: &[Keyword], ) { - if !logging::is_logger_candidate(&checker.ctx, func) { + if !logging::is_logger_candidate(func, checker.semantic_model()) { return; } @@ -167,7 +167,7 @@ pub(crate) fn logging_call( } // G010 - if checker.settings.rules.enabled(Rule::LoggingWarn) + if checker.enabled(Rule::LoggingWarn) && matches!( logging_call_type, LoggingCallType::LevelCall(LoggingLevel::Warn) @@ -185,20 +185,17 @@ pub(crate) fn logging_call( } // G101 - if checker.settings.rules.enabled(Rule::LoggingExtraAttrClash) { + if checker.enabled(Rule::LoggingExtraAttrClash) { if let Some(extra) = find_keyword(keywords, "extra") { check_log_record_attr_clash(checker, extra); } } // G201, G202 - if checker.settings.rules.enabled(Rule::LoggingExcInfo) - || checker - .settings - .rules - .enabled(Rule::LoggingRedundantExcInfo) + if checker.enabled(Rule::LoggingExcInfo) + || checker.enabled(Rule::LoggingRedundantExcInfo) { - if !checker.ctx.in_exception_handler() { + if !checker.semantic_model().in_exception_handler() { return; } if let Some(exc_info) = find_keyword(keywords, "exc_info") { @@ -212,7 +209,7 @@ pub(crate) fn logging_call( }) ) || if let Expr::Call(ast::ExprCall { func, .. }) = &exc_info.value { checker - .ctx + .semantic_model() .resolve_call_path(func) .map_or(false, |call_path| { call_path.as_slice() == ["sys", "exc_info"] @@ -226,18 +223,14 @@ pub(crate) fn logging_call( if let LoggingCallType::LevelCall(logging_level) = logging_call_type { match logging_level { LoggingLevel::Error => { - if checker.settings.rules.enabled(Rule::LoggingExcInfo) { + if checker.enabled(Rule::LoggingExcInfo) { checker .diagnostics .push(Diagnostic::new(LoggingExcInfo, level_call_range)); } } LoggingLevel::Exception => { - if checker - .settings - .rules - .enabled(Rule::LoggingRedundantExcInfo) - { + if checker.enabled(Rule::LoggingRedundantExcInfo) { checker.diagnostics.push(Diagnostic::new( LoggingRedundantExcInfo, exc_info.range(), diff --git a/crates/ruff/src/rules/flake8_logging_format/rules/mod.rs b/crates/ruff/src/rules/flake8_logging_format/rules/mod.rs new file mode 100644 index 0000000000000..c0cce2dd57d8c --- /dev/null +++ b/crates/ruff/src/rules/flake8_logging_format/rules/mod.rs @@ -0,0 +1,3 @@ +pub(crate) use logging_call::logging_call; + +mod logging_call; diff --git a/crates/ruff/src/rules/flake8_no_pep420/mod.rs b/crates/ruff/src/rules/flake8_no_pep420/mod.rs index 623cf89c69d35..c6dbdbe60548c 100644 --- a/crates/ruff/src/rules/flake8_no_pep420/mod.rs +++ b/crates/ruff/src/rules/flake8_no_pep420/mod.rs @@ -5,11 +5,10 @@ pub(crate) mod rules; mod tests { use std::path::{Path, PathBuf}; - use crate::assert_messages; use anyhow::Result; - use test_case::test_case; + use crate::assert_messages; use crate::registry::Rule; use crate::settings::Settings; use crate::test::{test_path, test_resource_path}; diff --git a/crates/ruff/src/rules/flake8_no_pep420/rules.rs b/crates/ruff/src/rules/flake8_no_pep420/rules/implicit_namespace_package.rs similarity index 99% rename from crates/ruff/src/rules/flake8_no_pep420/rules.rs rename to crates/ruff/src/rules/flake8_no_pep420/rules/implicit_namespace_package.rs index 406446bdafd23..5c2a8ae4f8580 100644 --- a/crates/ruff/src/rules/flake8_no_pep420/rules.rs +++ b/crates/ruff/src/rules/flake8_no_pep420/rules/implicit_namespace_package.rs @@ -1,6 +1,7 @@ -use 
ruff_text_size::TextRange; use std::path::{Path, PathBuf}; +use ruff_text_size::TextRange; + use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; diff --git a/crates/ruff/src/rules/flake8_no_pep420/rules/mod.rs b/crates/ruff/src/rules/flake8_no_pep420/rules/mod.rs new file mode 100644 index 0000000000000..6b3762cae36a1 --- /dev/null +++ b/crates/ruff/src/rules/flake8_no_pep420/rules/mod.rs @@ -0,0 +1,3 @@ +pub(crate) use implicit_namespace_package::{implicit_namespace_package, ImplicitNamespacePackage}; + +mod implicit_namespace_package; diff --git a/crates/ruff/src/rules/flake8_pie/mod.rs b/crates/ruff/src/rules/flake8_pie/mod.rs index 67925fca2d907..ed9b8730ab94d 100644 --- a/crates/ruff/src/rules/flake8_pie/mod.rs +++ b/crates/ruff/src/rules/flake8_pie/mod.rs @@ -6,7 +6,6 @@ mod tests { use std::path::Path; use anyhow::Result; - use test_case::test_case; use crate::registry::Rule; diff --git a/crates/ruff/src/rules/flake8_pie/rules.rs b/crates/ruff/src/rules/flake8_pie/rules.rs deleted file mode 100644 index 77c04131d8c0a..0000000000000 --- a/crates/ruff/src/rules/flake8_pie/rules.rs +++ /dev/null @@ -1,652 +0,0 @@ -use std::collections::BTreeMap; -use std::iter; - -use itertools::Either::{Left, Right}; -use log::error; -use ruff_text_size::TextRange; -use rustc_hash::FxHashSet; -use rustpython_parser::ast::{ - self, Boolop, Constant, Expr, ExprContext, ExprLambda, Keyword, Ranged, Stmt, -}; - -use ruff_diagnostics::{AlwaysAutofixableViolation, Violation}; -use ruff_diagnostics::{Diagnostic, Edit, Fix}; -use ruff_macros::{derive_message_formats, violation}; -use ruff_python_ast::comparable::ComparableExpr; -use ruff_python_ast::helpers::trailing_comment_start_offset; -use ruff_python_ast::types::RefEquality; -use ruff_python_stdlib::identifiers::is_identifier; - -use crate::autofix::actions::delete_stmt; -use crate::checkers::ast::Checker; -use crate::registry::AsRule; - -/// ## What it does -/// Checks for unnecessary `pass` statements in class and function bodies. -/// where it is not needed syntactically (e.g., when an indented docstring is -/// present). -/// -/// ## Why is this bad? -/// When a function or class definition contains a docstring, an additional -/// `pass` statement is redundant. -/// -/// ## Example -/// ```python -/// def foo(): -/// """Placeholder docstring.""" -/// pass -/// ``` -/// -/// Use instead: -/// ```python -/// def foo(): -/// """Placeholder docstring.""" -/// ``` -/// -/// ## References -/// - [Python documentation](https://docs.python.org/3/reference/simple_stmts.html#the-pass-statement) -#[violation] -pub struct UnnecessaryPass; - -impl AlwaysAutofixableViolation for UnnecessaryPass { - #[derive_message_formats] - fn message(&self) -> String { - format!("Unnecessary `pass` statement") - } - - fn autofix_title(&self) -> String { - "Remove unnecessary `pass`".to_string() - } -} - -/// ## What it does -/// Checks for duplicate field definitions in classes. -/// -/// ## Why is this bad? -/// Defining a field multiple times in a class body is redundant and likely a -/// mistake. -/// -/// ## Example -/// ```python -/// class Person: -/// name = Tom -/// ... -/// name = Ben -/// ``` -/// -/// Use instead: -/// ```python -/// class Person: -/// name = Tom -/// ... 
-/// ``` -#[violation] -pub struct DuplicateClassFieldDefinition(pub String); - -impl AlwaysAutofixableViolation for DuplicateClassFieldDefinition { - #[derive_message_formats] - fn message(&self) -> String { - let DuplicateClassFieldDefinition(name) = self; - format!("Class field `{name}` is defined multiple times") - } - - fn autofix_title(&self) -> String { - let DuplicateClassFieldDefinition(name) = self; - format!("Remove duplicate field definition for `{name}`") - } -} - -/// ## What it does -/// Checks for enums that contain duplicate values. -/// -/// ## Why is this bad? -/// Enum values should be unique. Non-unique values are redundant and likely a -/// mistake. -/// -/// ## Example -/// ```python -/// from enum import Enum -/// -/// -/// class Foo(Enum): -/// A = 1 -/// B = 2 -/// C = 1 -/// ``` -/// -/// Use instead: -/// ```python -/// from enum import Enum -/// -/// -/// class Foo(Enum): -/// A = 1 -/// B = 2 -/// C = 3 -/// ``` -/// -/// ## References -/// - [Python documentation](https://docs.python.org/3/library/enum.html#enum.Enum) -#[violation] -pub struct NonUniqueEnums { - value: String, -} - -impl Violation for NonUniqueEnums { - #[derive_message_formats] - fn message(&self) -> String { - let NonUniqueEnums { value } = self; - format!("Enum contains duplicate value: `{value}`") - } -} - -/// ## What it does -/// Checks for unnecessary dictionary unpacking operators (`**`). -/// -/// ## Why is this bad? -/// Unpacking a dictionary into another dictionary is redundant. The unpacking -/// operator can be removed, making the code more readable. -/// -/// ## Example -/// ```python -/// foo = {"A": 1, "B": 2} -/// bar = {**foo, **{"C": 3}} -/// ``` -/// -/// Use instead: -/// ```python -/// foo = {"A": 1, "B": 2} -/// bar = {**foo, "C": 3} -/// ``` -/// -/// ## References -/// - [Python documentation](https://docs.python.org/3/reference/expressions.html#dictionary-displays) -#[violation] -pub struct UnnecessarySpread; - -impl Violation for UnnecessarySpread { - #[derive_message_formats] - fn message(&self) -> String { - format!("Unnecessary spread `**`") - } -} - -/// ## What it does -/// Checks for `startswith` or `endswith` calls on the same value with -/// different prefixes or suffixes. -/// -/// ## Why is this bad? -/// The `startswith` and `endswith` methods accept tuples of prefixes or -/// suffixes respectively. Passing a tuple of prefixes or suffixes is more -/// more efficient and readable than calling the method multiple times. -/// -/// ## Example -/// ```python -/// msg = "Hello, world!" -/// if msg.startswith("Hello") or msg.startswith("Hi"): -/// print("Greetings!") -/// ``` -/// -/// Use instead: -/// ```python -/// msg = "Hello, world!" -/// if msg.startswith(("Hello", "Hi")): -/// print("Greetings!") -/// ``` -/// -/// ## References -/// - [Python documentation](https://docs.python.org/3/library/stdtypes.html#str.startswith) -/// - [Python documentation](https://docs.python.org/3/library/stdtypes.html#str.endswith) -#[violation] -pub struct MultipleStartsEndsWith { - attr: String, -} - -impl AlwaysAutofixableViolation for MultipleStartsEndsWith { - #[derive_message_formats] - fn message(&self) -> String { - let MultipleStartsEndsWith { attr } = self; - format!("Call `{attr}` once with a `tuple`") - } - - fn autofix_title(&self) -> String { - let MultipleStartsEndsWith { attr } = self; - format!("Merge into a single `{attr}` call") - } -} - -/// ## What it does -/// Checks for unnecessary `dict` kwargs. -/// -/// ## Why is this bad? 
-/// If the `dict` keys are valid identifiers, they can be passed as keyword -/// arguments directly. -/// -/// ## Example -/// ```python -/// def foo(bar): -/// return bar + 1 -/// -/// -/// print(foo(**{"bar": 2})) # prints 3 -/// ``` -/// -/// Use instead: -/// ```python -/// def foo(bar): -/// return bar + 1 -/// -/// -/// print(foo(bar=2)) # prints 3 -/// ``` -/// -/// ## References -/// - [Python documentation](https://docs.python.org/3/reference/expressions.html#dictionary-displays) -/// - [Python documentation](https://docs.python.org/3/reference/expressions.html#calls) -#[violation] -pub struct UnnecessaryDictKwargs; - -impl Violation for UnnecessaryDictKwargs { - #[derive_message_formats] - fn message(&self) -> String { - format!("Unnecessary `dict` kwargs") - } -} - -/// ## What it does -/// Checks for lambdas that can be replaced with the `list` builtin. -/// -/// ## Why is this bad? -/// Using `list` builtin is more readable. -/// -/// ## Example -/// ```python -/// from dataclasses import dataclass, field -/// -/// -/// @dataclass -/// class Foo: -/// bar: list[int] = field(default_factory=lambda: []) -/// ``` -/// -/// Use instead: -/// ```python -/// from dataclasses import dataclass, field -/// -/// -/// @dataclass -/// class Foo: -/// bar: list[int] = field(default_factory=list) -/// ``` -/// -/// ## References -/// - [Python documentation](https://docs.python.org/3/library/functions.html#func-list) -#[violation] -pub struct ReimplementedListBuiltin; - -impl AlwaysAutofixableViolation for ReimplementedListBuiltin { - #[derive_message_formats] - fn message(&self) -> String { - format!("Prefer `list` over useless lambda") - } - - fn autofix_title(&self) -> String { - "Replace with `list`".to_string() - } -} - -/// PIE790 -pub(crate) fn no_unnecessary_pass(checker: &mut Checker, body: &[Stmt]) { - if body.len() > 1 { - // This only catches the case in which a docstring makes a `pass` statement - // redundant. Consider removing all `pass` statements instead. - let docstring_stmt = &body[0]; - let pass_stmt = &body[1]; - let Stmt::Expr(ast::StmtExpr { value, range: _ } )= docstring_stmt else { - return; - }; - if matches!( - value.as_ref(), - Expr::Constant(ast::ExprConstant { - value: Constant::Str(..), - .. - }) - ) { - if pass_stmt.is_pass_stmt() { - let mut diagnostic = Diagnostic::new(UnnecessaryPass, pass_stmt.range()); - if checker.patch(diagnostic.kind.rule()) { - if let Some(index) = trailing_comment_start_offset(pass_stmt, checker.locator) { - #[allow(deprecated)] - diagnostic.set_fix(Fix::unspecified(Edit::range_deletion( - pass_stmt.range().add_end(index), - ))); - } else { - #[allow(deprecated)] - diagnostic.try_set_fix_from_edit(|| { - delete_stmt( - pass_stmt, - None, - &[], - checker.locator, - checker.indexer, - checker.stylist, - ) - }); - } - } - checker.diagnostics.push(diagnostic); - } - } - } -} - -/// PIE794 -pub(crate) fn duplicate_class_field_definition<'a, 'b>( - checker: &mut Checker<'a>, - parent: &'b Stmt, - body: &'b [Stmt], -) where - 'b: 'a, -{ - let mut seen_targets: FxHashSet<&str> = FxHashSet::default(); - for stmt in body { - // Extract the property name from the assignment statement. - let target = match stmt { - Stmt::Assign(ast::StmtAssign { targets, .. }) => { - if targets.len() != 1 { - continue; - } - if let Expr::Name(ast::ExprName { id, .. }) = &targets[0] { - id - } else { - continue; - } - } - Stmt::AnnAssign(ast::StmtAnnAssign { target, .. }) => { - if let Expr::Name(ast::ExprName { id, .. 
}) = target.as_ref() { - id - } else { - continue; - } - } - _ => continue, - }; - - if !seen_targets.insert(target) { - let mut diagnostic = Diagnostic::new( - DuplicateClassFieldDefinition(target.to_string()), - stmt.range(), - ); - if checker.patch(diagnostic.kind.rule()) { - let deleted: Vec<&Stmt> = checker.deletions.iter().map(Into::into).collect(); - let locator = checker.locator; - match delete_stmt( - stmt, - Some(parent), - &deleted, - locator, - checker.indexer, - checker.stylist, - ) { - Ok(fix) => { - checker.deletions.insert(RefEquality(stmt)); - #[allow(deprecated)] - diagnostic.set_fix_from_edit(fix); - } - Err(err) => { - error!("Failed to remove duplicate class definition: {}", err); - } - } - } - checker.diagnostics.push(diagnostic); - } - } -} - -/// PIE796 -pub(crate) fn non_unique_enums<'a, 'b>( - checker: &mut Checker<'a>, - parent: &'b Stmt, - body: &'b [Stmt], -) where - 'b: 'a, -{ - let Stmt::ClassDef(ast::StmtClassDef { bases, .. }) = parent else { - return; - }; - - if !bases.iter().any(|expr| { - checker - .ctx - .resolve_call_path(expr) - .map_or(false, |call_path| call_path.as_slice() == ["enum", "Enum"]) - }) { - return; - } - - let mut seen_targets: FxHashSet = FxHashSet::default(); - for stmt in body { - let Stmt::Assign(ast::StmtAssign { value, .. }) = stmt else { - continue; - }; - - if let Expr::Call(ast::ExprCall { func, .. }) = value.as_ref() { - if checker - .ctx - .resolve_call_path(func) - .map_or(false, |call_path| call_path.as_slice() == ["enum", "auto"]) - { - continue; - } - } - - if !seen_targets.insert(ComparableExpr::from(value)) { - let diagnostic = Diagnostic::new( - NonUniqueEnums { - value: checker.generator().expr(value), - }, - stmt.range(), - ); - checker.diagnostics.push(diagnostic); - } - } -} - -/// PIE800 -pub(crate) fn unnecessary_spread(checker: &mut Checker, keys: &[Option], values: &[Expr]) { - for item in keys.iter().zip(values.iter()) { - if let (None, value) = item { - // We only care about when the key is None which indicates a spread `**` - // inside a dict. - if let Expr::Dict(_) = value { - let diagnostic = Diagnostic::new(UnnecessarySpread, value.range()); - checker.diagnostics.push(diagnostic); - } - } - } -} - -/// Return `true` if a key is a valid keyword argument name. -fn is_valid_kwarg_name(key: &Expr) -> bool { - if let Expr::Constant(ast::ExprConstant { - value: Constant::Str(value), - .. - }) = key - { - is_identifier(value) - } else { - false - } -} - -/// PIE804 -pub(crate) fn unnecessary_dict_kwargs(checker: &mut Checker, expr: &Expr, kwargs: &[Keyword]) { - for kw in kwargs { - // keyword is a spread operator (indicated by None) - if kw.arg.is_none() { - if let Expr::Dict(ast::ExprDict { keys, .. 
}) = &kw.value { - // ensure foo(**{"bar-bar": 1}) doesn't error - if keys.iter().all(|expr| expr.as_ref().map_or(false, is_valid_kwarg_name)) || - // handle case of foo(**{**bar}) - (keys.len() == 1 && keys[0].is_none()) - { - let diagnostic = Diagnostic::new(UnnecessaryDictKwargs, expr.range()); - checker.diagnostics.push(diagnostic); - } - } - } - } -} - -/// PIE810 -pub(crate) fn multiple_starts_ends_with(checker: &mut Checker, expr: &Expr) { - let Expr::BoolOp(ast::ExprBoolOp { op: Boolop::Or, values, range: _ }) = expr else { - return; - }; - - let mut duplicates = BTreeMap::new(); - for (index, call) in values.iter().enumerate() { - let Expr::Call(ast::ExprCall { - func, - args, - keywords, - range: _ - }) = &call else { - continue - }; - - if !(args.len() == 1 && keywords.is_empty()) { - continue; - } - - let Expr::Attribute(ast::ExprAttribute { value, attr, .. } )= func.as_ref() else { - continue - }; - if attr != "startswith" && attr != "endswith" { - continue; - } - - let Expr::Name(ast::ExprName { id: arg_name, .. } )= value.as_ref() else { - continue - }; - - duplicates - .entry((attr.as_str(), arg_name.as_str())) - .or_insert_with(Vec::new) - .push(index); - } - - // Generate a `Diagnostic` for each duplicate. - for ((attr_name, arg_name), indices) in duplicates { - if indices.len() > 1 { - let mut diagnostic = Diagnostic::new( - MultipleStartsEndsWith { - attr: attr_name.to_string(), - }, - expr.range(), - ); - if checker.patch(diagnostic.kind.rule()) { - let words: Vec<&Expr> = indices - .iter() - .map(|index| &values[*index]) - .map(|expr| { - let Expr::Call(ast::ExprCall { func: _, args, keywords: _, range: _}) = expr else { - unreachable!("{}", format!("Indices should only contain `{attr_name}` calls")) - }; - args.get(0) - .unwrap_or_else(|| panic!("`{attr_name}` should have one argument")) - }) - .collect(); - - let node = Expr::Tuple(ast::ExprTuple { - elts: words - .iter() - .flat_map(|value| { - if let Expr::Tuple(ast::ExprTuple { elts, .. }) = value { - Left(elts.iter()) - } else { - Right(iter::once(*value)) - } - }) - .map(Clone::clone) - .collect(), - ctx: ExprContext::Load, - range: TextRange::default(), - }); - let node1 = Expr::Name(ast::ExprName { - id: arg_name.into(), - ctx: ExprContext::Load, - range: TextRange::default(), - }); - let node2 = Expr::Attribute(ast::ExprAttribute { - value: Box::new(node1), - attr: attr_name.into(), - ctx: ExprContext::Load, - range: TextRange::default(), - }); - let node3 = Expr::Call(ast::ExprCall { - func: Box::new(node2), - args: vec![node], - keywords: vec![], - range: TextRange::default(), - }); - let call = node3; - - // Generate the combined `BoolOp`. 
- let mut call = Some(call); - let node = Expr::BoolOp(ast::ExprBoolOp { - op: Boolop::Or, - values: values - .iter() - .enumerate() - .filter_map(|(index, elt)| { - if indices.contains(&index) { - std::mem::take(&mut call) - } else { - Some(elt.clone()) - } - }) - .collect(), - range: TextRange::default(), - }); - let bool_op = node; - #[allow(deprecated)] - diagnostic.set_fix(Fix::unspecified(Edit::range_replacement( - checker.generator().expr(&bool_op), - expr.range(), - ))); - } - checker.diagnostics.push(diagnostic); - } - } -} - -/// PIE807 -pub(crate) fn reimplemented_list_builtin(checker: &mut Checker, expr: &ExprLambda) { - let ExprLambda { - args, - body, - range: _, - } = expr; - - if args.args.is_empty() - && args.kwonlyargs.is_empty() - && args.posonlyargs.is_empty() - && args.vararg.is_none() - && args.kwarg.is_none() - { - if let Expr::List(ast::ExprList { elts, .. }) = body.as_ref() { - if elts.is_empty() { - let mut diagnostic = Diagnostic::new(ReimplementedListBuiltin, expr.range()); - if checker.patch(diagnostic.kind.rule()) { - #[allow(deprecated)] - diagnostic.set_fix(Fix::unspecified(Edit::range_replacement( - "list".to_string(), - expr.range(), - ))); - } - checker.diagnostics.push(diagnostic); - } - } - } -} diff --git a/crates/ruff/src/rules/flake8_pie/rules/duplicate_class_field_definition.rs b/crates/ruff/src/rules/flake8_pie/rules/duplicate_class_field_definition.rs new file mode 100644 index 0000000000000..f75d9928d2b07 --- /dev/null +++ b/crates/ruff/src/rules/flake8_pie/rules/duplicate_class_field_definition.rs @@ -0,0 +1,114 @@ +use log::error; + +use rustc_hash::FxHashSet; +use rustpython_parser::ast::{self, Expr, Ranged, Stmt}; + +use ruff_diagnostics::AlwaysAutofixableViolation; +use ruff_diagnostics::Diagnostic; +use ruff_macros::{derive_message_formats, violation}; + +use ruff_python_ast::types::RefEquality; + +use crate::autofix::actions::delete_stmt; +use crate::checkers::ast::Checker; +use crate::registry::AsRule; + +/// ## What it does +/// Checks for duplicate field definitions in classes. +/// +/// ## Why is this bad? +/// Defining a field multiple times in a class body is redundant and likely a +/// mistake. +/// +/// ## Example +/// ```python +/// class Person: +/// name = Tom +/// ... +/// name = Ben +/// ``` +/// +/// Use instead: +/// ```python +/// class Person: +/// name = Tom +/// ... +/// ``` +#[violation] +pub struct DuplicateClassFieldDefinition(pub String); + +impl AlwaysAutofixableViolation for DuplicateClassFieldDefinition { + #[derive_message_formats] + fn message(&self) -> String { + let DuplicateClassFieldDefinition(name) = self; + format!("Class field `{name}` is defined multiple times") + } + + fn autofix_title(&self) -> String { + let DuplicateClassFieldDefinition(name) = self; + format!("Remove duplicate field definition for `{name}`") + } +} + +/// PIE794 +pub(crate) fn duplicate_class_field_definition<'a, 'b>( + checker: &mut Checker<'a>, + parent: &'b Stmt, + body: &'b [Stmt], +) where + 'b: 'a, +{ + let mut seen_targets: FxHashSet<&str> = FxHashSet::default(); + for stmt in body { + // Extract the property name from the assignment statement. + let target = match stmt { + Stmt::Assign(ast::StmtAssign { targets, .. }) => { + if targets.len() != 1 { + continue; + } + if let Expr::Name(ast::ExprName { id, .. }) = &targets[0] { + id + } else { + continue; + } + } + Stmt::AnnAssign(ast::StmtAnnAssign { target, .. }) => { + if let Expr::Name(ast::ExprName { id, .. 
}) = target.as_ref() { + id + } else { + continue; + } + } + _ => continue, + }; + + if !seen_targets.insert(target) { + let mut diagnostic = Diagnostic::new( + DuplicateClassFieldDefinition(target.to_string()), + stmt.range(), + ); + if checker.patch(diagnostic.kind.rule()) { + let deleted: Vec<&Stmt> = checker.deletions.iter().map(Into::into).collect(); + let locator = checker.locator; + match delete_stmt( + stmt, + Some(parent), + &deleted, + locator, + checker.indexer, + checker.stylist, + ) { + Ok(fix) => { + checker.deletions.insert(RefEquality(stmt)); + #[allow(deprecated)] + diagnostic.set_fix_from_edit(fix); + } + Err(err) => { + error!("Failed to remove duplicate class definition: {}", err); + } + } + } + checker.diagnostics.push(diagnostic); + } + } +} diff --git a/crates/ruff/src/rules/flake8_pie/rules/mod.rs b/crates/ruff/src/rules/flake8_pie/rules/mod.rs new file mode 100644 index 0000000000000..75a94605eedbd --- /dev/null +++ b/crates/ruff/src/rules/flake8_pie/rules/mod.rs @@ -0,0 +1,17 @@ +pub(crate) use duplicate_class_field_definition::{ + duplicate_class_field_definition, DuplicateClassFieldDefinition, +}; +pub(crate) use multiple_starts_ends_with::{multiple_starts_ends_with, MultipleStartsEndsWith}; +pub(crate) use no_unnecessary_pass::{no_unnecessary_pass, UnnecessaryPass}; +pub(crate) use non_unique_enums::{non_unique_enums, NonUniqueEnums}; +pub(crate) use reimplemented_list_builtin::{reimplemented_list_builtin, ReimplementedListBuiltin}; +pub(crate) use unnecessary_dict_kwargs::{unnecessary_dict_kwargs, UnnecessaryDictKwargs}; +pub(crate) use unnecessary_spread::{unnecessary_spread, UnnecessarySpread}; + +mod duplicate_class_field_definition; +mod multiple_starts_ends_with; +mod no_unnecessary_pass; +mod non_unique_enums; +mod reimplemented_list_builtin; +mod unnecessary_dict_kwargs; +mod unnecessary_spread; diff --git a/crates/ruff/src/rules/flake8_pie/rules/multiple_starts_ends_with.rs b/crates/ruff/src/rules/flake8_pie/rules/multiple_starts_ends_with.rs new file mode 100644 index 0000000000000..f00986155f083 --- /dev/null +++ b/crates/ruff/src/rules/flake8_pie/rules/multiple_starts_ends_with.rs @@ -0,0 +1,181 @@ +use std::collections::BTreeMap; +use std::iter; + +use itertools::Either::{Left, Right}; + +use ruff_text_size::TextRange; + +use rustpython_parser::ast::{self, Boolop, Expr, ExprContext, Ranged}; + +use ruff_diagnostics::AlwaysAutofixableViolation; +use ruff_diagnostics::{Diagnostic, Edit, Fix}; +use ruff_macros::{derive_message_formats, violation}; + +use crate::checkers::ast::Checker; +use crate::registry::AsRule; + +/// ## What it does +/// Checks for `startswith` or `endswith` calls on the same value with +/// different prefixes or suffixes. +/// +/// ## Why is this bad? +/// The `startswith` and `endswith` methods accept tuples of prefixes or +/// suffixes respectively. Passing a tuple of prefixes or suffixes is more +/// more efficient and readable than calling the method multiple times. +/// +/// ## Example +/// ```python +/// msg = "Hello, world!" +/// if msg.startswith("Hello") or msg.startswith("Hi"): +/// print("Greetings!") +/// ``` +/// +/// Use instead: +/// ```python +/// msg = "Hello, world!" 
+/// if msg.startswith(("Hello", "Hi")): +/// print("Greetings!") +/// ``` +/// +/// ## References +/// - [Python documentation](https://docs.python.org/3/library/stdtypes.html#str.startswith) +/// - [Python documentation](https://docs.python.org/3/library/stdtypes.html#str.endswith) +#[violation] +pub struct MultipleStartsEndsWith { + attr: String, +} + +impl AlwaysAutofixableViolation for MultipleStartsEndsWith { + #[derive_message_formats] + fn message(&self) -> String { + let MultipleStartsEndsWith { attr } = self; + format!("Call `{attr}` once with a `tuple`") + } + + fn autofix_title(&self) -> String { + let MultipleStartsEndsWith { attr } = self; + format!("Merge into a single `{attr}` call") + } +} + +/// PIE810 +pub(crate) fn multiple_starts_ends_with(checker: &mut Checker, expr: &Expr) { + let Expr::BoolOp(ast::ExprBoolOp { op: Boolop::Or, values, range: _ }) = expr else { + return; + }; + + let mut duplicates = BTreeMap::new(); + for (index, call) in values.iter().enumerate() { + let Expr::Call(ast::ExprCall { + func, + args, + keywords, + range: _ + }) = &call else { + continue + }; + + if !(args.len() == 1 && keywords.is_empty()) { + continue; + } + + let Expr::Attribute(ast::ExprAttribute { value, attr, .. } )= func.as_ref() else { + continue + }; + if attr != "startswith" && attr != "endswith" { + continue; + } + + let Expr::Name(ast::ExprName { id: arg_name, .. } )= value.as_ref() else { + continue + }; + + duplicates + .entry((attr.as_str(), arg_name.as_str())) + .or_insert_with(Vec::new) + .push(index); + } + + // Generate a `Diagnostic` for each duplicate. + for ((attr_name, arg_name), indices) in duplicates { + if indices.len() > 1 { + let mut diagnostic = Diagnostic::new( + MultipleStartsEndsWith { + attr: attr_name.to_string(), + }, + expr.range(), + ); + if checker.patch(diagnostic.kind.rule()) { + let words: Vec<&Expr> = indices + .iter() + .map(|index| &values[*index]) + .map(|expr| { + let Expr::Call(ast::ExprCall { func: _, args, keywords: _, range: _}) = expr else { + unreachable!("{}", format!("Indices should only contain `{attr_name}` calls")) + }; + args.get(0) + .unwrap_or_else(|| panic!("`{attr_name}` should have one argument")) + }) + .collect(); + + let node = Expr::Tuple(ast::ExprTuple { + elts: words + .iter() + .flat_map(|value| { + if let Expr::Tuple(ast::ExprTuple { elts, .. }) = value { + Left(elts.iter()) + } else { + Right(iter::once(*value)) + } + }) + .map(Clone::clone) + .collect(), + ctx: ExprContext::Load, + range: TextRange::default(), + }); + let node1 = Expr::Name(ast::ExprName { + id: arg_name.into(), + ctx: ExprContext::Load, + range: TextRange::default(), + }); + let node2 = Expr::Attribute(ast::ExprAttribute { + value: Box::new(node1), + attr: attr_name.into(), + ctx: ExprContext::Load, + range: TextRange::default(), + }); + let node3 = Expr::Call(ast::ExprCall { + func: Box::new(node2), + args: vec![node], + keywords: vec![], + range: TextRange::default(), + }); + let call = node3; + + // Generate the combined `BoolOp`. 
+ let mut call = Some(call); + let node = Expr::BoolOp(ast::ExprBoolOp { + op: Boolop::Or, + values: values + .iter() + .enumerate() + .filter_map(|(index, elt)| { + if indices.contains(&index) { + std::mem::take(&mut call) + } else { + Some(elt.clone()) + } + }) + .collect(), + range: TextRange::default(), + }); + let bool_op = node; + diagnostic.set_fix(Fix::suggested(Edit::range_replacement( + checker.generator().expr(&bool_op), + expr.range(), + ))); + } + checker.diagnostics.push(diagnostic); + } + } +} diff --git a/crates/ruff/src/rules/flake8_pie/rules/no_unnecessary_pass.rs b/crates/ruff/src/rules/flake8_pie/rules/no_unnecessary_pass.rs new file mode 100644 index 0000000000000..7b08ed371f1dc --- /dev/null +++ b/crates/ruff/src/rules/flake8_pie/rules/no_unnecessary_pass.rs @@ -0,0 +1,93 @@ +use rustpython_parser::ast::{self, Constant, Expr, Ranged, Stmt}; + +use ruff_diagnostics::AlwaysAutofixableViolation; +use ruff_diagnostics::{Diagnostic, Edit, Fix}; +use ruff_macros::{derive_message_formats, violation}; + +use ruff_python_ast::helpers::trailing_comment_start_offset; + +use crate::autofix::actions::delete_stmt; +use crate::checkers::ast::Checker; +use crate::registry::AsRule; + +/// ## What it does +/// Checks for unnecessary `pass` statements in class and function bodies. +/// where it is not needed syntactically (e.g., when an indented docstring is +/// present). +/// +/// ## Why is this bad? +/// When a function or class definition contains a docstring, an additional +/// `pass` statement is redundant. +/// +/// ## Example +/// ```python +/// def foo(): +/// """Placeholder docstring.""" +/// pass +/// ``` +/// +/// Use instead: +/// ```python +/// def foo(): +/// """Placeholder docstring.""" +/// ``` +/// +/// ## References +/// - [Python documentation](https://docs.python.org/3/reference/simple_stmts.html#the-pass-statement) +#[violation] +pub struct UnnecessaryPass; + +impl AlwaysAutofixableViolation for UnnecessaryPass { + #[derive_message_formats] + fn message(&self) -> String { + format!("Unnecessary `pass` statement") + } + + fn autofix_title(&self) -> String { + "Remove unnecessary `pass`".to_string() + } +} + +/// PIE790 +pub(crate) fn no_unnecessary_pass(checker: &mut Checker, body: &[Stmt]) { + if body.len() > 1 { + // This only catches the case in which a docstring makes a `pass` statement + // redundant. Consider removing all `pass` statements instead. + let docstring_stmt = &body[0]; + let pass_stmt = &body[1]; + let Stmt::Expr(ast::StmtExpr { value, range: _ } )= docstring_stmt else { + return; + }; + if matches!( + value.as_ref(), + Expr::Constant(ast::ExprConstant { + value: Constant::Str(..), + .. 
+ }) + ) { + if pass_stmt.is_pass_stmt() { + let mut diagnostic = Diagnostic::new(UnnecessaryPass, pass_stmt.range()); + if checker.patch(diagnostic.kind.rule()) { + if let Some(index) = trailing_comment_start_offset(pass_stmt, checker.locator) { + diagnostic.set_fix(Fix::automatic(Edit::range_deletion( + pass_stmt.range().add_end(index), + ))); + } else { + #[allow(deprecated)] + diagnostic.try_set_fix_from_edit(|| { + delete_stmt( + pass_stmt, + None, + &[], + checker.locator, + checker.indexer, + checker.stylist, + ) + }); + } + } + checker.diagnostics.push(diagnostic); + } + } + } +} diff --git a/crates/ruff/src/rules/flake8_pie/rules/non_unique_enums.rs b/crates/ruff/src/rules/flake8_pie/rules/non_unique_enums.rs new file mode 100644 index 0000000000000..47e1a427a55a5 --- /dev/null +++ b/crates/ruff/src/rules/flake8_pie/rules/non_unique_enums.rs @@ -0,0 +1,102 @@ +use rustc_hash::FxHashSet; +use rustpython_parser::ast::{self, Expr, Ranged, Stmt}; + +use ruff_diagnostics::Diagnostic; +use ruff_diagnostics::Violation; +use ruff_macros::{derive_message_formats, violation}; +use ruff_python_ast::comparable::ComparableExpr; + +use crate::checkers::ast::Checker; + +/// ## What it does +/// Checks for enums that contain duplicate values. +/// +/// ## Why is this bad? +/// Enum values should be unique. Non-unique values are redundant and likely a +/// mistake. +/// +/// ## Example +/// ```python +/// from enum import Enum +/// +/// +/// class Foo(Enum): +/// A = 1 +/// B = 2 +/// C = 1 +/// ``` +/// +/// Use instead: +/// ```python +/// from enum import Enum +/// +/// +/// class Foo(Enum): +/// A = 1 +/// B = 2 +/// C = 3 +/// ``` +/// +/// ## References +/// - [Python documentation](https://docs.python.org/3/library/enum.html#enum.Enum) +#[violation] +pub struct NonUniqueEnums { + value: String, +} + +impl Violation for NonUniqueEnums { + #[derive_message_formats] + fn message(&self) -> String { + let NonUniqueEnums { value } = self; + format!("Enum contains duplicate value: `{value}`") + } +} + +/// PIE796 +pub(crate) fn non_unique_enums<'a, 'b>( + checker: &mut Checker<'a>, + parent: &'b Stmt, + body: &'b [Stmt], +) where + 'b: 'a, +{ + let Stmt::ClassDef(ast::StmtClassDef { bases, .. }) = parent else { + return; + }; + + if !bases.iter().any(|expr| { + checker + .semantic_model() + .resolve_call_path(expr) + .map_or(false, |call_path| call_path.as_slice() == ["enum", "Enum"]) + }) { + return; + } + + let mut seen_targets: FxHashSet = FxHashSet::default(); + for stmt in body { + let Stmt::Assign(ast::StmtAssign { value, .. }) = stmt else { + continue; + }; + + if let Expr::Call(ast::ExprCall { func, .. 
}) = value.as_ref() { + if checker + .semantic_model() + .resolve_call_path(func) + .map_or(false, |call_path| call_path.as_slice() == ["enum", "auto"]) + { + continue; + } + } + + if !seen_targets.insert(ComparableExpr::from(value)) { + let diagnostic = Diagnostic::new( + NonUniqueEnums { + value: checker.generator().expr(value), + }, + stmt.range(), + ); + checker.diagnostics.push(diagnostic); + } + } +} diff --git a/crates/ruff/src/rules/flake8_pie/rules/reimplemented_list_builtin.rs b/crates/ruff/src/rules/flake8_pie/rules/reimplemented_list_builtin.rs new file mode 100644 index 0000000000000..c7e562a36a548 --- /dev/null +++ b/crates/ruff/src/rules/flake8_pie/rules/reimplemented_list_builtin.rs @@ -0,0 +1,83 @@ +use rustpython_parser::ast::{self, Expr, ExprLambda, Ranged}; + +use ruff_diagnostics::{AutofixKind, Violation}; +use ruff_diagnostics::{Diagnostic, Edit, Fix}; +use ruff_macros::{derive_message_formats, violation}; + +use crate::checkers::ast::Checker; +use crate::registry::AsRule; + +/// ## What it does +/// Checks for lambdas that can be replaced with the `list` builtin. +/// +/// ## Why is this bad? +/// Using `list` builtin is more readable. +/// +/// ## Example +/// ```python +/// from dataclasses import dataclass, field +/// +/// +/// @dataclass +/// class Foo: +/// bar: list[int] = field(default_factory=lambda: []) +/// ``` +/// +/// Use instead: +/// ```python +/// from dataclasses import dataclass, field +/// +/// +/// @dataclass +/// class Foo: +/// bar: list[int] = field(default_factory=list) +/// ``` +/// +/// ## References +/// - [Python documentation](https://docs.python.org/3/library/functions.html#func-list) +#[violation] +pub struct ReimplementedListBuiltin; + +impl Violation for ReimplementedListBuiltin { + const AUTOFIX: AutofixKind = AutofixKind::Sometimes; + + #[derive_message_formats] + fn message(&self) -> String { + format!("Prefer `list` over useless lambda") + } + + fn autofix_title(&self) -> Option { + Some("Replace with `list`".to_string()) + } +} + +/// PIE807 +pub(crate) fn reimplemented_list_builtin(checker: &mut Checker, expr: &ExprLambda) { + let ExprLambda { + args, + body, + range: _, + } = expr; + + if args.args.is_empty() + && args.kwonlyargs.is_empty() + && args.posonlyargs.is_empty() + && args.vararg.is_none() + && args.kwarg.is_none() + { + if let Expr::List(ast::ExprList { elts, .. }) = body.as_ref() { + if elts.is_empty() { + let mut diagnostic = Diagnostic::new(ReimplementedListBuiltin, expr.range()); + if checker.patch(diagnostic.kind.rule()) { + if checker.semantic_model().is_builtin("list") { + diagnostic.set_fix(Fix::automatic(Edit::range_replacement( + "list".to_string(), + expr.range(), + ))); + } + } + checker.diagnostics.push(diagnostic); + } + } + } +} diff --git a/crates/ruff/src/rules/flake8_pie/rules/unnecessary_dict_kwargs.rs b/crates/ruff/src/rules/flake8_pie/rules/unnecessary_dict_kwargs.rs new file mode 100644 index 0000000000000..b621d1ff5fbc3 --- /dev/null +++ b/crates/ruff/src/rules/flake8_pie/rules/unnecessary_dict_kwargs.rs @@ -0,0 +1,79 @@ +use rustpython_parser::ast::{self, Constant, Expr, Keyword, Ranged}; + +use ruff_diagnostics::Diagnostic; +use ruff_diagnostics::Violation; +use ruff_macros::{derive_message_formats, violation}; + +use ruff_python_stdlib::identifiers::is_identifier; + +use crate::checkers::ast::Checker; + +/// ## What it does +/// Checks for unnecessary `dict` kwargs. +/// +/// ## Why is this bad? 
+/// If the `dict` keys are valid identifiers, they can be passed as keyword +/// arguments directly. +/// +/// ## Example +/// ```python +/// def foo(bar): +/// return bar + 1 +/// +/// +/// print(foo(**{"bar": 2})) # prints 3 +/// ``` +/// +/// Use instead: +/// ```python +/// def foo(bar): +/// return bar + 1 +/// +/// +/// print(foo(bar=2)) # prints 3 +/// ``` +/// +/// ## References +/// - [Python documentation](https://docs.python.org/3/reference/expressions.html#dictionary-displays) +/// - [Python documentation](https://docs.python.org/3/reference/expressions.html#calls) +#[violation] +pub struct UnnecessaryDictKwargs; + +impl Violation for UnnecessaryDictKwargs { + #[derive_message_formats] + fn message(&self) -> String { + format!("Unnecessary `dict` kwargs") + } +} + +/// PIE804 +pub(crate) fn unnecessary_dict_kwargs(checker: &mut Checker, expr: &Expr, kwargs: &[Keyword]) { + for kw in kwargs { + // keyword is a spread operator (indicated by None) + if kw.arg.is_none() { + if let Expr::Dict(ast::ExprDict { keys, .. }) = &kw.value { + // ensure foo(**{"bar-bar": 1}) doesn't error + if keys.iter().all(|expr| expr.as_ref().map_or(false, is_valid_kwarg_name)) || + // handle case of foo(**{**bar}) + (keys.len() == 1 && keys[0].is_none()) + { + let diagnostic = Diagnostic::new(UnnecessaryDictKwargs, expr.range()); + checker.diagnostics.push(diagnostic); + } + } + } + } +} + +/// Return `true` if a key is a valid keyword argument name. +fn is_valid_kwarg_name(key: &Expr) -> bool { + if let Expr::Constant(ast::ExprConstant { + value: Constant::Str(value), + .. + }) = key + { + is_identifier(value) + } else { + false + } +} diff --git a/crates/ruff/src/rules/flake8_pie/rules/unnecessary_spread.rs b/crates/ruff/src/rules/flake8_pie/rules/unnecessary_spread.rs new file mode 100644 index 0000000000000..855d4ea26d3c3 --- /dev/null +++ b/crates/ruff/src/rules/flake8_pie/rules/unnecessary_spread.rs @@ -0,0 +1,52 @@ +use rustpython_parser::ast::{Expr, Ranged}; + +use ruff_diagnostics::Diagnostic; +use ruff_diagnostics::Violation; +use ruff_macros::{derive_message_formats, violation}; + +use crate::checkers::ast::Checker; + +/// ## What it does +/// Checks for unnecessary dictionary unpacking operators (`**`). +/// +/// ## Why is this bad? +/// Unpacking a dictionary into another dictionary is redundant. The unpacking +/// operator can be removed, making the code more readable. +/// +/// ## Example +/// ```python +/// foo = {"A": 1, "B": 2} +/// bar = {**foo, **{"C": 3}} +/// ``` +/// +/// Use instead: +/// ```python +/// foo = {"A": 1, "B": 2} +/// bar = {**foo, "C": 3} +/// ``` +/// +/// ## References +/// - [Python documentation](https://docs.python.org/3/reference/expressions.html#dictionary-displays) +#[violation] +pub struct UnnecessarySpread; + +impl Violation for UnnecessarySpread { + #[derive_message_formats] + fn message(&self) -> String { + format!("Unnecessary spread `**`") + } +} + +/// PIE800 +pub(crate) fn unnecessary_spread(checker: &mut Checker, keys: &[Option], values: &[Expr]) { + for item in keys.iter().zip(values.iter()) { + if let (None, value) = item { + // We only care about when the key is None which indicates a spread `**` + // inside a dict. 
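+            // Illustrative note (not part of the original source): as in the rule
+            // docs above, a literal such as `{**foo, **{"C": 3}}` is flagged, while
+            // spreading a name, e.g. `{**foo, **bar}`, is left alone because only
+            // `Expr::Dict` values are matched below.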
+ if let Expr::Dict(_) = value { + let diagnostic = Diagnostic::new(UnnecessarySpread, value.range()); + checker.diagnostics.push(diagnostic); + } + } + } +} diff --git a/crates/ruff/src/rules/flake8_pie/snapshots/ruff__rules__flake8_pie__tests__PIE790_PIE790.py.snap b/crates/ruff/src/rules/flake8_pie/snapshots/ruff__rules__flake8_pie__tests__PIE790_PIE790.py.snap index 1d8eb9f4d8d82..556ed22ec7030 100644 --- a/crates/ruff/src/rules/flake8_pie/snapshots/ruff__rules__flake8_pie__tests__PIE790_PIE790.py.snap +++ b/crates/ruff/src/rules/flake8_pie/snapshots/ruff__rules__flake8_pie__tests__PIE790_PIE790.py.snap @@ -307,7 +307,7 @@ PIE790.py:101:5: PIE790 [*] Unnecessary `pass` statement | = help: Remove unnecessary `pass` -β„Ή Suggested fix +β„Ή Fix 98 98 | 99 99 | def foo() -> None: 100 100 | """buzz""" diff --git a/crates/ruff/src/rules/flake8_pie/snapshots/ruff__rules__flake8_pie__tests__PIE807_PIE807.py.snap b/crates/ruff/src/rules/flake8_pie/snapshots/ruff__rules__flake8_pie__tests__PIE807_PIE807.py.snap index 82bc88464a418..dcd6048509daf 100644 --- a/crates/ruff/src/rules/flake8_pie/snapshots/ruff__rules__flake8_pie__tests__PIE807_PIE807.py.snap +++ b/crates/ruff/src/rules/flake8_pie/snapshots/ruff__rules__flake8_pie__tests__PIE807_PIE807.py.snap @@ -10,7 +10,7 @@ PIE807.py:3:44: PIE807 [*] Prefer `list` over useless lambda | = help: Replace with `list` -β„Ή Suggested fix +β„Ή Fix 1 1 | @dataclass 2 2 | class Foo: 3 |- foo: List[str] = field(default_factory=lambda: []) # PIE807 @@ -27,7 +27,7 @@ PIE807.py:7:36: PIE807 [*] Prefer `list` over useless lambda | = help: Replace with `list` -β„Ή Suggested fix +β„Ή Fix 4 4 | 5 5 | 6 6 | class FooTable(BaseTable): @@ -45,7 +45,7 @@ PIE807.py:11:28: PIE807 [*] Prefer `list` over useless lambda | = help: Replace with `list` -β„Ή Suggested fix +β„Ή Fix 8 8 | 9 9 | 10 10 | class FooTable(BaseTable): diff --git a/crates/ruff/src/rules/flake8_print/mod.rs b/crates/ruff/src/rules/flake8_print/mod.rs index bb0e532615c43..4c5087bf0a2cc 100644 --- a/crates/ruff/src/rules/flake8_print/mod.rs +++ b/crates/ruff/src/rules/flake8_print/mod.rs @@ -6,7 +6,6 @@ mod tests { use std::path::Path; use anyhow::Result; - use test_case::test_case; use crate::registry::Rule; diff --git a/crates/ruff/src/rules/flake8_print/rules/print_call.rs b/crates/ruff/src/rules/flake8_print/rules/print_call.rs index 366ce939627d3..13993a202cc46 100644 --- a/crates/ruff/src/rules/flake8_print/rules/print_call.rs +++ b/crates/ruff/src/rules/flake8_print/rules/print_call.rs @@ -78,7 +78,7 @@ impl Violation for PPrint { /// T201, T203 pub(crate) fn print_call(checker: &mut Checker, func: &Expr, keywords: &[Keyword]) { let diagnostic = { - let call_path = checker.ctx.resolve_call_path(func); + let call_path = checker.semantic_model().resolve_call_path(func); if call_path .as_ref() .map_or(false, |call_path| *call_path.as_slice() == ["", "print"]) @@ -91,7 +91,7 @@ pub(crate) fn print_call(checker: &mut Checker, func: &Expr, keywords: &[Keyword { if !is_const_none(&keyword.value) { if checker - .ctx + .semantic_model() .resolve_call_path(&keyword.value) .map_or(true, |call_path| { call_path.as_slice() != ["sys", "stdout"] @@ -112,7 +112,7 @@ pub(crate) fn print_call(checker: &mut Checker, func: &Expr, keywords: &[Keyword } }; - if !checker.settings.rules.enabled(diagnostic.kind.rule()) { + if !checker.enabled(diagnostic.kind.rule()) { return; } diff --git a/crates/ruff/src/rules/flake8_pyi/mod.rs b/crates/ruff/src/rules/flake8_pyi/mod.rs index e923989379a51..436c37036997c 100644 --- 
a/crates/ruff/src/rules/flake8_pyi/mod.rs +++ b/crates/ruff/src/rules/flake8_pyi/mod.rs @@ -6,7 +6,6 @@ mod tests { use std::path::Path; use anyhow::Result; - use test_case::test_case; use crate::registry::Rule; @@ -23,6 +22,8 @@ mod tests { #[test_case(Rule::DocstringInStub, Path::new("PYI021.pyi"))] #[test_case(Rule::DuplicateUnionMember, Path::new("PYI016.py"))] #[test_case(Rule::DuplicateUnionMember, Path::new("PYI016.pyi"))] + #[test_case(Rule::EllipsisInNonEmptyClassBody, Path::new("PYI013.py"))] + #[test_case(Rule::EllipsisInNonEmptyClassBody, Path::new("PYI013.pyi"))] #[test_case(Rule::NonEmptyStubBody, Path::new("PYI010.py"))] #[test_case(Rule::NonEmptyStubBody, Path::new("PYI010.pyi"))] #[test_case(Rule::PassInClassBody, Path::new("PYI012.py"))] diff --git a/crates/ruff/src/rules/flake8_pyi/rules/bad_version_info_comparison.rs b/crates/ruff/src/rules/flake8_pyi/rules/bad_version_info_comparison.rs index c1afc2f3051e4..8d923d1cfad93 100644 --- a/crates/ruff/src/rules/flake8_pyi/rules/bad_version_info_comparison.rs +++ b/crates/ruff/src/rules/flake8_pyi/rules/bad_version_info_comparison.rs @@ -1,9 +1,10 @@ use rustpython_parser::ast::{Cmpop, Expr, Ranged}; -use crate::checkers::ast::Checker; use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; +use crate::checkers::ast::Checker; + /// ## What it does /// Checks for usages of comparators other than `<` and `>=` for /// `sys.version_info` checks in `.pyi` files. All other comparators, such @@ -68,7 +69,7 @@ pub(crate) fn bad_version_info_comparison( }; if !checker - .ctx + .semantic_model() .resolve_call_path(left) .map_or(false, |call_path| { call_path.as_slice() == ["sys", "version_info"] diff --git a/crates/ruff/src/rules/flake8_pyi/rules/ellipsis_in_non_empty_class_body.rs b/crates/ruff/src/rules/flake8_pyi/rules/ellipsis_in_non_empty_class_body.rs new file mode 100644 index 0000000000000..966c0fa8b500d --- /dev/null +++ b/crates/ruff/src/rules/flake8_pyi/rules/ellipsis_in_non_empty_class_body.rs @@ -0,0 +1,93 @@ +use rustpython_parser::ast::{Expr, ExprConstant, Ranged, Stmt, StmtExpr}; + +use ruff_diagnostics::{AutofixKind, Diagnostic, Fix, Violation}; +use ruff_macros::{derive_message_formats, violation}; +use ruff_python_ast::types::RefEquality; + +use crate::autofix::actions::delete_stmt; +use crate::checkers::ast::Checker; +use crate::registry::AsRule; + +/// ## What it does +/// Removes ellipses (`...`) in otherwise non-empty class bodies. +/// +/// ## Why is this bad? +/// An ellipsis in a class body is only necessary if the class body is +/// otherwise empty. If the class body is non-empty, then the ellipsis +/// is redundant. +/// +/// ## Example +/// ```python +/// class Foo: +/// ... +/// value: int +/// ``` +/// +/// Use instead: +/// ```python +/// class Foo: +/// value: int +/// ``` +#[violation] +pub struct EllipsisInNonEmptyClassBody; + +impl Violation for EllipsisInNonEmptyClassBody { + const AUTOFIX: AutofixKind = AutofixKind::Sometimes; + + #[derive_message_formats] + fn message(&self) -> String { + format!("Non-empty class body must not contain `...`") + } + + fn autofix_title(&self) -> Option { + Some("Remove unnecessary `...`".to_string()) + } +} + +/// PYI013 +pub(crate) fn ellipsis_in_non_empty_class_body<'a>( + checker: &mut Checker<'a>, + parent: &'a Stmt, + body: &'a [Stmt], +) { + // If the class body contains a single statement, then it's fine for it to be an ellipsis. 
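+    // Illustrative note (not part of the original source): `class Foo: ...` is left
+    // untouched, whereas an `...` that appears alongside other statements (e.g. next
+    // to `value: int`, as in the doc example above) is reported and removed.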
+ if body.len() == 1 { + return; + } + + for stmt in body { + if let Stmt::Expr(StmtExpr { value, .. }) = &stmt { + if let Expr::Constant(ExprConstant { value, .. }) = value.as_ref() { + if value.is_ellipsis() { + let mut diagnostic = Diagnostic::new(EllipsisInNonEmptyClassBody, stmt.range()); + + if checker.patch(diagnostic.kind.rule()) { + diagnostic.try_set_fix(|| { + let deleted: Vec<&Stmt> = + checker.deletions.iter().map(Into::into).collect(); + let edit = delete_stmt( + stmt, + Some(parent), + &deleted, + checker.locator, + checker.indexer, + checker.stylist, + )?; + + // In the unlikely event the class body consists solely of several + // consecutive ellipses, `delete_stmt` can actually result in a + // `pass`. + if edit.is_deletion() || edit.content() == Some("pass") { + checker.deletions.insert(RefEquality(stmt)); + } + + Ok(Fix::automatic(edit)) + }); + } + + checker.diagnostics.push(diagnostic); + } + } + } + } +} diff --git a/crates/ruff/src/rules/flake8_pyi/rules/mod.rs b/crates/ruff/src/rules/flake8_pyi/rules/mod.rs index c26ec99be5990..14f00d1d24f22 100644 --- a/crates/ruff/src/rules/flake8_pyi/rules/mod.rs +++ b/crates/ruff/src/rules/flake8_pyi/rules/mod.rs @@ -3,6 +3,9 @@ pub(crate) use bad_version_info_comparison::{ }; pub(crate) use docstring_in_stubs::{docstring_in_stubs, DocstringInStub}; pub(crate) use duplicate_union_member::{duplicate_union_member, DuplicateUnionMember}; +pub(crate) use ellipsis_in_non_empty_class_body::{ + ellipsis_in_non_empty_class_body, EllipsisInNonEmptyClassBody, +}; pub(crate) use non_empty_stub_body::{non_empty_stub_body, NonEmptyStubBody}; pub(crate) use pass_in_class_body::{pass_in_class_body, PassInClassBody}; pub(crate) use pass_statement_stub_body::{pass_statement_stub_body, PassStatementStubBody}; @@ -24,6 +27,7 @@ pub(crate) use unrecognized_platform::{ mod bad_version_info_comparison; mod docstring_in_stubs; mod duplicate_union_member; +mod ellipsis_in_non_empty_class_body; mod non_empty_stub_body; mod pass_in_class_body; mod pass_statement_stub_body; diff --git a/crates/ruff/src/rules/flake8_pyi/rules/pass_in_class_body.rs b/crates/ruff/src/rules/flake8_pyi/rules/pass_in_class_body.rs index 4518708979bec..5e73f9e7f40c0 100644 --- a/crates/ruff/src/rules/flake8_pyi/rules/pass_in_class_body.rs +++ b/crates/ruff/src/rules/flake8_pyi/rules/pass_in_class_body.rs @@ -1,13 +1,13 @@ -use crate::autofix::actions::delete_stmt; use log::error; +use rustpython_parser::ast::{Ranged, Stmt}; + use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic}; use ruff_macros::{derive_message_formats, violation}; use ruff_python_ast::types::RefEquality; +use crate::autofix::actions::delete_stmt; use crate::checkers::ast::Checker; - use crate::registry::AsRule; -use rustpython_parser::ast::{Ranged, Stmt}; #[violation] pub struct PassInClassBody; diff --git a/crates/ruff/src/rules/flake8_pyi/rules/pass_statement_stub_body.rs b/crates/ruff/src/rules/flake8_pyi/rules/pass_statement_stub_body.rs index 118047c9dfb7e..54f747985bb8f 100644 --- a/crates/ruff/src/rules/flake8_pyi/rules/pass_statement_stub_body.rs +++ b/crates/ruff/src/rules/flake8_pyi/rules/pass_statement_stub_body.rs @@ -1,18 +1,23 @@ use rustpython_parser::ast::{Ranged, Stmt}; -use ruff_diagnostics::{Diagnostic, Violation}; +use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit, Fix}; use ruff_macros::{derive_message_formats, violation}; use crate::checkers::ast::Checker; +use crate::registry::Rule; #[violation] pub struct PassStatementStubBody; -impl Violation for 
PassStatementStubBody { +impl AlwaysAutofixableViolation for PassStatementStubBody { #[derive_message_formats] fn message(&self) -> String { format!("Empty body should contain `...`, not `pass`") } + + fn autofix_title(&self) -> String { + format!("Replace `pass` with `...`") + } } /// PYI009 @@ -21,8 +26,13 @@ pub(crate) fn pass_statement_stub_body(checker: &mut Checker, body: &[Stmt]) { return; } if body[0].is_pass_stmt() { - checker - .diagnostics - .push(Diagnostic::new(PassStatementStubBody, body[0].range())); + let mut diagnostic = Diagnostic::new(PassStatementStubBody, body[0].range()); + if checker.patch(Rule::PassStatementStubBody) { + diagnostic.set_fix(Fix::automatic(Edit::range_replacement( + format!("..."), + body[0].range(), + ))); + }; + checker.diagnostics.push(diagnostic); } } diff --git a/crates/ruff/src/rules/flake8_pyi/rules/prefix_type_params.rs b/crates/ruff/src/rules/flake8_pyi/rules/prefix_type_params.rs index 358ea50626bfe..e6c561191ea0d 100644 --- a/crates/ruff/src/rules/flake8_pyi/rules/prefix_type_params.rs +++ b/crates/ruff/src/rules/flake8_pyi/rules/prefix_type_params.rs @@ -70,12 +70,12 @@ pub(crate) fn prefix_type_params(checker: &mut Checker, value: &Expr, targets: & }; if let Expr::Call(ast::ExprCall { func, .. }) = value { - let Some(kind) = checker.ctx.resolve_call_path(func).and_then(|call_path| { - if checker.ctx.match_typing_call_path(&call_path, "ParamSpec") { + let Some(kind) = checker.semantic_model().resolve_call_path(func).and_then(|call_path| { + if checker.semantic_model().match_typing_call_path(&call_path, "ParamSpec") { Some(VarKind::ParamSpec) - } else if checker.ctx.match_typing_call_path(&call_path, "TypeVar") { + } else if checker.semantic_model().match_typing_call_path(&call_path, "TypeVar") { Some(VarKind::TypeVar) - } else if checker.ctx.match_typing_call_path(&call_path, "TypeVarTuple") { + } else if checker.semantic_model().match_typing_call_path(&call_path, "TypeVarTuple") { Some(VarKind::TypeVarTuple) } else { None diff --git a/crates/ruff/src/rules/flake8_pyi/rules/quoted_annotation_in_stub.rs b/crates/ruff/src/rules/flake8_pyi/rules/quoted_annotation_in_stub.rs index 8ba8d3fbf3bfa..6041af59d2ee6 100644 --- a/crates/ruff/src/rules/flake8_pyi/rules/quoted_annotation_in_stub.rs +++ b/crates/ruff/src/rules/flake8_pyi/rules/quoted_annotation_in_stub.rs @@ -1,6 +1,7 @@ +use ruff_text_size::TextRange; + use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit, Fix}; use ruff_macros::{derive_message_formats, violation}; -use ruff_text_size::TextRange; use crate::checkers::ast::Checker; use crate::registry::Rule; diff --git a/crates/ruff/src/rules/flake8_pyi/rules/simple_defaults.rs b/crates/ruff/src/rules/flake8_pyi/rules/simple_defaults.rs index d1e5313797103..14fbcb861d9b0 100644 --- a/crates/ruff/src/rules/flake8_pyi/rules/simple_defaults.rs +++ b/crates/ruff/src/rules/flake8_pyi/rules/simple_defaults.rs @@ -2,7 +2,8 @@ use rustpython_parser::ast::{self, Arguments, Constant, Expr, Operator, Ranged, use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit, Fix, Violation}; use ruff_macros::{derive_message_formats, violation}; -use ruff_python_semantic::context::Context; +use ruff_python_ast::source_code::Locator; +use ruff_python_semantic::model::SemanticModel; use ruff_python_semantic::scope::{ClassDef, ScopeKind}; use crate::checkers::ast::Checker; @@ -90,8 +91,9 @@ const ALLOWED_ATTRIBUTES_IN_DEFAULTS: &[&[&str]] = &[ fn is_valid_default_value_with_annotation( default: &Expr, - checker: &Checker, 
allow_container: bool, + locator: &Locator, + model: &SemanticModel, ) -> bool { match default { Expr::List(ast::ExprList { elts, .. }) @@ -101,7 +103,7 @@ fn is_valid_default_value_with_annotation( && elts.len() <= 10 && elts .iter() - .all(|e| is_valid_default_value_with_annotation(e, checker, false)); + .all(|e| is_valid_default_value_with_annotation(e, false, locator, model)); } Expr::Dict(ast::ExprDict { keys, @@ -112,8 +114,8 @@ fn is_valid_default_value_with_annotation( && keys.len() <= 10 && keys.iter().zip(values).all(|(k, v)| { k.as_ref().map_or(false, |k| { - is_valid_default_value_with_annotation(k, checker, false) - }) && is_valid_default_value_with_annotation(v, checker, false) + is_valid_default_value_with_annotation(k, false, locator, model) + }) && is_valid_default_value_with_annotation(v, false, locator, model) }); } Expr::Constant(ast::ExprConstant { @@ -125,17 +127,17 @@ fn is_valid_default_value_with_annotation( Expr::Constant(ast::ExprConstant { value: Constant::Str(..), .. - }) => return checker.locator.slice(default.range()).len() <= 50, + }) => return locator.slice(default.range()).len() <= 50, Expr::Constant(ast::ExprConstant { value: Constant::Bytes(..), .. - }) => return checker.locator.slice(default.range()).len() <= 50, + }) => return locator.slice(default.range()).len() <= 50, // Ex) `123`, `True`, `False`, `3.14` Expr::Constant(ast::ExprConstant { value: Constant::Int(..) | Constant::Bool(..) | Constant::Float(..), .. }) => { - return checker.locator.slice(default.range()).len() <= 10; + return locator.slice(default.range()).len() <= 10; } // Ex) `2j` Expr::Constant(ast::ExprConstant { @@ -143,7 +145,7 @@ fn is_valid_default_value_with_annotation( .. }) => { if *real == 0.0 { - return checker.locator.slice(default.range()).len() <= 10; + return locator.slice(default.range()).len() <= 10; } } Expr::UnaryOp(ast::ExprUnaryOp { @@ -157,7 +159,7 @@ fn is_valid_default_value_with_annotation( .. }) = operand.as_ref() { - return checker.locator.slice(operand.range()).len() <= 10; + return locator.slice(operand.range()).len() <= 10; } // Ex) `-2j` if let Expr::Constant(ast::ExprConstant { @@ -166,21 +168,17 @@ fn is_valid_default_value_with_annotation( }) = operand.as_ref() { if *real == 0.0 { - return checker.locator.slice(operand.range()).len() <= 10; + return locator.slice(operand.range()).len() <= 10; } } // Ex) `-math.inf`, `-math.pi`, etc. if let Expr::Attribute(_) = operand.as_ref() { - if checker - .ctx - .resolve_call_path(operand) - .map_or(false, |call_path| { - ALLOWED_MATH_ATTRIBUTES_IN_DEFAULTS.iter().any(|target| { - // reject `-math.nan` - call_path.as_slice() == *target && *target != ["math", "nan"] - }) + if model.resolve_call_path(operand).map_or(false, |call_path| { + ALLOWED_MATH_ATTRIBUTES_IN_DEFAULTS.iter().any(|target| { + // reject `-math.nan` + call_path.as_slice() == *target && *target != ["math", "nan"] }) - { + }) { return true; } } @@ -203,7 +201,7 @@ fn is_valid_default_value_with_annotation( .. }) = left.as_ref() { - return checker.locator.slice(left.range()).len() <= 10; + return locator.slice(left.range()).len() <= 10; } else if let Expr::UnaryOp(ast::ExprUnaryOp { op: Unaryop::USub, operand, @@ -216,23 +214,19 @@ fn is_valid_default_value_with_annotation( .. }) = operand.as_ref() { - return checker.locator.slice(operand.range()).len() <= 10; + return locator.slice(operand.range()).len() <= 10; } } } } // Ex) `math.inf`, `sys.stdin`, etc. 
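         // Illustrative note (not part of the original diff): only dotted attribute
         // accesses such as `math.inf` reach this arm; a bare `inf` brought in via
         // `from math import inf` is an `Expr::Name` and is not accepted here.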
Expr::Attribute(_) => { - if checker - .ctx - .resolve_call_path(default) - .map_or(false, |call_path| { - ALLOWED_MATH_ATTRIBUTES_IN_DEFAULTS - .iter() - .chain(ALLOWED_ATTRIBUTES_IN_DEFAULTS.iter()) - .any(|target| call_path.as_slice() == *target) - }) - { + if model.resolve_call_path(default).map_or(false, |call_path| { + ALLOWED_MATH_ATTRIBUTES_IN_DEFAULTS + .iter() + .chain(ALLOWED_ATTRIBUTES_IN_DEFAULTS.iter()) + .any(|target| call_path.as_slice() == *target) + }) { return true; } } @@ -278,11 +272,11 @@ fn is_valid_default_value_without_annotation(default: &Expr) -> bool { /// Returns `true` if an [`Expr`] appears to be `TypeVar`, `TypeVarTuple`, `NewType`, or `ParamSpec` /// call. -fn is_type_var_like_call(context: &Context, expr: &Expr) -> bool { +fn is_type_var_like_call(model: &SemanticModel, expr: &Expr) -> bool { let Expr::Call(ast::ExprCall { func, .. } )= expr else { return false; }; - context.resolve_call_path(func).map_or(false, |call_path| { + model.resolve_call_path(func).map_or(false, |call_path| { matches!( call_path.as_slice(), [ @@ -295,11 +289,11 @@ fn is_type_var_like_call(context: &Context, expr: &Expr) -> bool { /// Returns `true` if this is a "special" assignment which must have a value (e.g., an assignment to /// `__all__`). -fn is_special_assignment(context: &Context, target: &Expr) -> bool { +fn is_special_assignment(model: &SemanticModel, target: &Expr) -> bool { if let Expr::Name(ast::ExprName { id, .. }) = target { match id.as_str() { - "__all__" => context.scope().kind.is_module(), - "__match_args__" | "__slots__" => context.scope().kind.is_class(), + "__all__" => model.scope().kind.is_module(), + "__match_args__" | "__slots__" => model.scope().kind.is_class(), _ => false, } } else { @@ -308,9 +302,9 @@ fn is_special_assignment(context: &Context, target: &Expr) -> bool { } /// Returns `true` if the a class is an enum, based on its base classes. 
-fn is_enum(context: &Context, bases: &[Expr]) -> bool { +fn is_enum(model: &SemanticModel, bases: &[Expr]) -> bool { return bases.iter().any(|expr| { - context.resolve_call_path(expr).map_or(false, |call_path| { + model.resolve_call_path(expr).map_or(false, |call_path| { matches!( call_path.as_slice(), [ @@ -332,7 +326,12 @@ pub(crate) fn typed_argument_simple_defaults(checker: &mut Checker, args: &Argum .and_then(|i| args.defaults.get(i)) { if arg.annotation.is_some() { - if !is_valid_default_value_with_annotation(default, checker, true) { + if !is_valid_default_value_with_annotation( + default, + true, + checker.locator, + checker.semantic_model(), + ) { let mut diagnostic = Diagnostic::new(TypedArgumentDefaultInStub, default.range()); @@ -359,7 +358,12 @@ pub(crate) fn typed_argument_simple_defaults(checker: &mut Checker, args: &Argum .and_then(|i| args.kw_defaults.get(i)) { if kwarg.annotation.is_some() { - if !is_valid_default_value_with_annotation(default, checker, true) { + if !is_valid_default_value_with_annotation( + default, + true, + checker.locator, + checker.semantic_model(), + ) { let mut diagnostic = Diagnostic::new(TypedArgumentDefaultInStub, default.range()); @@ -389,7 +393,12 @@ pub(crate) fn argument_simple_defaults(checker: &mut Checker, args: &Arguments) .and_then(|i| args.defaults.get(i)) { if arg.annotation.is_none() { - if !is_valid_default_value_with_annotation(default, checker, true) { + if !is_valid_default_value_with_annotation( + default, + true, + checker.locator, + checker.semantic_model(), + ) { let mut diagnostic = Diagnostic::new(ArgumentDefaultInStub, default.range()); @@ -416,7 +425,12 @@ pub(crate) fn argument_simple_defaults(checker: &mut Checker, args: &Arguments) .and_then(|i| args.kw_defaults.get(i)) { if kwarg.annotation.is_none() { - if !is_valid_default_value_with_annotation(default, checker, true) { + if !is_valid_default_value_with_annotation( + default, + true, + checker.locator, + checker.semantic_model(), + ) { let mut diagnostic = Diagnostic::new(ArgumentDefaultInStub, default.range()); @@ -445,16 +459,21 @@ pub(crate) fn assignment_default_in_stub(checker: &mut Checker, targets: &[Expr] if !target.is_name_expr() { return; } - if is_special_assignment(&checker.ctx, target) { + if is_special_assignment(checker.semantic_model(), target) { return; } - if is_type_var_like_call(&checker.ctx, value) { + if is_type_var_like_call(checker.semantic_model(), value) { return; } if is_valid_default_value_without_annotation(value) { return; } - if is_valid_default_value_with_annotation(value, checker, true) { + if is_valid_default_value_with_annotation( + value, + true, + checker.locator, + checker.semantic_model(), + ) { return; } @@ -476,16 +495,24 @@ pub(crate) fn annotated_assignment_default_in_stub( value: &Expr, annotation: &Expr, ) { - if checker.ctx.match_typing_expr(annotation, "TypeAlias") { + if checker + .semantic_model() + .match_typing_expr(annotation, "TypeAlias") + { return; } - if is_special_assignment(&checker.ctx, target) { + if is_special_assignment(checker.semantic_model(), target) { return; } - if is_type_var_like_call(&checker.ctx, value) { + if is_type_var_like_call(checker.semantic_model(), value) { return; } - if is_valid_default_value_with_annotation(value, checker, true) { + if is_valid_default_value_with_annotation( + value, + true, + checker.locator, + checker.semantic_model(), + ) { return; } @@ -513,21 +540,26 @@ pub(crate) fn unannotated_assignment_in_stub( let Expr::Name(ast::ExprName { id, .. 
}) = target else { return; }; - if is_special_assignment(&checker.ctx, target) { + if is_special_assignment(checker.semantic_model(), target) { return; } - if is_type_var_like_call(&checker.ctx, value) { + if is_type_var_like_call(checker.semantic_model(), value) { return; } if is_valid_default_value_without_annotation(value) { return; } - if !is_valid_default_value_with_annotation(value, checker, true) { + if !is_valid_default_value_with_annotation( + value, + true, + checker.locator, + checker.semantic_model(), + ) { return; } - if let ScopeKind::Class(ClassDef { bases, .. }) = &checker.ctx.scope().kind { - if is_enum(&checker.ctx, bases) { + if let ScopeKind::Class(ClassDef { bases, .. }) = checker.semantic_model().scope().kind { + if is_enum(checker.semantic_model(), bases) { return; } } diff --git a/crates/ruff/src/rules/flake8_pyi/rules/type_comment_in_stub.rs b/crates/ruff/src/rules/flake8_pyi/rules/type_comment_in_stub.rs index 365018311fbc2..a4ca3bbc14ad9 100644 --- a/crates/ruff/src/rules/flake8_pyi/rules/type_comment_in_stub.rs +++ b/crates/ruff/src/rules/flake8_pyi/rules/type_comment_in_stub.rs @@ -1,7 +1,6 @@ use once_cell::sync::Lazy; use regex::Regex; -use rustpython_parser::lexer::LexResult; -use rustpython_parser::Tok; +use ruff_python_ast::source_code::{Indexer, Locator}; use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; @@ -35,14 +34,14 @@ impl Violation for TypeCommentInStub { } /// PYI033 -pub(crate) fn type_comment_in_stub(tokens: &[LexResult]) -> Vec { +pub(crate) fn type_comment_in_stub(indexer: &Indexer, locator: &Locator) -> Vec { let mut diagnostics = vec![]; - for token in tokens.iter().flatten() { - if let (Tok::Comment(comment), range) = token { - if TYPE_COMMENT_REGEX.is_match(comment) && !TYPE_IGNORE_REGEX.is_match(comment) { - diagnostics.push(Diagnostic::new(TypeCommentInStub, *range)); - } + for range in indexer.comment_ranges() { + let comment = locator.slice(*range); + + if TYPE_COMMENT_REGEX.is_match(comment) && !TYPE_IGNORE_REGEX.is_match(comment) { + diagnostics.push(Diagnostic::new(TypeCommentInStub, *range)); } } diff --git a/crates/ruff/src/rules/flake8_pyi/rules/unrecognized_platform.rs b/crates/ruff/src/rules/flake8_pyi/rules/unrecognized_platform.rs index 20f4cb08158a3..1efb416a0ae03 100644 --- a/crates/ruff/src/rules/flake8_pyi/rules/unrecognized_platform.rs +++ b/crates/ruff/src/rules/flake8_pyi/rules/unrecognized_platform.rs @@ -103,7 +103,7 @@ pub(crate) fn unrecognized_platform( let diagnostic_unrecognized_platform_check = Diagnostic::new(UnrecognizedPlatformCheck, expr.range()); if !checker - .ctx + .semantic_model() .resolve_call_path(left) .map_or(false, |call_path| { call_path.as_slice() == ["sys", "platform"] @@ -113,12 +113,7 @@ pub(crate) fn unrecognized_platform( } // "in" might also make sense but we don't currently have one. - if !matches!(op, Cmpop::Eq | Cmpop::NotEq) - && checker - .settings - .rules - .enabled(Rule::UnrecognizedPlatformCheck) - { + if !matches!(op, Cmpop::Eq | Cmpop::NotEq) && checker.enabled(Rule::UnrecognizedPlatformCheck) { checker .diagnostics .push(diagnostic_unrecognized_platform_check); @@ -133,10 +128,7 @@ pub(crate) fn unrecognized_platform( // Other values are possible but we don't need them right now. // This protects against typos. 
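     // Illustrative note (not part of the original diff): a typo such as
     // `sys.platform == "linux2"` is not in this allow-list and is reported below
     // via `UnrecognizedPlatformName`.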
if !["linux", "win32", "cygwin", "darwin"].contains(&value.as_str()) - && checker - .settings - .rules - .enabled(Rule::UnrecognizedPlatformName) + && checker.enabled(Rule::UnrecognizedPlatformName) { checker.diagnostics.push(Diagnostic::new( UnrecognizedPlatformName { @@ -147,11 +139,7 @@ pub(crate) fn unrecognized_platform( } } _ => { - if checker - .settings - .rules - .enabled(Rule::UnrecognizedPlatformCheck) - { + if checker.enabled(Rule::UnrecognizedPlatformCheck) { checker .diagnostics .push(diagnostic_unrecognized_platform_check); diff --git a/crates/ruff/src/rules/flake8_pyi/snapshots/ruff__rules__flake8_pyi__tests__PYI009_PYI009.pyi.snap b/crates/ruff/src/rules/flake8_pyi/snapshots/ruff__rules__flake8_pyi__tests__PYI009_PYI009.pyi.snap index edabe9cf803ea..f5e18fe99ea3b 100644 --- a/crates/ruff/src/rules/flake8_pyi/snapshots/ruff__rules__flake8_pyi__tests__PYI009_PYI009.pyi.snap +++ b/crates/ruff/src/rules/flake8_pyi/snapshots/ruff__rules__flake8_pyi__tests__PYI009_PYI009.pyi.snap @@ -1,7 +1,7 @@ --- source: crates/ruff/src/rules/flake8_pyi/mod.rs --- -PYI009.pyi:3:5: PYI009 Empty body should contain `...`, not `pass` +PYI009.pyi:3:5: PYI009 [*] Empty body should contain `...`, not `pass` | 3 | def bar(): ... # OK 4 | def foo(): @@ -10,12 +10,30 @@ PYI009.pyi:3:5: PYI009 Empty body should contain `...`, not `pass` 6 | 7 | class Bar: ... # OK | + = help: Replace `pass` with `...` -PYI009.pyi:8:5: PYI009 Empty body should contain `...`, not `pass` +β„Ή Fix +1 1 | def bar(): ... # OK +2 2 | def foo(): +3 |- pass # ERROR PYI009, since we're in a stub file + 3 |+ ... # ERROR PYI009, since we're in a stub file +4 4 | +5 5 | class Bar: ... # OK +6 6 | + +PYI009.pyi:8:5: PYI009 [*] Empty body should contain `...`, not `pass` | 8 | class Foo: 9 | pass # ERROR PYI009, since we're in a stub file | ^^^^ PYI009 | + = help: Replace `pass` with `...` + +β„Ή Fix +5 5 | class Bar: ... # OK +6 6 | +7 7 | class Foo: +8 |- pass # ERROR PYI009, since we're in a stub file + 8 |+ ... # ERROR PYI009, since we're in a stub file diff --git a/crates/ruff/src/rules/flake8_pyi/snapshots/ruff__rules__flake8_pyi__tests__PYI013_PYI013.py.snap b/crates/ruff/src/rules/flake8_pyi/snapshots/ruff__rules__flake8_pyi__tests__PYI013_PYI013.py.snap new file mode 100644 index 0000000000000..d1aa2e9116558 --- /dev/null +++ b/crates/ruff/src/rules/flake8_pyi/snapshots/ruff__rules__flake8_pyi__tests__PYI013_PYI013.py.snap @@ -0,0 +1,4 @@ +--- +source: crates/ruff/src/rules/flake8_pyi/mod.rs +--- + diff --git a/crates/ruff/src/rules/flake8_pyi/snapshots/ruff__rules__flake8_pyi__tests__PYI013_PYI013.pyi.snap b/crates/ruff/src/rules/flake8_pyi/snapshots/ruff__rules__flake8_pyi__tests__PYI013_PYI013.pyi.snap new file mode 100644 index 0000000000000..b8998c167b2f8 --- /dev/null +++ b/crates/ruff/src/rules/flake8_pyi/snapshots/ruff__rules__flake8_pyi__tests__PYI013_PYI013.pyi.snap @@ -0,0 +1,177 @@ +--- +source: crates/ruff/src/rules/flake8_pyi/mod.rs +--- +PYI013.pyi:5:5: PYI013 [*] Non-empty class body must not contain `...` + | +5 | class OneAttributeClass: +6 | value: int +7 | ... # Error + | ^^^ PYI013 +8 | +9 | class OneAttributeClass2: + | + = help: Remove unnecessary `...` + +β„Ή Fix +2 2 | +3 3 | class OneAttributeClass: +4 4 | value: int +5 |- ... # Error +6 5 | +7 6 | class OneAttributeClass2: +8 7 | ... # Error + +PYI013.pyi:8:5: PYI013 [*] Non-empty class body must not contain `...` + | + 8 | class OneAttributeClass2: + 9 | ... 
# Error + | ^^^ PYI013 +10 | value: int + | + = help: Remove unnecessary `...` + +β„Ή Fix +5 5 | ... # Error +6 6 | +7 7 | class OneAttributeClass2: +8 |- ... # Error +9 8 | value: int +10 9 | +11 10 | class MyClass: + +PYI013.pyi:12:5: PYI013 [*] Non-empty class body must not contain `...` + | +12 | class MyClass: +13 | ... + | ^^^ PYI013 +14 | value: int + | + = help: Remove unnecessary `...` + +β„Ή Fix +9 9 | value: int +10 10 | +11 11 | class MyClass: +12 |- ... +13 12 | value: int +14 13 | +15 14 | class TwoEllipsesClass: + +PYI013.pyi:16:5: PYI013 [*] Non-empty class body must not contain `...` + | +16 | class TwoEllipsesClass: +17 | ... + | ^^^ PYI013 +18 | ... # Error + | + = help: Remove unnecessary `...` + +β„Ή Fix +13 13 | value: int +14 14 | +15 15 | class TwoEllipsesClass: +16 |- ... +17 16 | ... # Error +18 17 | +19 18 | class DocstringClass: + +PYI013.pyi:17:5: PYI013 [*] Non-empty class body must not contain `...` + | +17 | class TwoEllipsesClass: +18 | ... +19 | ... # Error + | ^^^ PYI013 +20 | +21 | class DocstringClass: + | + = help: Remove unnecessary `...` + +β„Ή Fix +14 14 | +15 15 | class TwoEllipsesClass: +16 16 | ... +17 |- ... # Error + 17 |+ pass # Error +18 18 | +19 19 | class DocstringClass: +20 20 | """ + +PYI013.pyi:24:5: PYI013 [*] Non-empty class body must not contain `...` + | +24 | """ +25 | +26 | ... # Error + | ^^^ PYI013 +27 | +28 | class NonEmptyChild(Exception): + | + = help: Remove unnecessary `...` + +β„Ή Fix +21 21 | My body only contains an ellipsis. +22 22 | """ +23 23 | +24 |- ... # Error +25 24 | +26 25 | class NonEmptyChild(Exception): +27 26 | value: int + +PYI013.pyi:28:5: PYI013 [*] Non-empty class body must not contain `...` + | +28 | class NonEmptyChild(Exception): +29 | value: int +30 | ... # Error + | ^^^ PYI013 +31 | +32 | class NonEmptyChild2(Exception): + | + = help: Remove unnecessary `...` + +β„Ή Fix +25 25 | +26 26 | class NonEmptyChild(Exception): +27 27 | value: int +28 |- ... # Error +29 28 | +30 29 | class NonEmptyChild2(Exception): +31 30 | ... # Error + +PYI013.pyi:31:5: PYI013 [*] Non-empty class body must not contain `...` + | +31 | class NonEmptyChild2(Exception): +32 | ... # Error + | ^^^ PYI013 +33 | value: int + | + = help: Remove unnecessary `...` + +β„Ή Fix +28 28 | ... # Error +29 29 | +30 30 | class NonEmptyChild2(Exception): +31 |- ... # Error +32 31 | value: int +33 32 | +34 33 | class NonEmptyWithInit: + +PYI013.pyi:36:5: PYI013 [*] Non-empty class body must not contain `...` + | +36 | class NonEmptyWithInit: +37 | value: int +38 | ... # Error + | ^^^ PYI013 +39 | +40 | def __init__(): + | + = help: Remove unnecessary `...` + +β„Ή Fix +33 33 | +34 34 | class NonEmptyWithInit: +35 35 | value: int +36 |- ... 
# Error +37 36 | +38 37 | def __init__(): +39 38 | pass + + diff --git a/crates/ruff/src/rules/flake8_pytest_style/mod.rs b/crates/ruff/src/rules/flake8_pytest_style/mod.rs index 1ef7da6cf0020..1c116551a2f4a 100644 --- a/crates/ruff/src/rules/flake8_pytest_style/mod.rs +++ b/crates/ruff/src/rules/flake8_pytest_style/mod.rs @@ -8,7 +8,6 @@ mod tests { use std::path::Path; use anyhow::Result; - use test_case::test_case; use crate::registry::Rule; diff --git a/crates/ruff/src/rules/flake8_pytest_style/rules/assertion.rs b/crates/ruff/src/rules/flake8_pytest_style/rules/assertion.rs index d59faa9d4317d..37fe645c19133 100644 --- a/crates/ruff/src/rules/flake8_pytest_style/rules/assertion.rs +++ b/crates/ruff/src/rules/flake8_pytest_style/rules/assertion.rs @@ -1,12 +1,13 @@ +use std::borrow::Cow; + use anyhow::bail; use anyhow::Result; use libcst_native::{ Assert, BooleanOp, Codegen, CodegenState, CompoundStatement, Expression, ParenthesizableWhitespace, ParenthesizedNode, SimpleStatementLine, SimpleWhitespace, - SmallStatement, Statement, Suite, TrailingWhitespace, UnaryOp, UnaryOperation, + SmallStatement, Statement, TrailingWhitespace, UnaryOp, UnaryOperation, }; use rustpython_parser::ast::{self, Boolop, Excepthandler, Expr, Keyword, Ranged, Stmt, Unaryop}; -use std::borrow::Cow; use ruff_diagnostics::{AutofixKind, Diagnostic, Edit, Fix, Violation}; use ruff_macros::{derive_message_formats, violation}; @@ -16,6 +17,7 @@ use ruff_python_ast::visitor::Visitor; use ruff_python_ast::{visitor, whitespace}; use crate::checkers::ast::Checker; +use crate::cst::matchers::match_indented_block; use crate::cst::matchers::match_module; use crate::registry::AsRule; @@ -184,9 +186,9 @@ pub(crate) fn unittest_assertion( if let Ok(unittest_assert) = UnittestAssert::try_from(attr.as_str()) { // We're converting an expression to a statement, so avoid applying the fix if // the assertion is part of a larger expression. 
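         // Illustrative note (not part of the original diff): assertions embedded in a
         // larger expression, e.g. `x = self.assertTrue(y)` or a call inside a lambda,
         // still produce a diagnostic but are not rewritten to a bare `assert`,
         // because `assert` is a statement.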
- let fixable = checker.ctx.stmt().is_expr_stmt() - && checker.ctx.expr_parent().is_none() - && !checker.ctx.scope().kind.is_lambda() + let fixable = checker.semantic_model().stmt().is_expr_stmt() + && checker.semantic_model().expr_parent().is_none() + && !checker.semantic_model().scope().kind.is_lambda() && !has_comments_in(expr.range(), checker.locator); let mut diagnostic = Diagnostic::new( PytestUnittestAssertion { @@ -214,7 +216,7 @@ pub(crate) fn unittest_assertion( /// PT015 pub(crate) fn assert_falsy(checker: &mut Checker, stmt: &Stmt, test: &Expr) { - if Truthiness::from_expr(test, |id| checker.ctx.is_builtin(id)).is_falsey() { + if Truthiness::from_expr(test, |id| checker.semantic_model().is_builtin(id)).is_falsey() { checker .diagnostics .push(Diagnostic::new(PytestAssertAlwaysFalse, stmt.range())); @@ -344,9 +346,7 @@ fn fix_composite_condition(stmt: &Stmt, locator: &Locator, stylist: &Stylist) -> bail!("Expected statement to be embedded in a function definition") }; - let Suite::IndentedBlock(indented_block) = &mut embedding.body else { - bail!("Expected indented block") - }; + let indented_block = match_indented_block(&mut embedding.body)?; indented_block.indent = Some(outer_indent); &mut indented_block.body diff --git a/crates/ruff/src/rules/flake8_pytest_style/rules/fail.rs b/crates/ruff/src/rules/flake8_pytest_style/rules/fail.rs index f634e3be486aa..6faa0915f4397 100644 --- a/crates/ruff/src/rules/flake8_pytest_style/rules/fail.rs +++ b/crates/ruff/src/rules/flake8_pytest_style/rules/fail.rs @@ -19,7 +19,7 @@ impl Violation for PytestFailWithoutMessage { } pub(crate) fn fail_call(checker: &mut Checker, func: &Expr, args: &[Expr], keywords: &[Keyword]) { - if is_pytest_fail(&checker.ctx, func) { + if is_pytest_fail(checker.semantic_model(), func) { let call_args = SimpleCallArgs::new(args, keywords); let msg = call_args.argument("msg", 0); diff --git a/crates/ruff/src/rules/flake8_pytest_style/rules/fixture.rs b/crates/ruff/src/rules/flake8_pytest_style/rules/fixture.rs index 7df5b1bd08ea3..78c8623225ab7 100644 --- a/crates/ruff/src/rules/flake8_pytest_style/rules/fixture.rs +++ b/crates/ruff/src/rules/flake8_pytest_style/rules/fixture.rs @@ -1,7 +1,8 @@ +use std::fmt; + use anyhow::Result; use ruff_text_size::{TextLen, TextRange, TextSize}; use rustpython_parser::ast::{self, Arguments, Expr, Keyword, Ranged, Stmt}; -use std::fmt; use ruff_diagnostics::{AlwaysAutofixableViolation, Violation}; use ruff_diagnostics::{Diagnostic, Edit, Fix}; @@ -12,7 +13,7 @@ use ruff_python_ast::source_code::Locator; use ruff_python_ast::visitor::Visitor; use ruff_python_ast::{helpers, visitor}; use ruff_python_semantic::analyze::visibility::is_abstract; -use ruff_python_semantic::context::Context; +use ruff_python_semantic::model::SemanticModel; use crate::autofix::actions::remove_argument; use crate::checkers::ast::Checker; @@ -242,9 +243,9 @@ where } } -fn get_fixture_decorator<'a>(context: &Context, decorators: &'a [Expr]) -> Option<&'a Expr> { +fn get_fixture_decorator<'a>(model: &SemanticModel, decorators: &'a [Expr]) -> Option<&'a Expr> { decorators.iter().find(|decorator| { - is_pytest_fixture(context, decorator) || is_pytest_yield_fixture(context, decorator) + is_pytest_fixture(model, decorator) || is_pytest_yield_fixture(model, decorator) }) } @@ -284,10 +285,7 @@ fn check_fixture_decorator(checker: &mut Checker, func_name: &str, decorator: &E keywords, range: _, }) => { - if checker - .settings - .rules - .enabled(Rule::PytestFixtureIncorrectParenthesesStyle) + if 
checker.enabled(Rule::PytestFixtureIncorrectParenthesesStyle) && !checker.settings.flake8_pytest_style.fixture_parentheses && args.is_empty() && keywords.is_empty() @@ -303,12 +301,7 @@ fn check_fixture_decorator(checker: &mut Checker, func_name: &str, decorator: &E ); } - if checker - .settings - .rules - .enabled(Rule::PytestFixturePositionalArgs) - && !args.is_empty() - { + if checker.enabled(Rule::PytestFixturePositionalArgs) && !args.is_empty() { checker.diagnostics.push(Diagnostic::new( PytestFixturePositionalArgs { function: func_name.to_string(), @@ -317,11 +310,7 @@ fn check_fixture_decorator(checker: &mut Checker, func_name: &str, decorator: &E )); } - if checker - .settings - .rules - .enabled(Rule::PytestExtraneousScopeFunction) - { + if checker.enabled(Rule::PytestExtraneousScopeFunction) { let scope_keyword = keywords .iter() .find(|kw| kw.arg.as_ref().map_or(false, |arg| arg == "scope")); @@ -349,10 +338,7 @@ fn check_fixture_decorator(checker: &mut Checker, func_name: &str, decorator: &E } } _ => { - if checker - .settings - .rules - .enabled(Rule::PytestFixtureIncorrectParenthesesStyle) + if checker.enabled(Rule::PytestFixtureIncorrectParenthesesStyle) && checker.settings.flake8_pytest_style.fixture_parentheses { #[allow(deprecated)] @@ -380,10 +366,7 @@ fn check_fixture_returns(checker: &mut Checker, stmt: &Stmt, name: &str, body: & visitor.visit_stmt(stmt); } - if checker - .settings - .rules - .enabled(Rule::PytestIncorrectFixtureNameUnderscore) + if checker.enabled(Rule::PytestIncorrectFixtureNameUnderscore) && visitor.has_return_with_value && name.starts_with('_') { @@ -393,10 +376,7 @@ fn check_fixture_returns(checker: &mut Checker, stmt: &Stmt, name: &str, body: & }, helpers::identifier_range(stmt, checker.locator), )); - } else if checker - .settings - .rules - .enabled(Rule::PytestMissingFixtureNameUnderscore) + } else if checker.enabled(Rule::PytestMissingFixtureNameUnderscore) && !visitor.has_return_with_value && !visitor.has_yield_from && !name.starts_with('_') @@ -409,11 +389,7 @@ fn check_fixture_returns(checker: &mut Checker, stmt: &Stmt, name: &str, body: & )); } - if checker - .settings - .rules - .enabled(Rule::PytestUselessYieldFixture) - { + if checker.enabled(Rule::PytestUselessYieldFixture) { if let Some(stmt) = body.last() { if let Stmt::Expr(ast::StmtExpr { value, range: _ }) = stmt { if value.is_yield_expr() { @@ -456,7 +432,7 @@ fn check_test_function_args(checker: &mut Checker, args: &Arguments) { /// PT020 fn check_fixture_decorator_name(checker: &mut Checker, decorator: &Expr) { - if is_pytest_yield_fixture(&checker.ctx, decorator) { + if is_pytest_yield_fixture(checker.semantic_model(), decorator) { checker.diagnostics.push(Diagnostic::new( PytestDeprecatedYieldFixture, decorator.range(), @@ -488,11 +464,7 @@ fn check_fixture_addfinalizer(checker: &mut Checker, args: &Arguments, body: &[S fn check_fixture_marks(checker: &mut Checker, decorators: &[Expr]) { for (expr, call_path) in get_mark_decorators(decorators) { let name = call_path.last().expect("Expected a mark name"); - if checker - .settings - .rules - .enabled(Rule::PytestUnnecessaryAsyncioMarkOnFixture) - { + if checker.enabled(Rule::PytestUnnecessaryAsyncioMarkOnFixture) { if *name == "asyncio" { let mut diagnostic = Diagnostic::new(PytestUnnecessaryAsyncioMarkOnFixture, expr.range()); @@ -505,11 +477,7 @@ fn check_fixture_marks(checker: &mut Checker, decorators: &[Expr]) { } } - if checker - .settings - .rules - .enabled(Rule::PytestErroneousUseFixturesOnFixture) - { + if 
checker.enabled(Rule::PytestErroneousUseFixturesOnFixture) { if *name == "usefixtures" { let mut diagnostic = Diagnostic::new(PytestErroneousUseFixturesOnFixture, expr.range()); @@ -532,77 +500,41 @@ pub(crate) fn fixture( decorators: &[Expr], body: &[Stmt], ) { - let decorator = get_fixture_decorator(&checker.ctx, decorators); + let decorator = get_fixture_decorator(checker.semantic_model(), decorators); if let Some(decorator) = decorator { - if checker - .settings - .rules - .enabled(Rule::PytestFixtureIncorrectParenthesesStyle) - || checker - .settings - .rules - .enabled(Rule::PytestFixturePositionalArgs) - || checker - .settings - .rules - .enabled(Rule::PytestExtraneousScopeFunction) + if checker.enabled(Rule::PytestFixtureIncorrectParenthesesStyle) + || checker.enabled(Rule::PytestFixturePositionalArgs) + || checker.enabled(Rule::PytestExtraneousScopeFunction) { check_fixture_decorator(checker, name, decorator); } - if checker - .settings - .rules - .enabled(Rule::PytestDeprecatedYieldFixture) + if checker.enabled(Rule::PytestDeprecatedYieldFixture) && checker.settings.flake8_pytest_style.fixture_parentheses { check_fixture_decorator_name(checker, decorator); } - if (checker - .settings - .rules - .enabled(Rule::PytestMissingFixtureNameUnderscore) - || checker - .settings - .rules - .enabled(Rule::PytestIncorrectFixtureNameUnderscore) - || checker - .settings - .rules - .enabled(Rule::PytestUselessYieldFixture)) - && !is_abstract(&checker.ctx, decorators) + if (checker.enabled(Rule::PytestMissingFixtureNameUnderscore) + || checker.enabled(Rule::PytestIncorrectFixtureNameUnderscore) + || checker.enabled(Rule::PytestUselessYieldFixture)) + && !is_abstract(checker.semantic_model(), decorators) { check_fixture_returns(checker, stmt, name, body); } - if checker - .settings - .rules - .enabled(Rule::PytestFixtureFinalizerCallback) - { + if checker.enabled(Rule::PytestFixtureFinalizerCallback) { check_fixture_addfinalizer(checker, args, body); } - if checker - .settings - .rules - .enabled(Rule::PytestUnnecessaryAsyncioMarkOnFixture) - || checker - .settings - .rules - .enabled(Rule::PytestErroneousUseFixturesOnFixture) + if checker.enabled(Rule::PytestUnnecessaryAsyncioMarkOnFixture) + || checker.enabled(Rule::PytestErroneousUseFixturesOnFixture) { check_fixture_marks(checker, decorators); } } - if checker - .settings - .rules - .enabled(Rule::PytestFixtureParamWithoutValue) - && name.starts_with("test_") - { + if checker.enabled(Rule::PytestFixtureParamWithoutValue) && name.starts_with("test_") { check_test_function_args(checker, args); } } diff --git a/crates/ruff/src/rules/flake8_pytest_style/rules/helpers.rs b/crates/ruff/src/rules/flake8_pytest_style/rules/helpers.rs index cd6ea32b81d46..36beb36f79b35 100644 --- a/crates/ruff/src/rules/flake8_pytest_style/rules/helpers.rs +++ b/crates/ruff/src/rules/flake8_pytest_style/rules/helpers.rs @@ -2,7 +2,7 @@ use rustpython_parser::ast::{self, Constant, Expr, Keyword}; use ruff_python_ast::call_path::{collect_call_path, CallPath}; use ruff_python_ast::helpers::map_callable; -use ruff_python_semantic::context::Context; +use ruff_python_semantic::model::SemanticModel; pub(super) fn get_mark_decorators(decorators: &[Expr]) -> impl Iterator { decorators.iter().filter_map(|decorator| { @@ -17,30 +17,30 @@ pub(super) fn get_mark_decorators(decorators: &[Expr]) -> impl Iterator bool { - context.resolve_call_path(call).map_or(false, |call_path| { +pub(super) fn is_pytest_fail(model: &SemanticModel, call: &Expr) -> bool { + 
model.resolve_call_path(call).map_or(false, |call_path| { call_path.as_slice() == ["pytest", "fail"] }) } -pub(super) fn is_pytest_fixture(context: &Context, decorator: &Expr) -> bool { - context +pub(super) fn is_pytest_fixture(model: &SemanticModel, decorator: &Expr) -> bool { + model .resolve_call_path(map_callable(decorator)) .map_or(false, |call_path| { call_path.as_slice() == ["pytest", "fixture"] }) } -pub(super) fn is_pytest_yield_fixture(context: &Context, decorator: &Expr) -> bool { - context +pub(super) fn is_pytest_yield_fixture(model: &SemanticModel, decorator: &Expr) -> bool { + model .resolve_call_path(map_callable(decorator)) .map_or(false, |call_path| { call_path.as_slice() == ["pytest", "yield_fixture"] }) } -pub(super) fn is_pytest_parametrize(context: &Context, decorator: &Expr) -> bool { - context +pub(super) fn is_pytest_parametrize(model: &SemanticModel, decorator: &Expr) -> bool { + model .resolve_call_path(map_callable(decorator)) .map_or(false, |call_path| { call_path.as_slice() == ["pytest", "mark", "parametrize"] diff --git a/crates/ruff/src/rules/flake8_pytest_style/rules/marks.rs b/crates/ruff/src/rules/flake8_pytest_style/rules/marks.rs index d0dc8fea35abf..6c2e178a7670e 100644 --- a/crates/ruff/src/rules/flake8_pytest_style/rules/marks.rs +++ b/crates/ruff/src/rules/flake8_pytest_style/rules/marks.rs @@ -125,14 +125,8 @@ fn check_useless_usefixtures(checker: &mut Checker, decorator: &Expr, call_path: } pub(crate) fn marks(checker: &mut Checker, decorators: &[Expr]) { - let enforce_parentheses = checker - .settings - .rules - .enabled(Rule::PytestIncorrectMarkParenthesesStyle); - let enforce_useless_usefixtures = checker - .settings - .rules - .enabled(Rule::PytestUseFixturesWithoutParameters); + let enforce_parentheses = checker.enabled(Rule::PytestIncorrectMarkParenthesesStyle); + let enforce_useless_usefixtures = checker.enabled(Rule::PytestUseFixturesWithoutParameters); for (expr, call_path) in get_mark_decorators(decorators) { if enforce_parentheses { diff --git a/crates/ruff/src/rules/flake8_pytest_style/rules/parametrize.rs b/crates/ruff/src/rules/flake8_pytest_style/rules/parametrize.rs index 4d225f422bf9c..51e8f0a1fe2b6 100644 --- a/crates/ruff/src/rules/flake8_pytest_style/rules/parametrize.rs +++ b/crates/ruff/src/rules/flake8_pytest_style/rules/parametrize.rs @@ -4,6 +4,7 @@ use rustpython_parser::{lexer, Mode, Tok}; use ruff_diagnostics::{AutofixKind, Diagnostic, Edit, Fix, Violation}; use ruff_macros::{derive_message_formats, violation}; +use ruff_python_ast::source_code::{Generator, Locator}; use crate::checkers::ast::Checker; use crate::registry::{AsRule, Rule}; @@ -45,7 +46,7 @@ impl Violation for PytestParametrizeValuesWrongType { } } -fn elts_to_csv(elts: &[Expr], checker: &Checker) -> Option { +fn elts_to_csv(elts: &[Expr], generator: Generator) -> Option { let all_literals = elts.iter().all(|e| { matches!( e, @@ -77,7 +78,7 @@ fn elts_to_csv(elts: &[Expr], checker: &Checker) -> Option { kind: None, range: TextRange::default(), }); - Some(checker.generator().expr(&node)) + Some(generator.expr(&node)) } /// Returns the range of the `name` argument of `@pytest.mark.parametrize`. @@ -93,14 +94,14 @@ fn elts_to_csv(elts: &[Expr], checker: &Checker) -> Option { /// ``` /// /// This method assumes that the first argument is a string. 
-fn get_parametrize_name_range(checker: &Checker, decorator: &Expr, expr: &Expr) -> TextRange { +fn get_parametrize_name_range(decorator: &Expr, expr: &Expr, locator: &Locator) -> TextRange { let mut locations = Vec::new(); let mut implicit_concat = None; // The parenthesis are not part of the AST, so we need to tokenize the // decorator to find them. for (tok, range) in lexer::lex_starts_at( - checker.locator.slice(decorator.range()), + locator.slice(decorator.range()), Mode::Module, decorator.start(), ) @@ -139,7 +140,8 @@ fn check_names(checker: &mut Checker, decorator: &Expr, expr: &Expr) { if names.len() > 1 { match names_type { types::ParametrizeNameType::Tuple => { - let name_range = get_parametrize_name_range(checker, decorator, expr); + let name_range = + get_parametrize_name_range(decorator, expr, checker.locator); let mut diagnostic = Diagnostic::new( PytestParametrizeNamesWrongType { expected: names_type, @@ -170,7 +172,8 @@ fn check_names(checker: &mut Checker, decorator: &Expr, expr: &Expr) { checker.diagnostics.push(diagnostic); } types::ParametrizeNameType::List => { - let name_range = get_parametrize_name_range(checker, decorator, expr); + let name_range = + get_parametrize_name_range(decorator, expr, checker.locator); let mut diagnostic = Diagnostic::new( PytestParametrizeNamesWrongType { expected: names_type, @@ -241,7 +244,7 @@ fn check_names(checker: &mut Checker, decorator: &Expr, expr: &Expr) { expr.range(), ); if checker.patch(diagnostic.kind.rule()) { - if let Some(content) = elts_to_csv(elts, checker) { + if let Some(content) = elts_to_csv(elts, checker.generator()) { #[allow(deprecated)] diagnostic.set_fix(Fix::unspecified(Edit::range_replacement( content, @@ -291,7 +294,7 @@ fn check_names(checker: &mut Checker, decorator: &Expr, expr: &Expr) { expr.range(), ); if checker.patch(diagnostic.kind.rule()) { - if let Some(content) = elts_to_csv(elts, checker) { + if let Some(content) = elts_to_csv(elts, checker.generator()) { #[allow(deprecated)] diagnostic.set_fix(Fix::unspecified(Edit::range_replacement( content, @@ -416,22 +419,14 @@ fn handle_value_rows( pub(crate) fn parametrize(checker: &mut Checker, decorators: &[Expr]) { for decorator in decorators { - if is_pytest_parametrize(&checker.ctx, decorator) { + if is_pytest_parametrize(checker.semantic_model(), decorator) { if let Expr::Call(ast::ExprCall { args, .. 
}) = decorator { - if checker - .settings - .rules - .enabled(Rule::PytestParametrizeNamesWrongType) - { + if checker.enabled(Rule::PytestParametrizeNamesWrongType) { if let Some(names) = args.get(0) { check_names(checker, decorator, names); } } - if checker - .settings - .rules - .enabled(Rule::PytestParametrizeValuesWrongType) - { + if checker.enabled(Rule::PytestParametrizeValuesWrongType) { if let Some(names) = args.get(0) { if let Some(values) = args.get(1) { check_values(checker, names, values); diff --git a/crates/ruff/src/rules/flake8_pytest_style/rules/raises.rs b/crates/ruff/src/rules/flake8_pytest_style/rules/raises.rs index dd541156aede6..dc79b828399e5 100644 --- a/crates/ruff/src/rules/flake8_pytest_style/rules/raises.rs +++ b/crates/ruff/src/rules/flake8_pytest_style/rules/raises.rs @@ -4,6 +4,7 @@ use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; use ruff_python_ast::call_path::format_call_path; use ruff_python_ast::call_path::from_qualified_name; +use ruff_python_semantic::model::SemanticModel; use crate::checkers::ast::Checker; use crate::registry::Rule; @@ -46,13 +47,10 @@ impl Violation for PytestRaisesWithoutException { } } -fn is_pytest_raises(checker: &Checker, func: &Expr) -> bool { - checker - .ctx - .resolve_call_path(func) - .map_or(false, |call_path| { - call_path.as_slice() == ["pytest", "raises"] - }) +fn is_pytest_raises(func: &Expr, model: &SemanticModel) -> bool { + model.resolve_call_path(func).map_or(false, |call_path| { + call_path.as_slice() == ["pytest", "raises"] + }) } const fn is_non_trivial_with_body(body: &[Stmt]) -> bool { @@ -66,12 +64,8 @@ const fn is_non_trivial_with_body(body: &[Stmt]) -> bool { } pub(crate) fn raises_call(checker: &mut Checker, func: &Expr, args: &[Expr], keywords: &[Keyword]) { - if is_pytest_raises(checker, func) { - if checker - .settings - .rules - .enabled(Rule::PytestRaisesWithoutException) - { + if is_pytest_raises(func, checker.semantic_model()) { + if checker.enabled(Rule::PytestRaisesWithoutException) { if args.is_empty() && keywords.is_empty() { checker .diagnostics @@ -79,7 +73,7 @@ pub(crate) fn raises_call(checker: &mut Checker, func: &Expr, args: &[Expr], key } } - if checker.settings.rules.enabled(Rule::PytestRaisesTooBroad) { + if checker.enabled(Rule::PytestRaisesTooBroad) { let match_keyword = keywords .iter() .find(|kw| kw.arg == Some(Identifier::new("match"))); @@ -106,7 +100,7 @@ pub(crate) fn complex_raises( let mut is_too_complex = false; let raises_called = items.iter().any(|item| match &item.context_expr { - Expr::Call(ast::ExprCall { func, .. }) => is_pytest_raises(checker, func), + Expr::Call(ast::ExprCall { func, .. 
}) => is_pytest_raises(func, checker.semantic_model()), _ => false, }); @@ -147,7 +141,7 @@ pub(crate) fn complex_raises( /// PT011 fn exception_needs_match(checker: &mut Checker, exception: &Expr) { if let Some(call_path) = checker - .ctx + .semantic_model() .resolve_call_path(exception) .and_then(|call_path| { let is_broad_exception = checker diff --git a/crates/ruff/src/rules/flake8_pytest_style/types.rs b/crates/ruff/src/rules/flake8_pytest_style/types.rs index e1c4314726eb9..f666a8a909e6f 100644 --- a/crates/ruff/src/rules/flake8_pytest_style/types.rs +++ b/crates/ruff/src/rules/flake8_pytest_style/types.rs @@ -1,7 +1,9 @@ -use ruff_macros::CacheKey; -use serde::{Deserialize, Serialize}; use std::fmt::{Display, Formatter}; +use serde::{Deserialize, Serialize}; + +use ruff_macros::CacheKey; + #[derive(Clone, Copy, Debug, CacheKey, PartialEq, Eq, Serialize, Deserialize)] #[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] pub enum ParametrizeNameType { diff --git a/crates/ruff/src/rules/flake8_quotes/mod.rs b/crates/ruff/src/rules/flake8_quotes/mod.rs index e542478e93219..ee9648bcb3c70 100644 --- a/crates/ruff/src/rules/flake8_quotes/mod.rs +++ b/crates/ruff/src/rules/flake8_quotes/mod.rs @@ -6,11 +6,10 @@ pub mod settings; mod tests { use std::path::Path; - use crate::assert_messages; use anyhow::Result; - use test_case::test_case; + use crate::assert_messages; use crate::registry::Rule; use crate::settings::Settings; use crate::test::test_path; diff --git a/crates/ruff/src/rules/flake8_quotes/rules.rs b/crates/ruff/src/rules/flake8_quotes/rules/from_tokens.rs similarity index 99% rename from crates/ruff/src/rules/flake8_quotes/rules.rs rename to crates/ruff/src/rules/flake8_quotes/rules/from_tokens.rs index b9f5efc6b0b9f..7c6f990dd334f 100644 --- a/crates/ruff/src/rules/flake8_quotes/rules.rs +++ b/crates/ruff/src/rules/flake8_quotes/rules/from_tokens.rs @@ -10,7 +10,7 @@ use crate::lex::docstring_detection::StateMachine; use crate::registry::Rule; use crate::settings::Settings; -use super::settings::Quote; +use super::super::settings::Quote; /// ## What it does /// Checks for inline strings that use single quotes or double quotes, diff --git a/crates/ruff/src/rules/flake8_quotes/rules/mod.rs b/crates/ruff/src/rules/flake8_quotes/rules/mod.rs new file mode 100644 index 0000000000000..7f04e8908fdaf --- /dev/null +++ b/crates/ruff/src/rules/flake8_quotes/rules/mod.rs @@ -0,0 +1,6 @@ +pub(crate) use from_tokens::{ + from_tokens, AvoidableEscapedQuote, BadQuotesDocstring, BadQuotesInlineString, + BadQuotesMultilineString, +}; + +mod from_tokens; diff --git a/crates/ruff/src/rules/flake8_raise/mod.rs b/crates/ruff/src/rules/flake8_raise/mod.rs index 8ab2d26a740e4..d5233d1bd441f 100644 --- a/crates/ruff/src/rules/flake8_raise/mod.rs +++ b/crates/ruff/src/rules/flake8_raise/mod.rs @@ -7,7 +7,6 @@ mod tests { use std::path::Path; use anyhow::Result; - use test_case::test_case; use crate::registry::Rule; diff --git a/crates/ruff/src/rules/flake8_return/mod.rs b/crates/ruff/src/rules/flake8_return/mod.rs index 21763cb57099b..cdde36ef05928 100644 --- a/crates/ruff/src/rules/flake8_return/mod.rs +++ b/crates/ruff/src/rules/flake8_return/mod.rs @@ -8,11 +8,10 @@ mod visitor; mod tests { use std::path::Path; - use crate::assert_messages; use anyhow::Result; - use test_case::test_case; + use crate::assert_messages; use crate::registry::Rule; use crate::settings::Settings; use crate::test::test_path; diff --git a/crates/ruff/src/rules/flake8_return/rules.rs 
b/crates/ruff/src/rules/flake8_return/rules/function.rs similarity index 95% rename from crates/ruff/src/rules/flake8_return/rules.rs rename to crates/ruff/src/rules/flake8_return/rules/function.rs index c293df839601b..cc84657be0990 100644 --- a/crates/ruff/src/rules/flake8_return/rules.rs +++ b/crates/ruff/src/rules/flake8_return/rules/function.rs @@ -9,15 +9,15 @@ use ruff_python_ast::helpers::elif_else_range; use ruff_python_ast::helpers::is_const_none; use ruff_python_ast::visitor::Visitor; use ruff_python_ast::whitespace::indentation; -use ruff_python_semantic::context::Context; +use ruff_python_semantic::model::SemanticModel; use crate::checkers::ast::Checker; use crate::registry::{AsRule, Rule}; use crate::rules::flake8_return::helpers::end_of_last_statement; -use super::branch::Branch; -use super::helpers::result_exists; -use super::visitor::{ReturnVisitor, Stack}; +use super::super::branch::Branch; +use super::super::helpers::result_exists; +use super::super::visitor::{ReturnVisitor, Stack}; /// ## What it does /// Checks for the presence of a `return None` statement when `None` is the only @@ -391,12 +391,12 @@ const NORETURN_FUNCS: &[&[&str]] = &[ ]; /// Return `true` if the `func` is a known function that never returns. -fn is_noreturn_func(context: &Context, func: &Expr) -> bool { - context.resolve_call_path(func).map_or(false, |call_path| { +fn is_noreturn_func(model: &SemanticModel, func: &Expr) -> bool { + model.resolve_call_path(func).map_or(false, |call_path| { NORETURN_FUNCS .iter() .any(|target| call_path.as_slice() == *target) - || context.match_typing_call_path(&call_path, "assert_never") + || model.match_typing_call_path(&call_path, "assert_never") }) } @@ -484,7 +484,7 @@ fn implicit_return(checker: &mut Checker, stmt: &Stmt) { if matches!( value.as_ref(), Expr::Call(ast::ExprCall { func, .. 
}) - if is_noreturn_func(&checker.ctx, func) + if is_noreturn_func(checker.semantic_model(), func) ) => {} _ => { let mut diagnostic = Diagnostic::new(ImplicitReturn, stmt.range()); @@ -633,7 +633,7 @@ fn superfluous_else_node(checker: &mut Checker, stmt: &Stmt, branch: Branch) -> SuperfluousElseReturn { branch }, elif_else_range(stmt, checker.locator).unwrap_or_else(|| stmt.range()), ); - if checker.settings.rules.enabled(diagnostic.kind.rule()) { + if checker.enabled(diagnostic.kind.rule()) { checker.diagnostics.push(diagnostic); } return true; @@ -642,7 +642,7 @@ fn superfluous_else_node(checker: &mut Checker, stmt: &Stmt, branch: Branch) -> SuperfluousElseBreak { branch }, elif_else_range(stmt, checker.locator).unwrap_or_else(|| stmt.range()), ); - if checker.settings.rules.enabled(diagnostic.kind.rule()) { + if checker.enabled(diagnostic.kind.rule()) { checker.diagnostics.push(diagnostic); } return true; @@ -651,7 +651,7 @@ fn superfluous_else_node(checker: &mut Checker, stmt: &Stmt, branch: Branch) -> SuperfluousElseRaise { branch }, elif_else_range(stmt, checker.locator).unwrap_or_else(|| stmt.range()), ); - if checker.settings.rules.enabled(diagnostic.kind.rule()) { + if checker.enabled(diagnostic.kind.rule()) { checker.diagnostics.push(diagnostic); } return true; @@ -660,7 +660,7 @@ fn superfluous_else_node(checker: &mut Checker, stmt: &Stmt, branch: Branch) -> SuperfluousElseContinue { branch }, elif_else_range(stmt, checker.locator).unwrap_or_else(|| stmt.range()), ); - if checker.settings.rules.enabled(diagnostic.kind.rule()) { + if checker.enabled(diagnostic.kind.rule()) { checker.diagnostics.push(diagnostic); } return true; @@ -710,7 +710,7 @@ pub(crate) fn function(checker: &mut Checker, body: &[Stmt], returns: Option<&Ex return; } - if checker.settings.rules.any_enabled(&[ + if checker.any_enabled(&[ Rule::SuperfluousElseReturn, Rule::SuperfluousElseRaise, Rule::SuperfluousElseContinue, @@ -727,14 +727,14 @@ pub(crate) fn function(checker: &mut Checker, body: &[Stmt], returns: Option<&Ex // If we have at least one non-`None` return... if result_exists(&stack.returns) { - if checker.settings.rules.enabled(Rule::ImplicitReturnValue) { + if checker.enabled(Rule::ImplicitReturnValue) { implicit_return_value(checker, &stack); } - if checker.settings.rules.enabled(Rule::ImplicitReturn) { + if checker.enabled(Rule::ImplicitReturn) { implicit_return(checker, last_stmt); } - if checker.settings.rules.enabled(Rule::UnnecessaryAssign) { + if checker.enabled(Rule::UnnecessaryAssign) { for (_, expr) in &stack.returns { if let Some(expr) = expr { unnecessary_assign(checker, &stack, expr); @@ -742,7 +742,7 @@ pub(crate) fn function(checker: &mut Checker, body: &[Stmt], returns: Option<&Ex } } } else { - if checker.settings.rules.enabled(Rule::UnnecessaryReturnNone) { + if checker.enabled(Rule::UnnecessaryReturnNone) { // Skip functions that have a return annotation that is not `None`. 
if returns.map_or(true, is_const_none) { unnecessary_return_none(checker, &stack); diff --git a/crates/ruff/src/rules/flake8_return/rules/mod.rs b/crates/ruff/src/rules/flake8_return/rules/mod.rs new file mode 100644 index 0000000000000..7e6a3fbcf7448 --- /dev/null +++ b/crates/ruff/src/rules/flake8_return/rules/mod.rs @@ -0,0 +1,6 @@ +pub(crate) use function::{ + function, ImplicitReturn, ImplicitReturnValue, SuperfluousElseBreak, SuperfluousElseContinue, + SuperfluousElseRaise, SuperfluousElseReturn, UnnecessaryAssign, UnnecessaryReturnNone, +}; + +mod function; diff --git a/crates/ruff/src/rules/flake8_self/mod.rs b/crates/ruff/src/rules/flake8_self/mod.rs index 1d75aeef6364b..debfc7d5e57f4 100644 --- a/crates/ruff/src/rules/flake8_self/mod.rs +++ b/crates/ruff/src/rules/flake8_self/mod.rs @@ -8,7 +8,6 @@ mod tests { use std::path::Path; use anyhow::Result; - use test_case::test_case; use crate::registry::Rule; diff --git a/crates/ruff/src/rules/flake8_self/rules/private_member_access.rs b/crates/ruff/src/rules/flake8_self/rules/private_member_access.rs index da01985e1c327..76dde4f57f5a7 100644 --- a/crates/ruff/src/rules/flake8_self/rules/private_member_access.rs +++ b/crates/ruff/src/rules/flake8_self/rules/private_member_access.rs @@ -94,7 +94,7 @@ pub(crate) fn private_member_access(checker: &mut Checker, expr: &Expr) { // Ignore accesses on class members from _within_ the class. if checker - .ctx + .semantic_model() .scopes .iter() .rev() @@ -105,7 +105,7 @@ pub(crate) fn private_member_access(checker: &mut Checker, expr: &Expr) { .map_or(false, |class_def| { if call_path.as_slice() == [class_def.name] { checker - .ctx + .semantic_model() .find_binding(class_def.name) .map_or(false, |binding| { // TODO(charlie): Could the name ever be bound to a diff --git a/crates/ruff/src/rules/flake8_simplify/mod.rs b/crates/ruff/src/rules/flake8_simplify/mod.rs index 027d9ddb91f3d..45937fdff0955 100644 --- a/crates/ruff/src/rules/flake8_simplify/mod.rs +++ b/crates/ruff/src/rules/flake8_simplify/mod.rs @@ -6,7 +6,6 @@ mod tests { use std::path::Path; use anyhow::Result; - use test_case::test_case; use crate::registry::Rule; diff --git a/crates/ruff/src/rules/flake8_simplify/rules/ast_bool_op.rs b/crates/ruff/src/rules/flake8_simplify/rules/ast_bool_op.rs index 76cc3db4cbf65..2b032172c76ab 100644 --- a/crates/ruff/src/rules/flake8_simplify/rules/ast_bool_op.rs +++ b/crates/ruff/src/rules/flake8_simplify/rules/ast_bool_op.rs @@ -271,7 +271,7 @@ pub(crate) fn duplicate_isinstance_call(checker: &mut Checker, expr: &Expr) { if func_name != "isinstance" { continue; } - if !checker.ctx.is_builtin("isinstance") { + if !checker.semantic_model().is_builtin("isinstance") { continue; } @@ -293,7 +293,7 @@ pub(crate) fn duplicate_isinstance_call(checker: &mut Checker, expr: &Expr) { } else { unreachable!("Indices should only contain `isinstance` calls") }; - let fixable = !contains_effect(target, |id| checker.ctx.is_builtin(id)); + let fixable = !contains_effect(target, |id| checker.semantic_model().is_builtin(id)); let mut diagnostic = Diagnostic::new( DuplicateIsinstanceCall { name: if let Expr::Name(ast::ExprName { id, .. }) = target { @@ -425,7 +425,7 @@ pub(crate) fn compare_with_tuple(checker: &mut Checker, expr: &Expr) { // Avoid rewriting (e.g.) `a == "foo" or a == f()`. 
if comparators .iter() - .any(|expr| contains_effect(expr, |id| checker.ctx.is_builtin(id))) + .any(|expr| contains_effect(expr, |id| checker.semantic_model().is_builtin(id))) { continue; } @@ -516,7 +516,7 @@ pub(crate) fn expr_and_not_expr(checker: &mut Checker, expr: &Expr) { return; } - if contains_effect(expr, |id| checker.ctx.is_builtin(id)) { + if contains_effect(expr, |id| checker.semantic_model().is_builtin(id)) { return; } @@ -571,7 +571,7 @@ pub(crate) fn expr_or_not_expr(checker: &mut Checker, expr: &Expr) { return; } - if contains_effect(expr, |id| checker.ctx.is_builtin(id)) { + if contains_effect(expr, |id| checker.semantic_model().is_builtin(id)) { return; } @@ -640,14 +640,15 @@ fn is_short_circuit( for (index, (value, next_value)) in values.iter().tuple_windows().enumerate() { // Keep track of the location of the furthest-right, truthy or falsey expression. - let value_truthiness = Truthiness::from_expr(value, |id| checker.ctx.is_builtin(id)); + let value_truthiness = + Truthiness::from_expr(value, |id| checker.semantic_model().is_builtin(id)); let next_value_truthiness = - Truthiness::from_expr(next_value, |id| checker.ctx.is_builtin(id)); + Truthiness::from_expr(next_value, |id| checker.semantic_model().is_builtin(id)); // Keep track of the location of the furthest-right, non-effectful expression. if value_truthiness.is_unknown() - && (!checker.ctx.in_boolean_test() - || contains_effect(value, |id| checker.ctx.is_builtin(id))) + && (!checker.semantic_model().in_boolean_test() + || contains_effect(value, |id| checker.semantic_model().is_builtin(id))) { location = next_value.start(); continue; @@ -667,7 +668,7 @@ fn is_short_circuit( value, TextRange::new(location, expr.end()), short_circuit_truthiness, - checker.ctx.in_boolean_test(), + checker.semantic_model().in_boolean_test(), checker, )); break; @@ -685,7 +686,7 @@ fn is_short_circuit( next_value, TextRange::new(location, expr.end()), short_circuit_truthiness, - checker.ctx.in_boolean_test(), + checker.semantic_model().in_boolean_test(), checker, )); break; diff --git a/crates/ruff/src/rules/flake8_simplify/rules/ast_expr.rs b/crates/ruff/src/rules/flake8_simplify/rules/ast_expr.rs index c9ba60dc20782..83006f6f8dcc8 100644 --- a/crates/ruff/src/rules/flake8_simplify/rules/ast_expr.rs +++ b/crates/ruff/src/rules/flake8_simplify/rules/ast_expr.rs @@ -87,7 +87,7 @@ pub(crate) fn use_capital_environment_variables(checker: &mut Checker, expr: &Ex return; }; if !checker - .ctx + .semantic_model() .resolve_call_path(func) .map_or(false, |call_path| { call_path.as_slice() == ["os", "environ", "get"] diff --git a/crates/ruff/src/rules/flake8_simplify/rules/ast_if.rs b/crates/ruff/src/rules/flake8_simplify/rules/ast_if.rs index 1449957c8093c..a78d8f6df61d3 100644 --- a/crates/ruff/src/rules/flake8_simplify/rules/ast_if.rs +++ b/crates/ruff/src/rules/flake8_simplify/rules/ast_if.rs @@ -2,7 +2,6 @@ use log::error; use ruff_text_size::TextRange; use rustc_hash::FxHashSet; use rustpython_parser::ast::{self, Cmpop, Constant, Expr, ExprContext, Ranged, Stmt}; -use unicode_width::UnicodeWidthStr; use ruff_diagnostics::{AutofixKind, Diagnostic, Edit, Fix, Violation}; use ruff_macros::{derive_message_formats, violation}; @@ -11,9 +10,10 @@ use ruff_python_ast::helpers::{ any_over_expr, contains_effect, first_colon_range, has_comments, has_comments_in, }; use ruff_python_ast::newlines::StrExt; -use ruff_python_semantic::context::Context; +use ruff_python_semantic::model::SemanticModel; use crate::checkers::ast::Checker; +use 
crate::line_width::LineWidth; use crate::registry::AsRule; use crate::rules::flake8_simplify::rules::fix_if; @@ -288,7 +288,10 @@ pub(crate) fn nested_if_statements( .content() .unwrap_or_default() .universal_newlines() - .all(|line| line.width() <= checker.settings.line_length) + .all(|line| { + LineWidth::new(checker.settings.tab_size).add_str(&line) + <= checker.settings.line_length + }) { #[allow(deprecated)] diagnostic.set_fix(Fix::unspecified(edit)); @@ -351,7 +354,7 @@ pub(crate) fn needless_bool(checker: &mut Checker, stmt: &Stmt) { let fixable = matches!(if_return, Bool::True) && matches!(else_return, Bool::False) && !has_comments(stmt, checker.locator) - && (test.is_compare_expr() || checker.ctx.is_builtin("bool")); + && (test.is_compare_expr() || checker.semantic_model().is_builtin("bool")); let mut diagnostic = Diagnostic::new(NeedlessBool { condition }, stmt.range()); if fixable && checker.patch(diagnostic.kind.rule()) { @@ -411,9 +414,10 @@ fn ternary(target_var: &Expr, body_value: &Expr, test: &Expr, orelse_value: &Exp } /// Return `true` if the `Expr` contains a reference to `${module}.${target}`. -fn contains_call_path(ctx: &Context, expr: &Expr, target: &[&str]) -> bool { +fn contains_call_path(model: &SemanticModel, expr: &Expr, target: &[&str]) -> bool { any_over_expr(expr, &|expr| { - ctx.resolve_call_path(expr) + model + .resolve_call_path(expr) .map_or(false, |call_path| call_path.as_slice() == target) }) } @@ -446,13 +450,13 @@ pub(crate) fn use_ternary_operator(checker: &mut Checker, stmt: &Stmt, parent: O } // Avoid suggesting ternary for `if sys.version_info >= ...`-style checks. - if contains_call_path(&checker.ctx, test, &["sys", "version_info"]) { + if contains_call_path(checker.semantic_model(), test, &["sys", "version_info"]) { return; } // Avoid suggesting ternary for `if sys.platform.startswith("...")`-style // checks. - if contains_call_path(&checker.ctx, test, &["sys", "platform"]) { + if contains_call_path(checker.semantic_model(), test, &["sys", "platform"]) { return; } @@ -507,8 +511,9 @@ pub(crate) fn use_ternary_operator(checker: &mut Checker, stmt: &Stmt, parent: O // Don't flag if the resulting expression would exceed the maximum line length. let line_start = checker.locator.line_start(stmt.start()); - if checker.locator.contents()[TextRange::new(line_start, stmt.start())].width() - + contents.width() + if LineWidth::new(checker.settings.tab_size) + .add_str(&checker.locator.contents()[TextRange::new(line_start, stmt.start())]) + .add_str(&contents) > checker.settings.line_length { return; @@ -647,7 +652,7 @@ pub(crate) fn manual_dict_lookup( return; }; if value.as_ref().map_or(false, |value| { - contains_effect(value, |id| checker.ctx.is_builtin(id)) + contains_effect(value, |id| checker.semantic_model().is_builtin(id)) }) { return; } @@ -720,7 +725,7 @@ pub(crate) fn manual_dict_lookup( return; }; if value.as_ref().map_or(false, |value| { - contains_effect(value, |id| checker.ctx.is_builtin(id)) + contains_effect(value, |id| checker.semantic_model().is_builtin(id)) }) { return; }; @@ -803,7 +808,7 @@ pub(crate) fn use_dict_get_with_default( } // Check that the default value is not "complex". - if contains_effect(default_value, |id| checker.ctx.is_builtin(id)) { + if contains_effect(default_value, |id| checker.semantic_model().is_builtin(id)) { return; } @@ -862,8 +867,9 @@ pub(crate) fn use_dict_get_with_default( // Don't flag if the resulting expression would exceed the maximum line length. 
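// Illustrative sketch only -- not part of the diff above. The
// `LineWidth::new(tab_size).add_str(...)` checks introduced here replace plain
// `str::width()` sums so that a tab advances to the next tab stop instead of
// counting as a single column. The stand-in below (`LineWidthSketch`) shows that
// assumed behaviour; the real type in `crate::line_width` differs in detail
// (for instance, the line-length limit is its own type there).
use unicode_width::UnicodeWidthChar;

#[derive(Clone, Copy)]
struct LineWidthSketch {
    column: usize,
    tab_size: usize,
}

impl LineWidthSketch {
    fn new(tab_size: usize) -> Self {
        Self { column: 0, tab_size }
    }

    fn add_str(mut self, text: &str) -> Self {
        for c in text.chars() {
            self.column += if c == '\t' {
                // Jump to the next multiple of `tab_size`.
                self.tab_size - (self.column % self.tab_size)
            } else {
                // Wide and zero-width characters count by display width, which is
                // why the updated fixtures exercise characters like `ß` and emoji.
                c.width().unwrap_or(0)
            };
        }
        self
    }

    fn get(self) -> usize {
        self.column
    }
}

// Usage mirroring the checks above: skip the fix when the merged line would be too long.
fn fits_in(line: &str, tab_size: usize, line_length: usize) -> bool {
    LineWidthSketch::new(tab_size).add_str(line).get() <= line_length
}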
let line_start = checker.locator.line_start(stmt.start()); - if checker.locator.contents()[TextRange::new(line_start, stmt.start())].width() - + contents.width() + if LineWidth::new(checker.settings.tab_size) + .add_str(&checker.locator.contents()[TextRange::new(line_start, stmt.start())]) + .add_str(&contents) > checker.settings.line_length { return; diff --git a/crates/ruff/src/rules/flake8_simplify/rules/ast_ifexp.rs b/crates/ruff/src/rules/flake8_simplify/rules/ast_ifexp.rs index 2879fb5798f7e..cbbac3b6a9c59 100644 --- a/crates/ruff/src/rules/flake8_simplify/rules/ast_ifexp.rs +++ b/crates/ruff/src/rules/flake8_simplify/rules/ast_ifexp.rs @@ -107,7 +107,7 @@ pub(crate) fn explicit_true_false_in_ifexpr( checker.generator().expr(&test.clone()), expr.range(), ))); - } else if checker.ctx.is_builtin("bool") { + } else if checker.semantic_model().is_builtin("bool") { let node = ast::ExprName { id: "bool".into(), ctx: ExprContext::Load, diff --git a/crates/ruff/src/rules/flake8_simplify/rules/ast_unary_op.rs b/crates/ruff/src/rules/flake8_simplify/rules/ast_unary_op.rs index 39128b5f9c46b..95073b5574cb1 100644 --- a/crates/ruff/src/rules/flake8_simplify/rules/ast_unary_op.rs +++ b/crates/ruff/src/rules/flake8_simplify/rules/ast_unary_op.rs @@ -3,7 +3,6 @@ use rustpython_parser::ast::{self, Cmpop, Expr, ExprContext, Ranged, Stmt, Unary use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit, Fix}; use ruff_macros::{derive_message_formats, violation}; - use ruff_python_semantic::scope::ScopeKind; use crate::checkers::ast::Checker; @@ -94,12 +93,12 @@ pub(crate) fn negation_with_equal_op( if !matches!(&ops[..], [Cmpop::Eq]) { return; } - if is_exception_check(checker.ctx.stmt()) { + if is_exception_check(checker.semantic_model().stmt()) { return; } // Avoid flagging issues in dunder implementations. - if let ScopeKind::Function(def) = &checker.ctx.scope().kind { + if let ScopeKind::Function(def) = &checker.semantic_model().scope().kind { if DUNDER_METHODS.contains(&def.name) { return; } @@ -144,12 +143,12 @@ pub(crate) fn negation_with_not_equal_op( if !matches!(&ops[..], [Cmpop::NotEq]) { return; } - if is_exception_check(checker.ctx.stmt()) { + if is_exception_check(checker.semantic_model().stmt()) { return; } // Avoid flagging issues in dunder implementations. 
- if let ScopeKind::Function(def) = &checker.ctx.scope().kind { + if let ScopeKind::Function(def) = &checker.semantic_model().scope().kind { if DUNDER_METHODS.contains(&def.name) { return; } @@ -197,13 +196,13 @@ pub(crate) fn double_negation(checker: &mut Checker, expr: &Expr, op: Unaryop, o expr.range(), ); if checker.patch(diagnostic.kind.rule()) { - if checker.ctx.in_boolean_test() { + if checker.semantic_model().in_boolean_test() { #[allow(deprecated)] diagnostic.set_fix(Fix::unspecified(Edit::range_replacement( checker.generator().expr(operand), expr.range(), ))); - } else if checker.ctx.is_builtin("bool") { + } else if checker.semantic_model().is_builtin("bool") { let node = ast::ExprName { id: "bool".into(), ctx: ExprContext::Load, diff --git a/crates/ruff/src/rules/flake8_simplify/rules/ast_with.rs b/crates/ruff/src/rules/flake8_simplify/rules/ast_with.rs index fa4cdb0497abf..5d8f3406d62fb 100644 --- a/crates/ruff/src/rules/flake8_simplify/rules/ast_with.rs +++ b/crates/ruff/src/rules/flake8_simplify/rules/ast_with.rs @@ -1,7 +1,6 @@ use log::error; use ruff_text_size::TextRange; use rustpython_parser::ast::{self, Ranged, Stmt, Withitem}; -use unicode_width::UnicodeWidthStr; use ruff_diagnostics::{AutofixKind, Violation}; use ruff_diagnostics::{Diagnostic, Fix}; @@ -10,6 +9,7 @@ use ruff_python_ast::helpers::{first_colon_range, has_comments_in}; use ruff_python_ast::newlines::StrExt; use crate::checkers::ast::Checker; +use crate::line_width::LineWidth; use crate::registry::AsRule; use super::fix_with; @@ -111,7 +111,10 @@ pub(crate) fn multiple_with_statements( .content() .unwrap_or_default() .universal_newlines() - .all(|line| line.width() <= checker.settings.line_length) + .all(|line| { + LineWidth::new(checker.settings.tab_size).add_str(&line) + <= checker.settings.line_length + }) { #[allow(deprecated)] diagnostic.set_fix(Fix::unspecified(edit)); diff --git a/crates/ruff/src/rules/flake8_simplify/rules/fix_if.rs b/crates/ruff/src/rules/flake8_simplify/rules/fix_if.rs index b94839db61d12..4a58149ff8f33 100644 --- a/crates/ruff/src/rules/flake8_simplify/rules/fix_if.rs +++ b/crates/ruff/src/rules/flake8_simplify/rules/fix_if.rs @@ -1,3 +1,5 @@ +use std::borrow::Cow; + use anyhow::{bail, Result}; use libcst_native::{ BooleanOp, BooleanOperation, Codegen, CodegenState, CompoundStatement, Expression, If, @@ -5,13 +7,12 @@ use libcst_native::{ Statement, Suite, }; use rustpython_parser::ast::Ranged; -use std::borrow::Cow; use ruff_diagnostics::Edit; use ruff_python_ast::source_code::{Locator, Stylist}; use ruff_python_ast::whitespace; -use crate::cst::matchers::match_module; +use crate::cst::matchers::{match_function_def, match_if, match_indented_block, match_statement}; fn parenthesize_and_operand(expr: Expression) -> Expression { match &expr { @@ -66,26 +67,23 @@ pub(crate) fn fix_nested_if_statements( }; // Parse the CST. 
- let mut tree = match_module(&module_text)?; + let mut tree = match_statement(&module_text)?; - let statements = if outer_indent.is_empty() { - &mut *tree.body + let statement = if outer_indent.is_empty() { + &mut tree } else { - let [Statement::Compound(CompoundStatement::FunctionDef(embedding))] = &mut *tree.body else { - bail!("Expected statement to be embedded in a function definition") - }; + let embedding = match_function_def(&mut tree)?; - let Suite::IndentedBlock(indented_block) = &mut embedding.body else { - bail!("Expected indented block") - }; + let indented_block = match_indented_block(&mut embedding.body)?; indented_block.indent = Some(outer_indent); - &mut *indented_block.body + let Some(statement) = indented_block.body.first_mut() else { + bail!("Expected indented block to have at least one statement") + }; + statement }; - let [Statement::Compound(CompoundStatement::If(outer_if))] = statements else { - bail!("Expected one outer if statement") - }; + let outer_if = match_if(statement)?; let If { body: Suite::IndentedBlock(ref mut outer_body), diff --git a/crates/ruff/src/rules/flake8_simplify/rules/fix_with.rs b/crates/ruff/src/rules/flake8_simplify/rules/fix_with.rs index ba6bc1ebc4efc..eaae0f85755c6 100644 --- a/crates/ruff/src/rules/flake8_simplify/rules/fix_with.rs +++ b/crates/ruff/src/rules/flake8_simplify/rules/fix_with.rs @@ -6,7 +6,7 @@ use ruff_diagnostics::Edit; use ruff_python_ast::source_code::{Locator, Stylist}; use ruff_python_ast::whitespace; -use crate::cst::matchers::match_module; +use crate::cst::matchers::{match_function_def, match_indented_block, match_statement, match_with}; /// (SIM117) Convert `with a: with b:` to `with a, b:`. pub(crate) fn fix_multiple_with_statements( @@ -32,26 +32,23 @@ pub(crate) fn fix_multiple_with_statements( }; // Parse the CST. 
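// Illustrative sketch only -- not part of the diff above. The `match_statement`,
// `match_function_def`, `match_indented_block`, `match_if`, and `match_with`
// helpers imported in these hunks are assumed to be small typed accessors over
// `libcst_native` nodes that replace the inline `let ... else { bail!(...) }`
// patterns removed here. The exact signatures in `crate::cst::matchers` may
// differ; this is one plausible shape for `match_if`/`match_with`.
use anyhow::{bail, Result};
use libcst_native::{CompoundStatement, If, Statement, With};

fn match_if<'a, 'b>(statement: &'a mut Statement<'b>) -> Result<&'a mut If<'b>> {
    if let Statement::Compound(CompoundStatement::If(inner)) = statement {
        Ok(inner)
    } else {
        bail!("Expected Statement::Compound(CompoundStatement::If)")
    }
}

fn match_with<'a, 'b>(statement: &'a mut Statement<'b>) -> Result<&'a mut With<'b>> {
    if let Statement::Compound(CompoundStatement::With(inner)) = statement {
        Ok(inner)
    } else {
        bail!("Expected Statement::Compound(CompoundStatement::With)")
    }
}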
- let mut tree = match_module(&module_text)?; + let mut tree = match_statement(&module_text)?; - let statements = if outer_indent.is_empty() { - &mut *tree.body + let statement = if outer_indent.is_empty() { + &mut tree } else { - let [Statement::Compound(CompoundStatement::FunctionDef(embedding))] = &mut *tree.body else { - bail!("Expected statement to be embedded in a function definition") - }; + let embedding = match_function_def(&mut tree)?; - let Suite::IndentedBlock(indented_block) = &mut embedding.body else { - bail!("Expected indented block") - }; + let indented_block = match_indented_block(&mut embedding.body)?; indented_block.indent = Some(outer_indent); - &mut *indented_block.body + let Some(statement) = indented_block.body.first_mut() else { + bail!("Expected indented block to have at least one statement") + }; + statement }; - let [Statement::Compound(CompoundStatement::With(outer_with))] = statements else { - bail!("Expected one outer with statement") - }; + let outer_with = match_with(statement)?; let With { body: Suite::IndentedBlock(ref mut outer_body), diff --git a/crates/ruff/src/rules/flake8_simplify/rules/key_in_dict.rs b/crates/ruff/src/rules/flake8_simplify/rules/key_in_dict.rs index d986ecd9b4fb2..e63827a35263c 100644 --- a/crates/ruff/src/rules/flake8_simplify/rules/key_in_dict.rs +++ b/crates/ruff/src/rules/flake8_simplify/rules/key_in_dict.rs @@ -10,7 +10,7 @@ use ruff_macros::{derive_message_formats, violation}; use ruff_python_ast::source_code::{Locator, Stylist}; use crate::checkers::ast::Checker; -use crate::cst::matchers::{match_attribute, match_call, match_expression}; +use crate::cst::matchers::{match_attribute, match_call_mut, match_expression}; use crate::registry::AsRule; #[violation] @@ -39,7 +39,7 @@ fn get_value_content_for_key_in_dict( ) -> Result { let content = locator.slice(expr.range()); let mut expression = match_expression(content)?; - let call = match_call(&mut expression)?; + let call = match_call_mut(&mut expression)?; let attribute = match_attribute(&mut call.func)?; let mut state = CodegenState { diff --git a/crates/ruff/src/rules/flake8_simplify/rules/open_file_with_context_handler.rs b/crates/ruff/src/rules/flake8_simplify/rules/open_file_with_context_handler.rs index 9713fac9a22aa..8d10ddb4242a6 100644 --- a/crates/ruff/src/rules/flake8_simplify/rules/open_file_with_context_handler.rs +++ b/crates/ruff/src/rules/flake8_simplify/rules/open_file_with_context_handler.rs @@ -2,6 +2,7 @@ use rustpython_parser::ast::{self, Expr, Ranged, Stmt}; use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; +use ruff_python_semantic::model::SemanticModel; use crate::checkers::ast::Checker; @@ -41,8 +42,8 @@ impl Violation for OpenFileWithContextHandler { /// Return `true` if the current expression is nested in an `await /// exit_stack.enter_async_context` call. -fn match_async_exit_stack(checker: &Checker) -> bool { - let Some(expr) = checker.ctx.expr_grandparent() else { +fn match_async_exit_stack(model: &SemanticModel) -> bool { + let Some(expr) = model.expr_grandparent() else { return false; }; let Expr::Await(ast::ExprAwait { value, range: _ }) = expr else { @@ -57,17 +58,13 @@ fn match_async_exit_stack(checker: &Checker) -> bool { if attr != "enter_async_context" { return false; } - for parent in checker.ctx.parents() { + for parent in model.parents() { if let Stmt::With(ast::StmtWith { items, .. }) = parent { for item in items { if let Expr::Call(ast::ExprCall { func, .. 
}) = &item.context_expr { - if checker - .ctx - .resolve_call_path(func) - .map_or(false, |call_path| { - call_path.as_slice() == ["contextlib", "AsyncExitStack"] - }) - { + if model.resolve_call_path(func).map_or(false, |call_path| { + call_path.as_slice() == ["contextlib", "AsyncExitStack"] + }) { return true; } } @@ -79,8 +76,8 @@ fn match_async_exit_stack(checker: &Checker) -> bool { /// Return `true` if the current expression is nested in an /// `exit_stack.enter_context` call. -fn match_exit_stack(checker: &Checker) -> bool { - let Some(expr) = checker.ctx.expr_parent() else { +fn match_exit_stack(model: &SemanticModel) -> bool { + let Some(expr) = model.expr_parent() else { return false; }; let Expr::Call(ast::ExprCall { func, .. }) = expr else { @@ -92,17 +89,13 @@ fn match_exit_stack(checker: &Checker) -> bool { if attr != "enter_context" { return false; } - for parent in checker.ctx.parents() { + for parent in model.parents() { if let Stmt::With(ast::StmtWith { items, .. }) = parent { for item in items { if let Expr::Call(ast::ExprCall { func, .. }) = &item.context_expr { - if checker - .ctx - .resolve_call_path(func) - .map_or(false, |call_path| { - call_path.as_slice() == ["contextlib", "ExitStack"] - }) - { + if model.resolve_call_path(func).map_or(false, |call_path| { + call_path.as_slice() == ["contextlib", "ExitStack"] + }) { return true; } } @@ -115,23 +108,23 @@ fn match_exit_stack(checker: &Checker) -> bool { /// SIM115 pub(crate) fn open_file_with_context_handler(checker: &mut Checker, func: &Expr) { if checker - .ctx + .semantic_model() .resolve_call_path(func) .map_or(false, |call_path| call_path.as_slice() == ["", "open"]) { - if checker.ctx.is_builtin("open") { + if checker.semantic_model().is_builtin("open") { // Ex) `with open("foo.txt") as f: ...` - if matches!(checker.ctx.stmt(), Stmt::With(_)) { + if matches!(checker.semantic_model().stmt(), Stmt::With(_)) { return; } // Ex) `with contextlib.ExitStack() as exit_stack: ...` - if match_exit_stack(checker) { + if match_exit_stack(checker.semantic_model()) { return; } // Ex) `with contextlib.AsyncExitStack() as exit_stack: ...` - if match_async_exit_stack(checker) { + if match_async_exit_stack(checker.semantic_model()) { return; } diff --git a/crates/ruff/src/rules/flake8_simplify/rules/reimplemented_builtin.rs b/crates/ruff/src/rules/flake8_simplify/rules/reimplemented_builtin.rs index 2bbc5740daeec..9b847fa91d60e 100644 --- a/crates/ruff/src/rules/flake8_simplify/rules/reimplemented_builtin.rs +++ b/crates/ruff/src/rules/flake8_simplify/rules/reimplemented_builtin.rs @@ -2,14 +2,13 @@ use ruff_text_size::{TextRange, TextSize}; use rustpython_parser::ast::{ self, Cmpop, Comprehension, Constant, Expr, ExprContext, Ranged, Stmt, Unaryop, }; -use unicode_width::UnicodeWidthStr; use ruff_diagnostics::{AutofixKind, Diagnostic, Edit, Fix, Violation}; use ruff_macros::{derive_message_formats, violation}; - use ruff_python_ast::source_code::Generator; use crate::checkers::ast::Checker; +use crate::line_width::LineWidth; use crate::registry::{AsRule, Rule}; #[violation] @@ -214,7 +213,7 @@ pub(crate) fn convert_for_loop_to_any_all( .or_else(|| sibling.and_then(|sibling| return_values_for_siblings(stmt, sibling))) { if loop_info.return_value && !loop_info.next_return_value { - if checker.settings.rules.enabled(Rule::ReimplementedBuiltin) { + if checker.enabled(Rule::ReimplementedBuiltin) { let contents = return_stmt( "any", loop_info.test, @@ -225,8 +224,9 @@ pub(crate) fn convert_for_loop_to_any_all( // Don't flag if the 
resulting expression would exceed the maximum line length. let line_start = checker.locator.line_start(stmt.start()); - if checker.locator.contents()[TextRange::new(line_start, stmt.start())].width() - + contents.width() + if LineWidth::new(checker.settings.tab_size) + .add_str(&checker.locator.contents()[TextRange::new(line_start, stmt.start())]) + .add_str(&contents) > checker.settings.line_length { return; @@ -236,9 +236,11 @@ pub(crate) fn convert_for_loop_to_any_all( ReimplementedBuiltin { repl: contents.clone(), }, - stmt.range(), + TextRange::new(stmt.start(), loop_info.terminal), ); - if checker.patch(diagnostic.kind.rule()) && checker.ctx.is_builtin("any") { + if checker.patch(diagnostic.kind.rule()) + && checker.semantic_model().is_builtin("any") + { #[allow(deprecated)] diagnostic.set_fix(Fix::unspecified(Edit::replacement( contents, @@ -251,7 +253,7 @@ pub(crate) fn convert_for_loop_to_any_all( } if !loop_info.return_value && loop_info.next_return_value { - if checker.settings.rules.enabled(Rule::ReimplementedBuiltin) { + if checker.enabled(Rule::ReimplementedBuiltin) { // Invert the condition. let test = { if let Expr::UnaryOp(ast::ExprUnaryOp { @@ -315,8 +317,9 @@ pub(crate) fn convert_for_loop_to_any_all( // Don't flag if the resulting expression would exceed the maximum line length. let line_start = checker.locator.line_start(stmt.start()); - if checker.locator.contents()[TextRange::new(line_start, stmt.start())].width() - + contents.width() + if LineWidth::new(checker.settings.tab_size) + .add_str(&checker.locator.contents()[TextRange::new(line_start, stmt.start())]) + .add_str(&contents) > checker.settings.line_length { return; @@ -326,9 +329,11 @@ pub(crate) fn convert_for_loop_to_any_all( ReimplementedBuiltin { repl: contents.clone(), }, - stmt.range(), + TextRange::new(stmt.start(), loop_info.terminal), ); - if checker.patch(diagnostic.kind.rule()) && checker.ctx.is_builtin("all") { + if checker.patch(diagnostic.kind.rule()) + && checker.semantic_model().is_builtin("all") + { #[allow(deprecated)] diagnostic.set_fix(Fix::unspecified(Edit::replacement( contents, diff --git a/crates/ruff/src/rules/flake8_simplify/rules/suppressible_exception.rs b/crates/ruff/src/rules/flake8_simplify/rules/suppressible_exception.rs index 086fdb97c5526..ee1b45b68aa54 100644 --- a/crates/ruff/src/rules/flake8_simplify/rules/suppressible_exception.rs +++ b/crates/ruff/src/rules/flake8_simplify/rules/suppressible_exception.rs @@ -7,7 +7,6 @@ use ruff_python_ast::call_path::compose_call_path; use ruff_python_ast::helpers; use ruff_python_ast::helpers::has_comments; -use crate::autofix::actions::get_or_import_symbol; use crate::checkers::ast::Checker; use crate::registry::AsRule; @@ -89,13 +88,11 @@ pub(crate) fn suppressible_exception( if fixable && checker.patch(diagnostic.kind.rule()) { diagnostic.try_set_fix(|| { - let (import_edit, binding) = get_or_import_symbol( + let (import_edit, binding) = checker.importer.get_or_import_symbol( "contextlib", "suppress", stmt.start(), - &checker.ctx, - &checker.importer, - checker.locator, + checker.semantic_model(), )?; let replace_try = Edit::range_replacement( format!("with {binding}({exception})"), diff --git a/crates/ruff/src/rules/flake8_simplify/snapshots/ruff__rules__flake8_simplify__tests__SIM102_SIM102.py.snap b/crates/ruff/src/rules/flake8_simplify/snapshots/ruff__rules__flake8_simplify__tests__SIM102_SIM102.py.snap index a3fd9f8f35254..89c1a56862fdc 100644 --- 
a/crates/ruff/src/rules/flake8_simplify/snapshots/ruff__rules__flake8_simplify__tests__SIM102_SIM102.py.snap +++ b/crates/ruff/src/rules/flake8_simplify/snapshots/ruff__rules__flake8_simplify__tests__SIM102_SIM102.py.snap @@ -211,14 +211,14 @@ SIM102.py:83:5: SIM102 [*] Use a single `if` statement instead of nested `if` st 85 |+ )): 86 |+ print("Bad module!") 88 87 | -89 88 | # SIM102 -90 89 | if node.module: +89 88 | # SIM102 (auto-fixable) +90 89 | if node.module012345678: SIM102.py:90:1: SIM102 [*] Use a single `if` statement instead of nested `if` statements | -90 | # SIM102 -91 | / if node.module: -92 | | if node.module == "multiprocessing" or node.module.startswith( +90 | # SIM102 (auto-fixable) +91 | / if node.module012345678: +92 | | if node.module == "multiprocß9πŸ’£2ℝ" or node.module.startswith( 93 | | "multiprocessing." 94 | | ): | |______^ SIM102 @@ -229,44 +229,56 @@ SIM102.py:90:1: SIM102 [*] Use a single `if` statement instead of nested `if` st β„Ή Suggested fix 87 87 | print("Bad module!") 88 88 | -89 89 | # SIM102 -90 |-if node.module: -91 |- if node.module == "multiprocessing" or node.module.startswith( +89 89 | # SIM102 (auto-fixable) +90 |-if node.module012345678: +91 |- if node.module == "multiprocß9πŸ’£2ℝ" or node.module.startswith( 92 |- "multiprocessing." 93 |- ): 94 |- print("Bad module!") - 90 |+if node.module and (node.module == "multiprocessing" or node.module.startswith( + 90 |+if node.module012345678 and (node.module == "multiprocß9πŸ’£2ℝ" or node.module.startswith( 91 |+ "multiprocessing." 92 |+)): 93 |+ print("Bad module!") 95 94 | -96 95 | -97 96 | # OK +96 95 | # SIM102 (not auto-fixable) +97 96 | if node.module0123456789: -SIM102.py:117:5: SIM102 [*] Use a single `if` statement instead of nested `if` statements +SIM102.py:97:1: SIM102 Use a single `if` statement instead of nested `if` statements | -117 | if a: -118 | # SIM 102 -119 | if b: + 97 | # SIM102 (not auto-fixable) + 98 | / if node.module0123456789: + 99 | | if node.module == "multiprocß9πŸ’£2ℝ" or node.module.startswith( +100 | | "multiprocessing." 
+101 | | ): + | |______^ SIM102 +102 | print("Bad module!") + | + = help: Combine `if` statements using `and` + +SIM102.py:124:5: SIM102 [*] Use a single `if` statement instead of nested `if` statements + | +124 | if a: +125 | # SIM 102 +126 | if b: | _____^ -120 | | if c: +127 | | if c: | |_____________^ SIM102 -121 | print("foo") -122 | else: +128 | print("foo") +129 | else: | = help: Combine `if` statements using `and` β„Ή Suggested fix -114 114 | # OK -115 115 | if a: -116 116 | # SIM 102 -117 |- if b: -118 |- if c: -119 |- print("foo") - 117 |+ if b and c: - 118 |+ print("foo") -120 119 | else: -121 120 | print("bar") -122 121 | +121 121 | # OK +122 122 | if a: +123 123 | # SIM 102 +124 |- if b: +125 |- if c: +126 |- print("foo") + 124 |+ if b and c: + 125 |+ print("foo") +127 126 | else: +128 127 | print("bar") +129 128 | diff --git a/crates/ruff/src/rules/flake8_simplify/snapshots/ruff__rules__flake8_simplify__tests__SIM108_SIM108.py.snap b/crates/ruff/src/rules/flake8_simplify/snapshots/ruff__rules__flake8_simplify__tests__SIM108_SIM108.py.snap index 520f6f1c2e924..91a83a03c6394 100644 --- a/crates/ruff/src/rules/flake8_simplify/snapshots/ruff__rules__flake8_simplify__tests__SIM108_SIM108.py.snap +++ b/crates/ruff/src/rules/flake8_simplify/snapshots/ruff__rules__flake8_simplify__tests__SIM108_SIM108.py.snap @@ -38,57 +38,57 @@ SIM108.py:58:1: SIM108 Use ternary operator `abc = x if x > 0 else -x` instead o | = help: Replace `if`-`else`-block with `abc = x if x > 0 else -x` -SIM108.py:82:1: SIM108 [*] Use ternary operator `b = cccccccccccccccccccccccccccccccccccc if a else ddddddddddddddddddddddddddddddddddddd` instead of `if`-`else`-block +SIM108.py:82:1: SIM108 [*] Use ternary operator `b = "cccccccccccccccccccccccccccccccccß" if a else "dddddddddddddddddddddddddddddddddπŸ’£"` instead of `if`-`else`-block | 82 | # SIM108 83 | / if a: -84 | | b = cccccccccccccccccccccccccccccccccccc +84 | | b = "cccccccccccccccccccccccccccccccccß" 85 | | else: -86 | | b = ddddddddddddddddddddddddddddddddddddd +86 | | b = "dddddddddddddddddddddddddddddddddπŸ’£" | |_____________________________________________^ SIM108 | - = help: Replace `if`-`else`-block with `b = cccccccccccccccccccccccccccccccccccc if a else ddddddddddddddddddddddddddddddddddddd` + = help: Replace `if`-`else`-block with `b = "cccccccccccccccccccccccccccccccccß" if a else "dddddddddddddddddddddddddddddddddπŸ’£"` β„Ή Suggested fix 79 79 | 80 80 | 81 81 | # SIM108 82 |-if a: -83 |- b = cccccccccccccccccccccccccccccccccccc +83 |- b = "cccccccccccccccccccccccccccccccccß" 84 |-else: -85 |- b = ddddddddddddddddddddddddddddddddddddd - 82 |+b = cccccccccccccccccccccccccccccccccccc if a else ddddddddddddddddddddddddddddddddddddd +85 |- b = "dddddddddddddddddddddddddddddddddπŸ’£" + 82 |+b = "cccccccccccccccccccccccccccccccccß" if a else "dddddddddddddddddddddddddddddddddπŸ’£" 86 83 | 87 84 | 88 85 | # OK (too long) -SIM108.py:97:1: SIM108 Use ternary operator `exitcode = 0 if True else 1` instead of `if`-`else`-block +SIM108.py:105:1: SIM108 Use ternary operator `exitcode = 0 if True else 1` instead of `if`-`else`-block | - 97 | # SIM108 (without fix due to trailing comment) - 98 | / if True: - 99 | | exitcode = 0 -100 | | else: -101 | | exitcode = 1 # Trailing comment +105 | # SIM108 (without fix due to trailing comment) +106 | / if True: +107 | | exitcode = 0 +108 | | else: +109 | | exitcode = 1 # Trailing comment | |________________^ SIM108 | = help: Replace `if`-`else`-block with `exitcode = 0 if True else 1` -SIM108.py:104:1: SIM108 Use 
ternary operator `x = 3 if True else 5` instead of `if`-`else`-block +SIM108.py:112:1: SIM108 Use ternary operator `x = 3 if True else 5` instead of `if`-`else`-block | -104 | # SIM108 -105 | / if True: x = 3 # Foo -106 | | else: x = 5 +112 | # SIM108 +113 | / if True: x = 3 # Foo +114 | | else: x = 5 | |___________^ SIM108 | = help: Replace `if`-`else`-block with `x = 3 if True else 5` -SIM108.py:109:1: SIM108 Use ternary operator `x = 3 if True else 5` instead of `if`-`else`-block +SIM108.py:117:1: SIM108 Use ternary operator `x = 3 if True else 5` instead of `if`-`else`-block | -109 | # SIM108 -110 | / if True: # Foo -111 | | x = 3 -112 | | else: -113 | | x = 5 +117 | # SIM108 +118 | / if True: # Foo +119 | | x = 3 +120 | | else: +121 | | x = 5 | |_________^ SIM108 | = help: Replace `if`-`else`-block with `x = 3 if True else 5` diff --git a/crates/ruff/src/rules/flake8_simplify/snapshots/ruff__rules__flake8_simplify__tests__SIM110_SIM110.py.snap b/crates/ruff/src/rules/flake8_simplify/snapshots/ruff__rules__flake8_simplify__tests__SIM110_SIM110.py.snap index 6048c57d9f10c..c23a745c0d3d8 100644 --- a/crates/ruff/src/rules/flake8_simplify/snapshots/ruff__rules__flake8_simplify__tests__SIM110_SIM110.py.snap +++ b/crates/ruff/src/rules/flake8_simplify/snapshots/ruff__rules__flake8_simplify__tests__SIM110_SIM110.py.snap @@ -9,8 +9,8 @@ SIM110.py:3:5: SIM110 [*] Use `return any(check(x) for x in iterable)` instead o | _____^ 6 | | if check(x): 7 | | return True - | |_______________________^ SIM110 -8 | return False +8 | | return False + | |________________^ SIM110 | = help: Replace with `return any(check(x) for x in iterable)` @@ -34,8 +34,8 @@ SIM110.py:25:5: SIM110 [*] Use `return all(not check(x) for x in iterable)` inst | _____^ 28 | | if check(x): 29 | | return False - | |________________________^ SIM110 -30 | return True +30 | | return True + | |_______________^ SIM110 | = help: Replace with `return all(not check(x) for x in iterable)` @@ -60,8 +60,8 @@ SIM110.py:33:5: SIM110 [*] Use `return all(x.is_empty() for x in iterable)` inst | _____^ 36 | | if not x.is_empty(): 37 | | return False - | |________________________^ SIM110 -38 | return True +38 | | return True + | |_______________^ SIM110 | = help: Replace with `return all(x.is_empty() for x in iterable)` @@ -200,8 +200,8 @@ SIM110.py:124:5: SIM110 Use `return any(check(x) for x in iterable)` instead of | _____^ 127 | | if check(x): 128 | | return True - | |_______________________^ SIM110 -129 | return False +129 | | return False + | |________________^ SIM110 | = help: Replace with `return any(check(x) for x in iterable)` @@ -213,8 +213,8 @@ SIM110.py:134:5: SIM110 Use `return all(not check(x) for x in iterable)` instead | _____^ 137 | | if check(x): 138 | | return False - | |________________________^ SIM110 -139 | return True +139 | | return True + | |_______________^ SIM110 | = help: Replace with `return all(not check(x) for x in iterable)` @@ -225,8 +225,8 @@ SIM110.py:144:5: SIM110 [*] Use `return any(check(x) for x in iterable)` instead | _____^ 146 | | if check(x): 147 | | return True - | |_______________________^ SIM110 -148 | return False +148 | | return False + | |________________^ SIM110 | = help: Replace with `return any(check(x) for x in iterable)` @@ -250,8 +250,8 @@ SIM110.py:154:5: SIM110 [*] Use `return all(not check(x) for x in iterable)` ins | _____^ 156 | | if check(x): 157 | | return False - | |________________________^ SIM110 -158 | return True +158 | | return True + | |_______________^ SIM110 | = help: Replace 
with `return all(not check(x) for x in iterable)` @@ -264,5 +264,34 @@ SIM110.py:154:5: SIM110 [*] Use `return all(not check(x) for x in iterable)` ins 156 |- return False 157 |- return True 154 |+ return all(not check(x) for x in iterable) +158 155 | +159 156 | +160 157 | def f(): + +SIM110.py:162:5: SIM110 [*] Use `return any(x.isdigit() for x in "012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ")` instead of `for` loop + | +162 | def f(): +163 | # SIM110 +164 | for x in "012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ": + | _____^ +165 | | if x.isdigit(): +166 | | return True +167 | | return False + | |________________^ SIM110 + | + = help: Replace with `return any(x.isdigit() for x in "012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ")` + +β„Ή Suggested fix +159 159 | +160 160 | def f(): +161 161 | # SIM110 +162 |- for x in "012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ": +163 |- if x.isdigit(): +164 |- return True +165 |- return False + 162 |+ return any(x.isdigit() for x in "012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ") +166 163 | +167 164 | +168 165 | def f(): diff --git a/crates/ruff/src/rules/flake8_simplify/snapshots/ruff__rules__flake8_simplify__tests__SIM110_SIM111.py.snap b/crates/ruff/src/rules/flake8_simplify/snapshots/ruff__rules__flake8_simplify__tests__SIM110_SIM111.py.snap index 31159b5430f51..c4e2e48998b93 100644 --- a/crates/ruff/src/rules/flake8_simplify/snapshots/ruff__rules__flake8_simplify__tests__SIM110_SIM111.py.snap +++ b/crates/ruff/src/rules/flake8_simplify/snapshots/ruff__rules__flake8_simplify__tests__SIM110_SIM111.py.snap @@ -9,8 +9,8 @@ SIM111.py:3:5: SIM110 [*] Use `return any(check(x) for x in iterable)` instead o | _____^ 6 | | if check(x): 7 | | return True - | |_______________________^ SIM110 -8 | return False +8 | | return False + | |________________^ SIM110 | = help: Replace with `return any(check(x) for x in iterable)` @@ -34,8 +34,8 @@ SIM111.py:25:5: SIM110 [*] Use `return all(not check(x) for x in iterable)` inst | _____^ 28 | | if check(x): 29 | | return False - | |________________________^ SIM110 -30 | return True +30 | | return True + | |_______________^ SIM110 | = help: Replace with `return all(not check(x) for x in iterable)` @@ -60,8 +60,8 @@ SIM111.py:33:5: SIM110 [*] Use `return all(x.is_empty() for x in iterable)` inst | _____^ 36 | | if not x.is_empty(): 37 | | return False - | |________________________^ SIM110 -38 | return True +38 | | return True + | |_______________^ SIM110 | = help: Replace with `return all(x.is_empty() for x in iterable)` @@ -200,8 +200,8 @@ SIM111.py:124:5: SIM110 Use `return any(check(x) for x in iterable)` instead of | _____^ 127 | | if check(x): 128 | | return True - | |_______________________^ SIM110 -129 | return False +129 | | return False + | |________________^ SIM110 | = help: Replace with `return any(check(x) for x in iterable)` @@ -213,8 +213,8 @@ SIM111.py:134:5: SIM110 Use `return all(not check(x) for x in iterable)` instead | _____^ 137 | | if check(x): 138 | | return False - | |________________________^ SIM110 -139 | return True +139 | | return True + | |_______________^ SIM110 | = help: Replace with `return all(not check(x) for x in iterable)` @@ -225,8 +225,8 @@ SIM111.py:144:5: SIM110 [*] Use `return any(check(x) for x in iterable)` instead | _____^ 146 | | if check(x): 147 | | return True - | |_______________________^ SIM110 -148 | return False +148 | | return False + | |________________^ SIM110 | = help: Replace with `return 
any(check(x) for x in iterable)` @@ -250,8 +250,8 @@ SIM111.py:154:5: SIM110 [*] Use `return all(not check(x) for x in iterable)` ins | _____^ 156 | | if check(x): 157 | | return False - | |________________________^ SIM110 -158 | return True +158 | | return True + | |_______________^ SIM110 | = help: Replace with `return all(not check(x) for x in iterable)` @@ -276,8 +276,8 @@ SIM111.py:162:5: SIM110 [*] Use `return all(x in y for x in iterable)` instead o | _____^ 165 | | if x not in y: 166 | | return False - | |________________________^ SIM110 -167 | return True +167 | | return True + | |_______________^ SIM110 | = help: Replace with `return all(x in y for x in iterable)` @@ -302,8 +302,8 @@ SIM111.py:170:5: SIM110 [*] Use `return all(x <= y for x in iterable)` instead o | _____^ 173 | | if x > y: 174 | | return False - | |________________________^ SIM110 -175 | return True +175 | | return True + | |_______________^ SIM110 | = help: Replace with `return all(x <= y for x in iterable)` @@ -316,5 +316,34 @@ SIM111.py:170:5: SIM110 [*] Use `return all(x <= y for x in iterable)` instead o 172 |- return False 173 |- return True 170 |+ return all(x <= y for x in iterable) +174 171 | +175 172 | +176 173 | def f(): + +SIM111.py:178:5: SIM110 [*] Use `return all(not x.isdigit() for x in "012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9")` instead of `for` loop + | +178 | def f(): +179 | # SIM111 +180 | for x in "012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9": + | _____^ +181 | | if x.isdigit(): +182 | | return False +183 | | return True + | |_______________^ SIM110 + | + = help: Replace with `return all(not x.isdigit() for x in "012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9")` + +β„Ή Suggested fix +175 175 | +176 176 | def f(): +177 177 | # SIM111 +178 |- for x in "012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9": +179 |- if x.isdigit(): +180 |- return False +181 |- return True + 178 |+ return all(not x.isdigit() for x in "012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9") +182 179 | +183 180 | +184 181 | def f(): diff --git a/crates/ruff/src/rules/flake8_simplify/snapshots/ruff__rules__flake8_simplify__tests__SIM117_SIM117.py.snap b/crates/ruff/src/rules/flake8_simplify/snapshots/ruff__rules__flake8_simplify__tests__SIM117_SIM117.py.snap index 0cb4bd04bf1f6..94ddb2c3261af 100644 --- a/crates/ruff/src/rules/flake8_simplify/snapshots/ruff__rules__flake8_simplify__tests__SIM117_SIM117.py.snap +++ b/crates/ruff/src/rules/flake8_simplify/snapshots/ruff__rules__flake8_simplify__tests__SIM117_SIM117.py.snap @@ -212,5 +212,41 @@ SIM117.py:84:1: SIM117 [*] Use a single `with` statement with multiple contexts 91 |- ): 92 |- print("hello") 89 |+ print("hello") +93 90 | +94 91 | # SIM117 (auto-fixable) +95 92 | with A("01ß9πŸ’£2ℝ8901ß9πŸ’£2ℝ8901ß9πŸ’£2ℝ89") as a: + +SIM117.py:95:1: SIM117 [*] Use a single `with` statement with multiple contexts instead of nested `with` statements + | +95 | # SIM117 (auto-fixable) +96 | / with A("01ß9πŸ’£2ℝ8901ß9πŸ’£2ℝ8901ß9πŸ’£2ℝ89") as a: +97 | | with B("01ß9πŸ’£2ℝ8901ß9πŸ’£2ℝ8901ß9πŸ’£2ℝ89") as b: + | |__________________________________________________^ SIM117 +98 | print("hello") + | + = help: Combine `with` statements + +β„Ή Suggested fix +92 92 | print("hello") +93 93 | +94 94 | # SIM117 (auto-fixable) +95 |-with A("01ß9πŸ’£2ℝ8901ß9πŸ’£2ℝ8901ß9πŸ’£2ℝ89") as a: +96 |- with B("01ß9πŸ’£2ℝ8901ß9πŸ’£2ℝ8901ß9πŸ’£2ℝ89") as b: +97 |- print("hello") + 95 |+with A("01ß9πŸ’£2ℝ8901ß9πŸ’£2ℝ8901ß9πŸ’£2ℝ89") as a, 
B("01ß9πŸ’£2ℝ8901ß9πŸ’£2ℝ8901ß9πŸ’£2ℝ89") as b: + 96 |+ print("hello") +98 97 | +99 98 | # SIM117 (not auto-fixable too long) +100 99 | with A("01ß9πŸ’£2ℝ8901ß9πŸ’£2ℝ8901ß9πŸ’£2ℝ890") as a: + +SIM117.py:100:1: SIM117 Use a single `with` statement with multiple contexts instead of nested `with` statements + | +100 | # SIM117 (not auto-fixable too long) +101 | / with A("01ß9πŸ’£2ℝ8901ß9πŸ’£2ℝ8901ß9πŸ’£2ℝ890") as a: +102 | | with B("01ß9πŸ’£2ℝ8901ß9πŸ’£2ℝ8901ß9πŸ’£2ℝ89") as b: + | |__________________________________________________^ SIM117 +103 | print("hello") + | + = help: Combine `with` statements diff --git a/crates/ruff/src/rules/flake8_simplify/snapshots/ruff__rules__flake8_simplify__tests__SIM401_SIM401.py.snap b/crates/ruff/src/rules/flake8_simplify/snapshots/ruff__rules__flake8_simplify__tests__SIM401_SIM401.py.snap index 2c0b10f03d564..830ba3a2e7517 100644 --- a/crates/ruff/src/rules/flake8_simplify/snapshots/ruff__rules__flake8_simplify__tests__SIM401_SIM401.py.snap +++ b/crates/ruff/src/rules/flake8_simplify/snapshots/ruff__rules__flake8_simplify__tests__SIM401_SIM401.py.snap @@ -36,7 +36,7 @@ SIM401.py:12:1: SIM401 [*] Use `var = a_dict.get(key, "default2")` instead of an 16 | | var = a_dict[key] | |_____________________^ SIM401 17 | -18 | # SIM401 (default with a complex expression) +18 | # OK (default contains effect) | = help: Replace with `var = a_dict.get(key, "default2")` @@ -50,7 +50,7 @@ SIM401.py:12:1: SIM401 [*] Use `var = a_dict.get(key, "default2")` instead of an 15 |- var = a_dict[key] 12 |+var = a_dict.get(key, "default2") 16 13 | -17 14 | # SIM401 (default with a complex expression) +17 14 | # OK (default contains effect) 18 15 | if key in a_dict: SIM401.py:24:1: SIM401 [*] Use `var = a_dict.get(keys[idx], "default")` instead of an `if` block @@ -105,18 +105,18 @@ SIM401.py:30:1: SIM401 [*] Use `var = dicts[idx].get(key, "default")` instead of 35 32 | # SIM401 (complex expression in var) 36 33 | if key in a_dict: -SIM401.py:36:1: SIM401 [*] Use `vars[idx] = a_dict.get(key, "default")` instead of an `if` block +SIM401.py:36:1: SIM401 [*] Use `vars[idx] = a_dict.get(key, "defaultß9πŸ’£2ℝ6789ß9πŸ’£2ℝ6789ß9πŸ’£2ℝ6789ß9πŸ’£2ℝ6789ß9πŸ’£2ℝ6789")` instead of an `if` block | 36 | # SIM401 (complex expression in var) 37 | / if key in a_dict: 38 | | vars[idx] = a_dict[key] 39 | | else: -40 | | vars[idx] = "default" - | |_________________________^ SIM401 +40 | | vars[idx] = "defaultß9πŸ’£2ℝ6789ß9πŸ’£2ℝ6789ß9πŸ’£2ℝ6789ß9πŸ’£2ℝ6789ß9πŸ’£2ℝ6789" + | |___________________________________________________________________________^ SIM401 41 | 42 | ### | - = help: Replace with `vars[idx] = a_dict.get(key, "default")` + = help: Replace with `vars[idx] = a_dict.get(key, "defaultß9πŸ’£2ℝ6789ß9πŸ’£2ℝ6789ß9πŸ’£2ℝ6789ß9πŸ’£2ℝ6789ß9πŸ’£2ℝ6789")` β„Ή Suggested fix 33 33 | var = "default" @@ -125,8 +125,8 @@ SIM401.py:36:1: SIM401 [*] Use `vars[idx] = a_dict.get(key, "default")` instead 36 |-if key in a_dict: 37 |- vars[idx] = a_dict[key] 38 |-else: -39 |- vars[idx] = "default" - 36 |+vars[idx] = a_dict.get(key, "default") +39 |- vars[idx] = "defaultß9πŸ’£2ℝ6789ß9πŸ’£2ℝ6789ß9πŸ’£2ℝ6789ß9πŸ’£2ℝ6789ß9πŸ’£2ℝ6789" + 36 |+vars[idx] = a_dict.get(key, "defaultß9πŸ’£2ℝ6789ß9πŸ’£2ℝ6789ß9πŸ’£2ℝ6789ß9πŸ’£2ℝ6789ß9πŸ’£2ℝ6789") 40 37 | 41 38 | ### 42 39 | # Negative cases diff --git a/crates/ruff/src/rules/flake8_tidy_imports/banned_api.rs b/crates/ruff/src/rules/flake8_tidy_imports/banned_api.rs deleted file mode 100644 index bcaf261967bef..0000000000000 --- 
a/crates/ruff/src/rules/flake8_tidy_imports/banned_api.rs +++ /dev/null @@ -1,186 +0,0 @@ -use rustc_hash::FxHashMap; -use rustpython_parser::ast::{Expr, Ranged}; -use serde::{Deserialize, Serialize}; - -use ruff_diagnostics::{Diagnostic, Violation}; -use ruff_macros::{derive_message_formats, violation, CacheKey}; -use ruff_python_ast::call_path::from_qualified_name; - -use crate::checkers::ast::Checker; - -pub type Settings = FxHashMap; - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, CacheKey)] -#[serde(deny_unknown_fields, rename_all = "kebab-case")] -#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] -pub struct ApiBan { - /// The message to display when the API is used. - pub msg: String, -} - -/// ## What it does -/// Checks for banned imports. -/// -/// ## Why is this bad? -/// Projects may want to ensure that specific modules or module members are -/// not be imported or accessed. -/// -/// Security or other company policies may be a reason to impose -/// restrictions on importing external Python libraries. In some cases, -/// projects may adopt conventions around the use of certain modules or -/// module members that are not enforceable by the language itself. -/// -/// This rule enforces certain import conventions project-wide in an -/// automatic way. -/// -/// ## Options -/// - `flake8-tidy-imports.banned-api` -#[violation] -pub struct BannedApi { - name: String, - message: String, -} - -impl Violation for BannedApi { - #[derive_message_formats] - fn message(&self) -> String { - let BannedApi { name, message } = self; - format!("`{name}` is banned: {message}") - } -} - -/// TID251 -pub(crate) fn name_is_banned(checker: &mut Checker, name: String, located: &T) -where - T: Ranged, -{ - let banned_api = &checker.settings.flake8_tidy_imports.banned_api; - if let Some(ban) = banned_api.get(&name) { - checker.diagnostics.push(Diagnostic::new( - BannedApi { - name, - message: ban.msg.to_string(), - }, - located.range(), - )); - } -} - -/// TID251 -pub(crate) fn name_or_parent_is_banned(checker: &mut Checker, name: &str, located: &T) -where - T: Ranged, -{ - let banned_api = &checker.settings.flake8_tidy_imports.banned_api; - let mut name = name; - loop { - if let Some(ban) = banned_api.get(name) { - checker.diagnostics.push(Diagnostic::new( - BannedApi { - name: name.to_string(), - message: ban.msg.to_string(), - }, - located.range(), - )); - return; - } - match name.rfind('.') { - Some(idx) => { - name = &name[..idx]; - } - None => return, - } - } -} - -/// TID251 -pub(crate) fn banned_attribute_access(checker: &mut Checker, expr: &Expr) { - let banned_api = &checker.settings.flake8_tidy_imports.banned_api; - if let Some((banned_path, ban)) = checker.ctx.resolve_call_path(expr).and_then(|call_path| { - banned_api - .iter() - .find(|(banned_path, ..)| call_path == from_qualified_name(banned_path)) - }) { - checker.diagnostics.push(Diagnostic::new( - BannedApi { - name: banned_path.to_string(), - message: ban.msg.to_string(), - }, - expr.range(), - )); - } -} - -#[cfg(test)] -mod tests { - use std::path::Path; - - use anyhow::Result; - use rustc_hash::FxHashMap; - - use crate::assert_messages; - use crate::registry::Rule; - use crate::settings::Settings; - use crate::test::test_path; - - use super::ApiBan; - - #[test] - fn banned_api() -> Result<()> { - let diagnostics = test_path( - Path::new("flake8_tidy_imports/TID251.py"), - &Settings { - flake8_tidy_imports: super::super::Settings { - banned_api: FxHashMap::from_iter([ - ( - "cgi".to_string(), - ApiBan 
{ - msg: "The cgi module is deprecated.".to_string(), - }, - ), - ( - "typing.TypedDict".to_string(), - ApiBan { - msg: "Use typing_extensions.TypedDict instead.".to_string(), - }, - ), - ]), - ..Default::default() - }, - ..Settings::for_rules(vec![Rule::BannedApi]) - }, - )?; - assert_messages!(diagnostics); - Ok(()) - } - - #[test] - fn banned_api_package() -> Result<()> { - let diagnostics = test_path( - Path::new("flake8_tidy_imports/TID/my_package/sublib/api/application.py"), - &Settings { - flake8_tidy_imports: super::super::Settings { - banned_api: FxHashMap::from_iter([ - ( - "attrs".to_string(), - ApiBan { - msg: "The attrs module is deprecated.".to_string(), - }, - ), - ( - "my_package.sublib.protocol".to_string(), - ApiBan { - msg: "The protocol module is deprecated.".to_string(), - }, - ), - ]), - ..Default::default() - }, - namespace_packages: vec![Path::new("my_package").to_path_buf()], - ..Settings::for_rules(vec![Rule::BannedApi]) - }, - )?; - assert_messages!(diagnostics); - Ok(()) - } -} diff --git a/crates/ruff/src/rules/flake8_tidy_imports/mod.rs b/crates/ruff/src/rules/flake8_tidy_imports/mod.rs index f9dc34868e1af..8686f3971c06c 100644 --- a/crates/ruff/src/rules/flake8_tidy_imports/mod.rs +++ b/crates/ruff/src/rules/flake8_tidy_imports/mod.rs @@ -1,13 +1,127 @@ //! Rules from [flake8-tidy-imports](https://pypi.org/project/flake8-tidy-imports/). -use ruff_macros::CacheKey; - pub mod options; +pub(crate) mod rules; +pub mod settings; + +#[cfg(test)] +mod tests { + use std::path::Path; + + use anyhow::Result; + use rustc_hash::FxHashMap; + + use crate::assert_messages; + use crate::registry::Rule; + use crate::rules::flake8_tidy_imports; + use crate::rules::flake8_tidy_imports::settings::{ApiBan, Strictness}; + use crate::settings::Settings; + use crate::test::test_path; + + #[test] + fn banned_api() -> Result<()> { + let diagnostics = test_path( + Path::new("flake8_tidy_imports/TID251.py"), + &Settings { + flake8_tidy_imports: flake8_tidy_imports::settings::Settings { + banned_api: FxHashMap::from_iter([ + ( + "cgi".to_string(), + ApiBan { + msg: "The cgi module is deprecated.".to_string(), + }, + ), + ( + "typing.TypedDict".to_string(), + ApiBan { + msg: "Use typing_extensions.TypedDict instead.".to_string(), + }, + ), + ]), + ..Default::default() + }, + ..Settings::for_rules(vec![Rule::BannedApi]) + }, + )?; + assert_messages!(diagnostics); + Ok(()) + } + + #[test] + fn banned_api_package() -> Result<()> { + let diagnostics = test_path( + Path::new("flake8_tidy_imports/TID/my_package/sublib/api/application.py"), + &Settings { + flake8_tidy_imports: flake8_tidy_imports::settings::Settings { + banned_api: FxHashMap::from_iter([ + ( + "attrs".to_string(), + ApiBan { + msg: "The attrs module is deprecated.".to_string(), + }, + ), + ( + "my_package.sublib.protocol".to_string(), + ApiBan { + msg: "The protocol module is deprecated.".to_string(), + }, + ), + ]), + ..Default::default() + }, + namespace_packages: vec![Path::new("my_package").to_path_buf()], + ..Settings::for_rules(vec![Rule::BannedApi]) + }, + )?; + assert_messages!(diagnostics); + Ok(()) + } + + #[test] + fn ban_parent_imports() -> Result<()> { + let diagnostics = test_path( + Path::new("flake8_tidy_imports/TID252.py"), + &Settings { + flake8_tidy_imports: flake8_tidy_imports::settings::Settings { + ban_relative_imports: Strictness::Parents, + ..Default::default() + }, + ..Settings::for_rules(vec![Rule::RelativeImports]) + }, + )?; + assert_messages!(diagnostics); + Ok(()) + } -pub mod banned_api; -pub mod 
relative_imports; + #[test] + fn ban_all_imports() -> Result<()> { + let diagnostics = test_path( + Path::new("flake8_tidy_imports/TID252.py"), + &Settings { + flake8_tidy_imports: flake8_tidy_imports::settings::Settings { + ban_relative_imports: Strictness::All, + ..Default::default() + }, + ..Settings::for_rules(vec![Rule::RelativeImports]) + }, + )?; + assert_messages!(diagnostics); + Ok(()) + } -#[derive(Debug, CacheKey, Default)] -pub struct Settings { - pub ban_relative_imports: relative_imports::Settings, - pub banned_api: banned_api::Settings, + #[test] + fn ban_parent_imports_package() -> Result<()> { + let diagnostics = test_path( + Path::new("flake8_tidy_imports/TID/my_package/sublib/api/application.py"), + &Settings { + flake8_tidy_imports: flake8_tidy_imports::settings::Settings { + ban_relative_imports: Strictness::Parents, + ..Default::default() + }, + namespace_packages: vec![Path::new("my_package").to_path_buf()], + ..Settings::for_rules(vec![Rule::RelativeImports]) + }, + )?; + assert_messages!(diagnostics); + Ok(()) + } } diff --git a/crates/ruff/src/rules/flake8_tidy_imports/options.rs b/crates/ruff/src/rules/flake8_tidy_imports/options.rs index f180974a49776..6f3eb99fcb631 100644 --- a/crates/ruff/src/rules/flake8_tidy_imports/options.rs +++ b/crates/ruff/src/rules/flake8_tidy_imports/options.rs @@ -5,9 +5,7 @@ use serde::{Deserialize, Serialize}; use ruff_macros::{CombineOptions, ConfigurationOptions}; -use super::banned_api::ApiBan; -use super::relative_imports::Strictness; -use super::Settings; +use super::settings::{ApiBan, Settings, Strictness}; #[derive( Debug, PartialEq, Eq, Serialize, Deserialize, Default, ConfigurationOptions, CombineOptions, diff --git a/crates/ruff/src/rules/flake8_tidy_imports/rules/banned_api.rs b/crates/ruff/src/rules/flake8_tidy_imports/rules/banned_api.rs new file mode 100644 index 0000000000000..668f764354de5 --- /dev/null +++ b/crates/ruff/src/rules/flake8_tidy_imports/rules/banned_api.rs @@ -0,0 +1,105 @@ +use rustpython_parser::ast::{Expr, Ranged}; + +use ruff_diagnostics::{Diagnostic, Violation}; +use ruff_macros::{derive_message_formats, violation}; +use ruff_python_ast::call_path::from_qualified_name; + +use crate::checkers::ast::Checker; + +/// ## What it does +/// Checks for banned imports. +/// +/// ## Why is this bad? +/// Projects may want to ensure that specific modules or module members are +/// not be imported or accessed. +/// +/// Security or other company policies may be a reason to impose +/// restrictions on importing external Python libraries. In some cases, +/// projects may adopt conventions around the use of certain modules or +/// module members that are not enforceable by the language itself. +/// +/// This rule enforces certain import conventions project-wide in an +/// automatic way. 
+/// +/// ## Options +/// - `flake8-tidy-imports.banned-api` +#[violation] +pub struct BannedApi { + name: String, + message: String, +} + +impl Violation for BannedApi { + #[derive_message_formats] + fn message(&self) -> String { + let BannedApi { name, message } = self; + format!("`{name}` is banned: {message}") + } +} + +/// TID251 +pub(crate) fn name_is_banned(checker: &mut Checker, name: String, located: &T) +where + T: Ranged, +{ + let banned_api = &checker.settings.flake8_tidy_imports.banned_api; + if let Some(ban) = banned_api.get(&name) { + checker.diagnostics.push(Diagnostic::new( + BannedApi { + name, + message: ban.msg.to_string(), + }, + located.range(), + )); + } +} + +/// TID251 +pub(crate) fn name_or_parent_is_banned(checker: &mut Checker, name: &str, located: &T) +where + T: Ranged, +{ + let banned_api = &checker.settings.flake8_tidy_imports.banned_api; + let mut name = name; + loop { + if let Some(ban) = banned_api.get(name) { + checker.diagnostics.push(Diagnostic::new( + BannedApi { + name: name.to_string(), + message: ban.msg.to_string(), + }, + located.range(), + )); + return; + } + match name.rfind('.') { + Some(idx) => { + name = &name[..idx]; + } + None => return, + } + } +} + +/// TID251 +pub(crate) fn banned_attribute_access(checker: &mut Checker, expr: &Expr) { + let banned_api = &checker.settings.flake8_tidy_imports.banned_api; + if let Some((banned_path, ban)) = + checker + .semantic_model() + .resolve_call_path(expr) + .and_then(|call_path| { + banned_api + .iter() + .find(|(banned_path, ..)| call_path == from_qualified_name(banned_path)) + }) + { + checker.diagnostics.push(Diagnostic::new( + BannedApi { + name: banned_path.to_string(), + message: ban.msg.to_string(), + }, + expr.range(), + )); + } +} diff --git a/crates/ruff/src/rules/flake8_tidy_imports/rules/mod.rs b/crates/ruff/src/rules/flake8_tidy_imports/rules/mod.rs new file mode 100644 index 0000000000000..8efbdfd6aed9c --- /dev/null +++ b/crates/ruff/src/rules/flake8_tidy_imports/rules/mod.rs @@ -0,0 +1,7 @@ +pub(crate) use banned_api::{ + banned_attribute_access, name_is_banned, name_or_parent_is_banned, BannedApi, +}; +pub(crate) use relative_imports::{banned_relative_import, RelativeImports}; + +mod banned_api; +mod relative_imports; diff --git a/crates/ruff/src/rules/flake8_tidy_imports/relative_imports.rs b/crates/ruff/src/rules/flake8_tidy_imports/rules/relative_imports.rs similarity index 62% rename from crates/ruff/src/rules/flake8_tidy_imports/relative_imports.rs rename to crates/ruff/src/rules/flake8_tidy_imports/rules/relative_imports.rs index 79ccdd8fdb394..9bf9bc41a8448 100644 --- a/crates/ruff/src/rules/flake8_tidy_imports/relative_imports.rs +++ b/crates/ruff/src/rules/flake8_tidy_imports/rules/relative_imports.rs @@ -1,28 +1,15 @@ use ruff_text_size::TextRange; use rustpython_parser::ast::{self, Int, Ranged, Stmt}; -use serde::{Deserialize, Serialize}; use ruff_diagnostics::{AutofixKind, Diagnostic, Edit, Fix, Violation}; -use ruff_macros::{derive_message_formats, violation, CacheKey}; +use ruff_macros::{derive_message_formats, violation}; use ruff_python_ast::helpers::resolve_imported_module_path; use ruff_python_ast::source_code::Generator; use ruff_python_stdlib::identifiers::is_identifier; use crate::checkers::ast::Checker; use crate::registry::AsRule; - -pub type Settings = Strictness; - -#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, CacheKey, Default)] -#[serde(deny_unknown_fields, rename_all = "kebab-case")] -#[cfg_attr(feature = "schemars", 
derive(schemars::JsonSchema))] -pub enum Strictness { - /// Ban imports that extend into the parent module or beyond. - #[default] - Parents, - /// Ban all relative imports. - All, -} +use crate::rules::flake8_tidy_imports::settings::Strictness; /// ## What it does /// Checks for relative imports. @@ -147,66 +134,3 @@ pub(crate) fn banned_relative_import( None } } - -#[cfg(test)] -mod tests { - use std::path::Path; - - use anyhow::Result; - - use crate::assert_messages; - use crate::registry::Rule; - use crate::settings::Settings; - use crate::test::test_path; - - use super::Strictness; - - #[test] - fn ban_parent_imports() -> Result<()> { - let diagnostics = test_path( - Path::new("flake8_tidy_imports/TID252.py"), - &Settings { - flake8_tidy_imports: super::super::Settings { - ban_relative_imports: Strictness::Parents, - ..Default::default() - }, - ..Settings::for_rules(vec![Rule::RelativeImports]) - }, - )?; - assert_messages!(diagnostics); - Ok(()) - } - - #[test] - fn ban_all_imports() -> Result<()> { - let diagnostics = test_path( - Path::new("flake8_tidy_imports/TID252.py"), - &Settings { - flake8_tidy_imports: super::super::Settings { - ban_relative_imports: Strictness::All, - ..Default::default() - }, - ..Settings::for_rules(vec![Rule::RelativeImports]) - }, - )?; - assert_messages!(diagnostics); - Ok(()) - } - - #[test] - fn ban_parent_imports_package() -> Result<()> { - let diagnostics = test_path( - Path::new("flake8_tidy_imports/TID/my_package/sublib/api/application.py"), - &Settings { - flake8_tidy_imports: super::super::Settings { - ban_relative_imports: Strictness::Parents, - ..Default::default() - }, - namespace_packages: vec![Path::new("my_package").to_path_buf()], - ..Settings::for_rules(vec![Rule::RelativeImports]) - }, - )?; - assert_messages!(diagnostics); - Ok(()) - } -} diff --git a/crates/ruff/src/rules/flake8_tidy_imports/settings.rs b/crates/ruff/src/rules/flake8_tidy_imports/settings.rs new file mode 100644 index 0000000000000..90b2843280f27 --- /dev/null +++ b/crates/ruff/src/rules/flake8_tidy_imports/settings.rs @@ -0,0 +1,29 @@ +use rustc_hash::FxHashMap; +use serde::{Deserialize, Serialize}; + +use ruff_macros::CacheKey; + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, CacheKey)] +#[serde(deny_unknown_fields, rename_all = "kebab-case")] +#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] +pub struct ApiBan { + /// The message to display when the API is used. + pub msg: String, +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, CacheKey, Default)] +#[serde(deny_unknown_fields, rename_all = "kebab-case")] +#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] +pub enum Strictness { + /// Ban imports that extend into the parent module or beyond. + #[default] + Parents, + /// Ban all relative imports. 
+ All, +} + +#[derive(Debug, CacheKey, Default)] +pub struct Settings { + pub ban_relative_imports: Strictness, + pub banned_api: FxHashMap, +} diff --git a/crates/ruff/src/rules/flake8_tidy_imports/snapshots/ruff__rules__flake8_tidy_imports__relative_imports__tests__ban_all_imports.snap b/crates/ruff/src/rules/flake8_tidy_imports/snapshots/ruff__rules__flake8_tidy_imports__tests__ban_all_imports.snap similarity index 98% rename from crates/ruff/src/rules/flake8_tidy_imports/snapshots/ruff__rules__flake8_tidy_imports__relative_imports__tests__ban_all_imports.snap rename to crates/ruff/src/rules/flake8_tidy_imports/snapshots/ruff__rules__flake8_tidy_imports__tests__ban_all_imports.snap index 30f082000aea7..d3b952fa33274 100644 --- a/crates/ruff/src/rules/flake8_tidy_imports/snapshots/ruff__rules__flake8_tidy_imports__relative_imports__tests__ban_all_imports.snap +++ b/crates/ruff/src/rules/flake8_tidy_imports/snapshots/ruff__rules__flake8_tidy_imports__tests__ban_all_imports.snap @@ -1,5 +1,5 @@ --- -source: crates/ruff/src/rules/flake8_tidy_imports/relative_imports.rs +source: crates/ruff/src/rules/flake8_tidy_imports/mod.rs --- TID252.py:7:1: TID252 Relative imports are banned | diff --git a/crates/ruff/src/rules/flake8_tidy_imports/snapshots/ruff__rules__flake8_tidy_imports__relative_imports__tests__ban_parent_imports.snap b/crates/ruff/src/rules/flake8_tidy_imports/snapshots/ruff__rules__flake8_tidy_imports__tests__ban_parent_imports.snap similarity index 98% rename from crates/ruff/src/rules/flake8_tidy_imports/snapshots/ruff__rules__flake8_tidy_imports__relative_imports__tests__ban_parent_imports.snap rename to crates/ruff/src/rules/flake8_tidy_imports/snapshots/ruff__rules__flake8_tidy_imports__tests__ban_parent_imports.snap index 05b6d835035ea..39c572fb0f31b 100644 --- a/crates/ruff/src/rules/flake8_tidy_imports/snapshots/ruff__rules__flake8_tidy_imports__relative_imports__tests__ban_parent_imports.snap +++ b/crates/ruff/src/rules/flake8_tidy_imports/snapshots/ruff__rules__flake8_tidy_imports__tests__ban_parent_imports.snap @@ -1,5 +1,5 @@ --- -source: crates/ruff/src/rules/flake8_tidy_imports/relative_imports.rs +source: crates/ruff/src/rules/flake8_tidy_imports/mod.rs --- TID252.py:9:1: TID252 Relative imports from parent modules are banned | diff --git a/crates/ruff/src/rules/flake8_tidy_imports/snapshots/ruff__rules__flake8_tidy_imports__relative_imports__tests__ban_parent_imports_package.snap b/crates/ruff/src/rules/flake8_tidy_imports/snapshots/ruff__rules__flake8_tidy_imports__tests__ban_parent_imports_package.snap similarity index 98% rename from crates/ruff/src/rules/flake8_tidy_imports/snapshots/ruff__rules__flake8_tidy_imports__relative_imports__tests__ban_parent_imports_package.snap rename to crates/ruff/src/rules/flake8_tidy_imports/snapshots/ruff__rules__flake8_tidy_imports__tests__ban_parent_imports_package.snap index 4d0d27311abe4..d3c132753c67f 100644 --- a/crates/ruff/src/rules/flake8_tidy_imports/snapshots/ruff__rules__flake8_tidy_imports__relative_imports__tests__ban_parent_imports_package.snap +++ b/crates/ruff/src/rules/flake8_tidy_imports/snapshots/ruff__rules__flake8_tidy_imports__tests__ban_parent_imports_package.snap @@ -1,5 +1,5 @@ --- -source: crates/ruff/src/rules/flake8_tidy_imports/relative_imports.rs +source: crates/ruff/src/rules/flake8_tidy_imports/mod.rs --- application.py:5:1: TID252 Relative imports from parent modules are banned | diff --git 
a/crates/ruff/src/rules/flake8_tidy_imports/snapshots/ruff__rules__flake8_tidy_imports__banned_api__tests__banned_api.snap b/crates/ruff/src/rules/flake8_tidy_imports/snapshots/ruff__rules__flake8_tidy_imports__tests__banned_api.snap similarity index 97% rename from crates/ruff/src/rules/flake8_tidy_imports/snapshots/ruff__rules__flake8_tidy_imports__banned_api__tests__banned_api.snap rename to crates/ruff/src/rules/flake8_tidy_imports/snapshots/ruff__rules__flake8_tidy_imports__tests__banned_api.snap index 0fcbbaf94d1ea..442a0838f976e 100644 --- a/crates/ruff/src/rules/flake8_tidy_imports/snapshots/ruff__rules__flake8_tidy_imports__banned_api__tests__banned_api.snap +++ b/crates/ruff/src/rules/flake8_tidy_imports/snapshots/ruff__rules__flake8_tidy_imports__tests__banned_api.snap @@ -1,5 +1,5 @@ --- -source: crates/ruff/src/rules/flake8_tidy_imports/banned_api.rs +source: crates/ruff/src/rules/flake8_tidy_imports/mod.rs --- TID251.py:2:8: TID251 `cgi` is banned: The cgi module is deprecated. | diff --git a/crates/ruff/src/rules/flake8_tidy_imports/snapshots/ruff__rules__flake8_tidy_imports__banned_api__tests__banned_api_package.snap b/crates/ruff/src/rules/flake8_tidy_imports/snapshots/ruff__rules__flake8_tidy_imports__tests__banned_api_package.snap similarity index 92% rename from crates/ruff/src/rules/flake8_tidy_imports/snapshots/ruff__rules__flake8_tidy_imports__banned_api__tests__banned_api_package.snap rename to crates/ruff/src/rules/flake8_tidy_imports/snapshots/ruff__rules__flake8_tidy_imports__tests__banned_api_package.snap index d11d98610f288..bceabe72a2d2b 100644 --- a/crates/ruff/src/rules/flake8_tidy_imports/snapshots/ruff__rules__flake8_tidy_imports__banned_api__tests__banned_api_package.snap +++ b/crates/ruff/src/rules/flake8_tidy_imports/snapshots/ruff__rules__flake8_tidy_imports__tests__banned_api_package.snap @@ -1,5 +1,5 @@ --- -source: crates/ruff/src/rules/flake8_tidy_imports/banned_api.rs +source: crates/ruff/src/rules/flake8_tidy_imports/mod.rs --- application.py:3:8: TID251 `attrs` is banned: The attrs module is deprecated. 
| diff --git a/crates/ruff/src/rules/flake8_todos/rules/mod.rs b/crates/ruff/src/rules/flake8_todos/rules/mod.rs new file mode 100644 index 0000000000000..dd10c6bc3ba80 --- /dev/null +++ b/crates/ruff/src/rules/flake8_todos/rules/mod.rs @@ -0,0 +1,6 @@ +pub(crate) use todos::{ + todos, InvalidTodoCapitalization, InvalidTodoTag, MissingSpaceAfterTodoColon, + MissingTodoAuthor, MissingTodoColon, MissingTodoDescription, MissingTodoLink, +}; + +mod todos; diff --git a/crates/ruff/src/rules/flake8_todos/rules.rs b/crates/ruff/src/rules/flake8_todos/rules/todos.rs similarity index 88% rename from crates/ruff/src/rules/flake8_todos/rules.rs rename to crates/ruff/src/rules/flake8_todos/rules/todos.rs index 126083635028a..2eda0f70696dd 100644 --- a/crates/ruff/src/rules/flake8_todos/rules.rs +++ b/crates/ruff/src/rules/flake8_todos/rules/todos.rs @@ -1,9 +1,7 @@ -use itertools::Itertools; use once_cell::sync::Lazy; use regex::RegexSet; +use ruff_python_ast::source_code::{Indexer, Locator}; use ruff_text_size::{TextLen, TextRange, TextSize}; -use rustpython_parser::lexer::LexResult; -use rustpython_parser::Tok; use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit, Fix, Violation}; use ruff_macros::{derive_message_formats, violation}; @@ -289,44 +287,52 @@ static ISSUE_LINK_REGEX_SET: Lazy<RegexSet> = Lazy::new(|| { .unwrap() }); -pub(crate) fn todos(tokens: &[LexResult], settings: &Settings) -> Vec<Diagnostic> { +pub(crate) fn todos(indexer: &Indexer, locator: &Locator, settings: &Settings) -> Vec<Diagnostic> { let mut diagnostics: Vec<Diagnostic> = vec![]; - let mut iter = tokens.iter().flatten().multipeek(); - while let Some((token, token_range)) = iter.next() { - let Tok::Comment(comment) = token else { - continue; - }; + let mut iter = indexer.comment_ranges().iter().peekable(); + while let Some(comment_range) = iter.next() { + let comment = locator.slice(*comment_range); // Check that the comment is a TODO (properly formed or not). - let Some(tag) = detect_tag(comment, token_range.start()) else { + let Some(tag) = detect_tag(comment, comment_range.start()) else { continue; }; tag_errors(&tag, &mut diagnostics, settings); - static_errors(&mut diagnostics, comment, *token_range, &tag); + static_errors(&mut diagnostics, comment, *comment_range, &tag); // TD003 let mut has_issue_link = false; - while let Some((token, token_range)) = iter.peek() { - match token { - Tok::Comment(comment) => { - if detect_tag(comment, token_range.start()).is_some() { - break; - } - if ISSUE_LINK_REGEX_SET.is_match(comment) { - has_issue_link = true; - break; - } - } - Tok::Newline | Tok::NonLogicalNewline => { - continue; - } - _ => { - break; - } + let mut curr_range = comment_range; + while let Some(next_range) = iter.peek() { + // Ensure that next_comment_range is in the same multiline comment "block" as + // comment_range. + if !locator + .slice(TextRange::new(curr_range.end(), next_range.start())) + .chars() + .all(char::is_whitespace) + { + break; + } + + let next_comment = locator.slice(**next_range); + if detect_tag(next_comment, next_range.start()).is_some() { + break; } + + if ISSUE_LINK_REGEX_SET.is_match(next_comment) { + has_issue_link = true; + } + + // If the next_comment isn't a tag or an issue, it's worthless in the context of this + // linter. We can increment here instead of waiting for the next iteration of the outer + // loop. 
+ // + // Unwrap is safe because peek() is Some() + curr_range = iter.next().unwrap(); } + if !has_issue_link { diagnostics.push(Diagnostic::new(MissingTodoLink, tag.range)); } @@ -400,6 +406,7 @@ fn static_errors( trimmed.text_len() } } else { + // TD-002 diagnostics.push(Diagnostic::new(MissingTodoAuthor, tag.range)); TextSize::new(0) @@ -411,15 +418,18 @@ fn static_errors( if let Some(stripped) = after_colon.strip_prefix(' ') { stripped } else { + // TD-007 diagnostics.push(Diagnostic::new(MissingSpaceAfterTodoColon, tag.range)); after_colon } } else { + // TD-004 diagnostics.push(Diagnostic::new(MissingTodoColon, tag.range)); "" }; if post_colon.is_empty() { + // TD-005 diagnostics.push(Diagnostic::new(MissingTodoDescription, tag.range)); } } diff --git a/crates/ruff/src/rules/flake8_type_checking/helpers.rs b/crates/ruff/src/rules/flake8_type_checking/helpers.rs index 317ec2f044754..736704766922c 100644 --- a/crates/ruff/src/rules/flake8_type_checking/helpers.rs +++ b/crates/ruff/src/rules/flake8_type_checking/helpers.rs @@ -3,12 +3,12 @@ use rustpython_parser::ast::{self, Constant, Expr}; use ruff_python_ast::call_path::from_qualified_name; use ruff_python_ast::helpers::map_callable; -use ruff_python_semantic::binding::{Binding, BindingKind, ExecutionContext}; -use ruff_python_semantic::context::Context; +use ruff_python_semantic::binding::{Binding, BindingKind}; +use ruff_python_semantic::model::SemanticModel; use ruff_python_semantic::scope::ScopeKind; /// Return `true` if [`Expr`] is a guard for a type-checking block. -pub(crate) fn is_type_checking_block(context: &Context, test: &Expr) -> bool { +pub(crate) fn is_type_checking_block(semantic_model: &SemanticModel, test: &Expr) -> bool { // Ex) `if False:` if matches!( test, @@ -32,50 +32,60 @@ pub(crate) fn is_type_checking_block(context: &Context, test: &Expr) -> bool { } // Ex) `if typing.TYPE_CHECKING:` - if context.resolve_call_path(test).map_or(false, |call_path| { - call_path.as_slice() == ["typing", "TYPE_CHECKING"] - }) { + if semantic_model + .resolve_call_path(test) + .map_or(false, |call_path| { + call_path.as_slice() == ["typing", "TYPE_CHECKING"] + }) + { return true; } false } -pub(crate) const fn is_valid_runtime_import(binding: &Binding) -> bool { +pub(crate) fn is_valid_runtime_import(semantic_model: &SemanticModel, binding: &Binding) -> bool { if matches!( binding.kind, BindingKind::Importation(..) | BindingKind::FromImportation(..) | BindingKind::SubmoduleImportation(..) 
) { - binding.runtime_usage.is_some() && matches!(binding.context, ExecutionContext::Runtime) + binding.context.is_runtime() + && binding.references().any(|reference_id| { + semantic_model + .references + .resolve(reference_id) + .context() + .is_runtime() + }) } else { false } } pub(crate) fn runtime_evaluated( - context: &Context, + semantic_model: &SemanticModel, base_classes: &[String], decorators: &[String], ) -> bool { if !base_classes.is_empty() { - if runtime_evaluated_base_class(context, base_classes) { + if runtime_evaluated_base_class(semantic_model, base_classes) { return true; } } if !decorators.is_empty() { - if runtime_evaluated_decorators(context, decorators) { + if runtime_evaluated_decorators(semantic_model, decorators) { return true; } } false } -fn runtime_evaluated_base_class(context: &Context, base_classes: &[String]) -> bool { - if let ScopeKind::Class(class_def) = &context.scope().kind { +fn runtime_evaluated_base_class(semantic_model: &SemanticModel, base_classes: &[String]) -> bool { + if let ScopeKind::Class(class_def) = &semantic_model.scope().kind { for base in class_def.bases.iter() { - if let Some(call_path) = context.resolve_call_path(base) { + if let Some(call_path) = semantic_model.resolve_call_path(base) { if base_classes .iter() .any(|base_class| from_qualified_name(base_class) == call_path) @@ -88,10 +98,10 @@ fn runtime_evaluated_base_class(context: &Context, base_classes: &[String]) -> b false } -fn runtime_evaluated_decorators(context: &Context, decorators: &[String]) -> bool { - if let ScopeKind::Class(class_def) = &context.scope().kind { +fn runtime_evaluated_decorators(semantic_model: &SemanticModel, decorators: &[String]) -> bool { + if let ScopeKind::Class(class_def) = &semantic_model.scope().kind { for decorator in class_def.decorator_list.iter() { - if let Some(call_path) = context.resolve_call_path(map_callable(decorator)) { + if let Some(call_path) = semantic_model.resolve_call_path(map_callable(decorator)) { if decorators .iter() .any(|decorator| from_qualified_name(decorator) == call_path) diff --git a/crates/ruff/src/rules/flake8_type_checking/mod.rs b/crates/ruff/src/rules/flake8_type_checking/mod.rs index cec52d2f45d9c..ff2b1c298c931 100644 --- a/crates/ruff/src/rules/flake8_type_checking/mod.rs +++ b/crates/ruff/src/rules/flake8_type_checking/mod.rs @@ -9,7 +9,6 @@ mod tests { use std::path::Path; use anyhow::Result; - use test_case::test_case; use crate::registry::Rule; diff --git a/crates/ruff/src/rules/flake8_type_checking/rules/empty_type_checking_block.rs b/crates/ruff/src/rules/flake8_type_checking/rules/empty_type_checking_block.rs index f78759d6516e3..cc8b29bb7dd78 100644 --- a/crates/ruff/src/rules/flake8_type_checking/rules/empty_type_checking_block.rs +++ b/crates/ruff/src/rules/flake8_type_checking/rules/empty_type_checking_block.rs @@ -60,7 +60,7 @@ pub(crate) fn empty_type_checking_block<'a, 'b>( // Delete the entire type-checking block. 
if checker.patch(diagnostic.kind.rule()) { - let parent = checker.ctx.stmts.parent(stmt); + let parent = checker.semantic_model().stmts.parent(stmt); let deleted: Vec<&Stmt> = checker.deletions.iter().map(Into::into).collect(); match delete_stmt( stmt, diff --git a/crates/ruff/src/rules/flake8_type_checking/rules/runtime_import_in_type_checking_block.rs b/crates/ruff/src/rules/flake8_type_checking/rules/runtime_import_in_type_checking_block.rs index 81eb309dc0074..95a2b6af956a1 100644 --- a/crates/ruff/src/rules/flake8_type_checking/rules/runtime_import_in_type_checking_block.rs +++ b/crates/ruff/src/rules/flake8_type_checking/rules/runtime_import_in_type_checking_block.rs @@ -1,8 +1,9 @@ use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; use ruff_python_semantic::binding::{ - Binding, BindingKind, ExecutionContext, FromImportation, Importation, SubmoduleImportation, + Binding, BindingKind, FromImportation, Importation, SubmoduleImportation, }; +use ruff_python_semantic::model::SemanticModel; /// ## What it does /// Checks for runtime imports defined in a type-checking block. @@ -51,7 +52,10 @@ impl Violation for RuntimeImportInTypeCheckingBlock { } /// TCH004 -pub(crate) fn runtime_import_in_type_checking_block(binding: &Binding) -> Option { +pub(crate) fn runtime_import_in_type_checking_block( + binding: &Binding, + semantic_model: &SemanticModel, +) -> Option { let full_name = match &binding.kind { BindingKind::Importation(Importation { full_name, .. }) => full_name, BindingKind::FromImportation(FromImportation { full_name, .. }) => full_name.as_str(), @@ -59,7 +63,15 @@ pub(crate) fn runtime_import_in_type_checking_block(binding: &Binding) -> Option _ => return None, }; - if matches!(binding.context, ExecutionContext::Typing) && binding.runtime_usage.is_some() { + if binding.context.is_typing() + && binding.references().any(|reference_id| { + semantic_model + .references + .resolve(reference_id) + .context() + .is_runtime() + }) + { Some(Diagnostic::new( RuntimeImportInTypeCheckingBlock { full_name: full_name.to_string(), diff --git a/crates/ruff/src/rules/flake8_type_checking/rules/typing_only_runtime_import.rs b/crates/ruff/src/rules/flake8_type_checking/rules/typing_only_runtime_import.rs index 64b01fe62870d..df32a2dcbb352 100644 --- a/crates/ruff/src/rules/flake8_type_checking/rules/typing_only_runtime_import.rs +++ b/crates/ruff/src/rules/flake8_type_checking/rules/typing_only_runtime_import.rs @@ -3,8 +3,9 @@ use std::path::Path; use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; use ruff_python_semantic::binding::{ - Binding, BindingKind, ExecutionContext, FromImportation, Importation, SubmoduleImportation, + Binding, BindingKind, FromImportation, Importation, SubmoduleImportation, }; +use ruff_python_semantic::model::SemanticModel; use crate::rules::isort::{categorize, ImportSection, ImportType}; use crate::settings::Settings; @@ -243,6 +244,7 @@ fn is_exempt(name: &str, exempt_modules: &[&str]) -> bool { pub(crate) fn typing_only_runtime_import( binding: &Binding, runtime_imports: &[&Binding], + semantic_model: &SemanticModel, package: Option<&Path>, settings: &Settings, ) -> Option { @@ -275,10 +277,15 @@ pub(crate) fn typing_only_runtime_import( return None; } - if matches!(binding.context, ExecutionContext::Runtime) - && binding.typing_usage.is_some() - && binding.runtime_usage.is_none() - && binding.synthetic_usage.is_none() + if binding.context.is_runtime() + && 
binding.is_used() + && binding.references().all(|reference_id| { + semantic_model + .references + .resolve(reference_id) + .context() + .is_typing() + }) { // Extract the module base and level from the full name. // Ex) `foo.bar.baz` -> `foo`, `0` diff --git a/crates/ruff/src/rules/flake8_unused_arguments/mod.rs b/crates/ruff/src/rules/flake8_unused_arguments/mod.rs index 1739430a9ab96..528a001a69316 100644 --- a/crates/ruff/src/rules/flake8_unused_arguments/mod.rs +++ b/crates/ruff/src/rules/flake8_unused_arguments/mod.rs @@ -2,14 +2,12 @@ mod helpers; pub(crate) mod rules; pub mod settings; -mod types; #[cfg(test)] mod tests { use std::path::Path; use anyhow::Result; - use test_case::test_case; use crate::registry::Rule; diff --git a/crates/ruff/src/rules/flake8_unused_arguments/rules/mod.rs b/crates/ruff/src/rules/flake8_unused_arguments/rules/mod.rs new file mode 100644 index 0000000000000..5cf115d8bf1f1 --- /dev/null +++ b/crates/ruff/src/rules/flake8_unused_arguments/rules/mod.rs @@ -0,0 +1,6 @@ +pub(crate) use unused_arguments::{ + unused_arguments, UnusedClassMethodArgument, UnusedFunctionArgument, UnusedLambdaArgument, + UnusedMethodArgument, UnusedStaticMethodArgument, +}; + +mod unused_arguments; diff --git a/crates/ruff/src/rules/flake8_unused_arguments/rules.rs b/crates/ruff/src/rules/flake8_unused_arguments/rules/unused_arguments.rs similarity index 81% rename from crates/ruff/src/rules/flake8_unused_arguments/rules.rs rename to crates/ruff/src/rules/flake8_unused_arguments/rules/unused_arguments.rs index 65a94ebf51973..c5e339e65cdf1 100644 --- a/crates/ruff/src/rules/flake8_unused_arguments/rules.rs +++ b/crates/ruff/src/rules/flake8_unused_arguments/rules/unused_arguments.rs @@ -3,6 +3,7 @@ use std::iter; use regex::Regex; use rustpython_parser::ast::{Arg, Arguments}; +use ruff_diagnostics::DiagnosticKind; use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; use ruff_python_semantic::analyze::function_type; @@ -11,10 +12,42 @@ use ruff_python_semantic::analyze::visibility; use ruff_python_semantic::binding::Bindings; use ruff_python_semantic::scope::{FunctionDef, Lambda, Scope, ScopeKind}; +use super::super::helpers; + use crate::checkers::ast::Checker; +use crate::registry::Rule; + +/// An AST node that can contain arguments. +#[derive(Copy, Clone)] +enum Argumentable { + Function, + Method, + ClassMethod, + StaticMethod, + Lambda, +} -use super::helpers; -use super::types::Argumentable; +impl Argumentable { + pub(crate) fn check_for(self, name: String) -> DiagnosticKind { + match self { + Self::Function => UnusedFunctionArgument { name }.into(), + Self::Method => UnusedMethodArgument { name }.into(), + Self::ClassMethod => UnusedClassMethodArgument { name }.into(), + Self::StaticMethod => UnusedStaticMethodArgument { name }.into(), + Self::Lambda => UnusedLambdaArgument { name }.into(), + } + } + + pub(crate) const fn rule_code(self) -> Rule { + match self { + Self::Function => Rule::UnusedFunctionArgument, + Self::Method => Rule::UnusedMethodArgument, + Self::ClassMethod => Rule::UnusedClassMethodArgument, + Self::StaticMethod => Rule::UnusedStaticMethodArgument, + Self::Lambda => Rule::UnusedLambdaArgument, + } + } +} /// ## What it does /// Checks for the presence of unused arguments in function definitions. 
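// A minimal, self-contained sketch (not ruff's actual types or API) of the
// reference-resolution pattern the flake8_type_checking changes above move to:
// instead of per-binding `runtime_usage`/`typing_usage` flags, a binding keeps
// reference ids, and each reference is resolved through the semantic model to
// ask which execution context it occurred in. Every name below
// (ExecutionContext, Reference, SemanticModel, Binding, is_typing_only) is a
// simplified stand-in introduced only for illustration.
#[derive(Clone, Copy, PartialEq, Eq)]
enum ExecutionContext {
    Runtime,
    Typing,
}

struct Reference {
    context: ExecutionContext,
}

struct SemanticModel {
    references: Vec<Reference>,
}

impl SemanticModel {
    // Resolve a reference id to the stored reference.
    fn resolve(&self, id: usize) -> &Reference {
        &self.references[id]
    }
}

struct Binding {
    context: ExecutionContext,
    reference_ids: Vec<usize>,
}

impl Binding {
    // A binding counts as "used" if it has at least one reference.
    fn is_used(&self) -> bool {
        !self.reference_ids.is_empty()
    }
}

// TCH-style test: a runtime import is typing-only when it is used and every
// one of its references occurs in a typing-only context.
fn is_typing_only(binding: &Binding, model: &SemanticModel) -> bool {
    binding.context == ExecutionContext::Runtime
        && binding.is_used()
        && binding
            .reference_ids
            .iter()
            .all(|&id| model.resolve(id).context == ExecutionContext::Typing)
}

fn main() {
    // One reference, made from a typing-only context: the import is typing-only.
    let model = SemanticModel {
        references: vec![Reference { context: ExecutionContext::Typing }],
    };
    let binding = Binding {
        context: ExecutionContext::Runtime,
        reference_ids: vec![0],
    };
    assert!(is_typing_only(&binding, &model));
}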
@@ -258,9 +291,12 @@ fn call<'a>( ) -> Vec { let mut diagnostics: Vec = vec![]; for arg in args { - if let Some(binding) = values.get(arg.arg.as_str()).map(|index| &bindings[*index]) { - if !binding.used() - && binding.kind.is_argument() + if let Some(binding) = values + .get(arg.arg.as_str()) + .map(|binding_id| &bindings[binding_id]) + { + if binding.kind.is_argument() + && !binding.is_used() && !dummy_variable_rgx.is_match(arg.arg.as_str()) { diagnostics.push(Diagnostic::new( @@ -289,7 +325,7 @@ pub(crate) fn unused_arguments( .. }) => { match function_type::classify( - &checker.ctx, + checker.semantic_model(), parent, name, decorator_list, @@ -297,11 +333,8 @@ pub(crate) fn unused_arguments( &checker.settings.pep8_naming.staticmethod_decorators, ) { FunctionType::Function => { - if checker - .settings - .rules - .enabled(Argumentable::Function.rule_code()) - && !visibility::is_overload(&checker.ctx, decorator_list) + if checker.enabled(Argumentable::Function.rule_code()) + && !visibility::is_overload(checker.semantic_model(), decorator_list) { function( Argumentable::Function, @@ -319,18 +352,15 @@ pub(crate) fn unused_arguments( } } FunctionType::Method => { - if checker - .settings - .rules - .enabled(Argumentable::Method.rule_code()) + if checker.enabled(Argumentable::Method.rule_code()) && !helpers::is_empty(body) && (!visibility::is_magic(name) || visibility::is_init(name) || visibility::is_new(name) || visibility::is_call(name)) - && !visibility::is_abstract(&checker.ctx, decorator_list) - && !visibility::is_override(&checker.ctx, decorator_list) - && !visibility::is_overload(&checker.ctx, decorator_list) + && !visibility::is_abstract(checker.semantic_model(), decorator_list) + && !visibility::is_override(checker.semantic_model(), decorator_list) + && !visibility::is_overload(checker.semantic_model(), decorator_list) { method( Argumentable::Method, @@ -348,18 +378,15 @@ pub(crate) fn unused_arguments( } } FunctionType::ClassMethod => { - if checker - .settings - .rules - .enabled(Argumentable::ClassMethod.rule_code()) + if checker.enabled(Argumentable::ClassMethod.rule_code()) && !helpers::is_empty(body) && (!visibility::is_magic(name) || visibility::is_init(name) || visibility::is_new(name) || visibility::is_call(name)) - && !visibility::is_abstract(&checker.ctx, decorator_list) - && !visibility::is_override(&checker.ctx, decorator_list) - && !visibility::is_overload(&checker.ctx, decorator_list) + && !visibility::is_abstract(checker.semantic_model(), decorator_list) + && !visibility::is_override(checker.semantic_model(), decorator_list) + && !visibility::is_overload(checker.semantic_model(), decorator_list) { method( Argumentable::ClassMethod, @@ -377,18 +404,15 @@ pub(crate) fn unused_arguments( } } FunctionType::StaticMethod => { - if checker - .settings - .rules - .enabled(Argumentable::StaticMethod.rule_code()) + if checker.enabled(Argumentable::StaticMethod.rule_code()) && !helpers::is_empty(body) && (!visibility::is_magic(name) || visibility::is_init(name) || visibility::is_new(name) || visibility::is_call(name)) - && !visibility::is_abstract(&checker.ctx, decorator_list) - && !visibility::is_override(&checker.ctx, decorator_list) - && !visibility::is_overload(&checker.ctx, decorator_list) + && !visibility::is_abstract(checker.semantic_model(), decorator_list) + && !visibility::is_override(checker.semantic_model(), decorator_list) + && !visibility::is_overload(checker.semantic_model(), decorator_list) { function( Argumentable::StaticMethod, @@ -408,11 +432,7 @@ pub(crate) 
fn unused_arguments( } } ScopeKind::Lambda(Lambda { args, .. }) => { - if checker - .settings - .rules - .enabled(Argumentable::Lambda.rule_code()) - { + if checker.enabled(Argumentable::Lambda.rule_code()) { function( Argumentable::Lambda, args, diff --git a/crates/ruff/src/rules/flake8_unused_arguments/types.rs b/crates/ruff/src/rules/flake8_unused_arguments/types.rs deleted file mode 100644 index 778e53c556b6a..0000000000000 --- a/crates/ruff/src/rules/flake8_unused_arguments/types.rs +++ /dev/null @@ -1,37 +0,0 @@ -use ruff_diagnostics::DiagnosticKind; - -use crate::registry::Rule; - -use super::rules; - -/// An AST node that can contain arguments. -#[derive(Copy, Clone)] -pub(crate) enum Argumentable { - Function, - Method, - ClassMethod, - StaticMethod, - Lambda, -} - -impl Argumentable { - pub(crate) fn check_for(self, name: String) -> DiagnosticKind { - match self { - Self::Function => rules::UnusedFunctionArgument { name }.into(), - Self::Method => rules::UnusedMethodArgument { name }.into(), - Self::ClassMethod => rules::UnusedClassMethodArgument { name }.into(), - Self::StaticMethod => rules::UnusedStaticMethodArgument { name }.into(), - Self::Lambda => rules::UnusedLambdaArgument { name }.into(), - } - } - - pub(crate) const fn rule_code(self) -> Rule { - match self { - Self::Function => Rule::UnusedFunctionArgument, - Self::Method => Rule::UnusedMethodArgument, - Self::ClassMethod => Rule::UnusedClassMethodArgument, - Self::StaticMethod => Rule::UnusedStaticMethodArgument, - Self::Lambda => Rule::UnusedLambdaArgument, - } - } -} diff --git a/crates/ruff/src/rules/flake8_use_pathlib/mod.rs b/crates/ruff/src/rules/flake8_use_pathlib/mod.rs index dc2f6ba566668..99f4aca16ce23 100644 --- a/crates/ruff/src/rules/flake8_use_pathlib/mod.rs +++ b/crates/ruff/src/rules/flake8_use_pathlib/mod.rs @@ -1,5 +1,5 @@ //! Rules from [flake8-use-pathlib](https://pypi.org/project/flake8-use-pathlib/). 
-pub(crate) mod helpers; +pub(crate) mod rules; pub(crate) mod violations; #[cfg(test)] diff --git a/crates/ruff/src/rules/flake8_use_pathlib/rules/mod.rs b/crates/ruff/src/rules/flake8_use_pathlib/rules/mod.rs new file mode 100644 index 0000000000000..33eac782e4601 --- /dev/null +++ b/crates/ruff/src/rules/flake8_use_pathlib/rules/mod.rs @@ -0,0 +1,3 @@ +pub(crate) use replaceable_by_pathlib::replaceable_by_pathlib; + +mod replaceable_by_pathlib; diff --git a/crates/ruff/src/rules/flake8_use_pathlib/helpers.rs b/crates/ruff/src/rules/flake8_use_pathlib/rules/replaceable_by_pathlib.rs similarity index 97% rename from crates/ruff/src/rules/flake8_use_pathlib/helpers.rs rename to crates/ruff/src/rules/flake8_use_pathlib/rules/replaceable_by_pathlib.rs index 5978d2dfc6235..e918123ac177c 100644 --- a/crates/ruff/src/rules/flake8_use_pathlib/helpers.rs +++ b/crates/ruff/src/rules/flake8_use_pathlib/rules/replaceable_by_pathlib.rs @@ -1,5 +1,7 @@ use rustpython_parser::ast::{Expr, Ranged}; +use ruff_diagnostics::{Diagnostic, DiagnosticKind}; + use crate::checkers::ast::Checker; use crate::registry::AsRule; use crate::rules::flake8_use_pathlib::violations::{ @@ -9,12 +11,11 @@ use crate::rules::flake8_use_pathlib::violations::{ OsRmdir, OsStat, OsUnlink, PathlibReplace, PyPath, }; use crate::settings::types::PythonVersion; -use ruff_diagnostics::{Diagnostic, DiagnosticKind}; pub(crate) fn replaceable_by_pathlib(checker: &mut Checker, expr: &Expr) { if let Some(diagnostic_kind) = checker - .ctx + .semantic_model() .resolve_call_path(expr) .and_then(|call_path| match call_path.as_slice() { ["os", "path", "abspath"] => Some(OsPathAbspath.into()), @@ -51,7 +52,7 @@ pub(crate) fn replaceable_by_pathlib(checker: &mut Checker, expr: &Expr) { { let diagnostic = Diagnostic::new::(diagnostic_kind, expr.range()); - if checker.settings.rules.enabled(diagnostic.kind.rule()) { + if checker.enabled(diagnostic.kind.rule()) { checker.diagnostics.push(diagnostic); } } diff --git a/crates/ruff/src/rules/flynt/rules/mod.rs b/crates/ruff/src/rules/flynt/rules/mod.rs index d3bd2e2746009..ce0b9462d81fc 100644 --- a/crates/ruff/src/rules/flynt/rules/mod.rs +++ b/crates/ruff/src/rules/flynt/rules/mod.rs @@ -1,3 +1,3 @@ -mod static_join_to_fstring; - pub(crate) use static_join_to_fstring::{static_join_to_fstring, StaticJoinToFString}; + +mod static_join_to_fstring; diff --git a/crates/ruff/src/rules/isort/track.rs b/crates/ruff/src/rules/isort/block.rs similarity index 94% rename from crates/ruff/src/rules/isort/track.rs rename to crates/ruff/src/rules/isort/block.rs index 77570563082f5..f01511a154bec 100644 --- a/crates/ruff/src/rules/isort/track.rs +++ b/crates/ruff/src/rules/isort/block.rs @@ -7,21 +7,24 @@ use ruff_python_ast::statement_visitor::StatementVisitor; use crate::directives::IsortDirectives; use crate::rules::isort::helpers; +/// A block of imports within a Python module. +#[derive(Debug, Default)] +pub(crate) struct Block<'a> { + pub(crate) nested: bool, + pub(crate) imports: Vec<&'a Stmt>, + pub(crate) trailer: Option, +} + +/// The type of trailer that should follow an import block. #[derive(Debug, Copy, Clone)] -pub enum Trailer { +pub(crate) enum Trailer { Sibling, ClassDef, FunctionDef, } -#[derive(Debug, Default)] -pub struct Block<'a> { - pub nested: bool, - pub imports: Vec<&'a Stmt>, - pub trailer: Option, -} - -pub(crate) struct ImportTracker<'a> { +/// A builder for identifying and constructing import blocks within a Python module. 
+pub(crate) struct BlockBuilder<'a> { locator: &'a Locator<'a>, is_stub: bool, blocks: Vec>, @@ -30,7 +33,7 @@ pub(crate) struct ImportTracker<'a> { nested: bool, } -impl<'a> ImportTracker<'a> { +impl<'a> BlockBuilder<'a> { pub(crate) fn new( locator: &'a Locator<'a>, directives: &'a IsortDirectives, @@ -111,14 +114,14 @@ impl<'a> ImportTracker<'a> { } } -impl<'a, 'b> StatementVisitor<'b> for ImportTracker<'a> +impl<'a, 'b> StatementVisitor<'b> for BlockBuilder<'a> where 'b: 'a, { fn visit_stmt(&mut self, stmt: &'b Stmt) { // Track manual splits. for (index, split) in self.splits.iter().enumerate() { - if stmt.end() >= *split { + if stmt.start() >= *split { self.finalize(self.trailer_for(stmt)); self.splits = &self.splits[index + 1..]; } else { diff --git a/crates/ruff/src/rules/isort/categorize.rs b/crates/ruff/src/rules/isort/categorize.rs index 3dc3a01b427f8..ac6348476d860 100644 --- a/crates/ruff/src/rules/isort/categorize.rs +++ b/crates/ruff/src/rules/isort/categorize.rs @@ -63,7 +63,7 @@ enum Reason<'a> { } #[allow(clippy::too_many_arguments)] -pub fn categorize<'a>( +pub(crate) fn categorize<'a>( module_name: &str, level: Option, src: &[PathBuf], @@ -220,7 +220,7 @@ pub struct KnownModules { } impl KnownModules { - pub fn new( + pub(crate) fn new( first_party: Vec, third_party: Vec, local_folder: Vec, @@ -318,7 +318,7 @@ impl KnownModules { } /// Return the list of modules that are known to be of a given type. - pub fn modules_for_known_type(&self, import_type: ImportType) -> Vec { + pub(crate) fn modules_for_known_type(&self, import_type: ImportType) -> Vec { self.known .iter() .filter_map(|(module, known_section)| { @@ -336,7 +336,7 @@ impl KnownModules { } /// Return the list of user-defined modules, indexed by section. - pub fn user_defined(&self) -> FxHashMap> { + pub(crate) fn user_defined(&self) -> FxHashMap> { let mut user_defined: FxHashMap> = FxHashMap::default(); for (module, section) in &self.known { if let ImportSection::UserDefined(section_name) = section { diff --git a/crates/ruff/src/rules/isort/comments.rs b/crates/ruff/src/rules/isort/comments.rs index 7b4d7417d8c72..ee1c4d9f7ffa3 100644 --- a/crates/ruff/src/rules/isort/comments.rs +++ b/crates/ruff/src/rules/isort/comments.rs @@ -1,28 +1,24 @@ -use ruff_text_size::{TextRange, TextSize}; use std::borrow::Cow; +use ruff_text_size::{TextRange, TextSize}; use rustpython_parser::{lexer, Mode, Tok}; use ruff_python_ast::source_code::Locator; #[derive(Debug)] -pub struct Comment<'a> { - pub value: Cow<'a, str>, - pub range: TextRange, +pub(crate) struct Comment<'a> { + pub(crate) value: Cow<'a, str>, + pub(crate) range: TextRange, } impl Comment<'_> { - pub const fn start(&self) -> TextSize { + pub(crate) const fn start(&self) -> TextSize { self.range.start() } - pub const fn end(&self) -> TextSize { + pub(crate) const fn end(&self) -> TextSize { self.range.end() } - - pub const fn range(&self) -> TextRange { - self.range - } } /// Collect all comments in an import block. 
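// An illustrative sketch (simplified offsets and types, not ruff's real
// BlockBuilder) of how manual split markers partition statements into import
// blocks, and why the comparison above keys on the statement's *start* offset:
// a statement belongs to the block after a marker only once it begins at or
// past the marker, whereas comparing against its end would also push a
// statement that begins before the marker but merely extends past it into the
// next block. All names here are hypothetical stand-ins.
#[derive(Clone, Copy)]
struct Span {
    start: u32,
    #[allow(dead_code)]
    end: u32,
}

struct BlockBuilderSketch {
    splits: Vec<u32>,       // offsets of manual split markers, in order
    blocks: Vec<Vec<Span>>, // finished blocks
    current: Vec<Span>,     // block under construction
}

impl BlockBuilderSketch {
    fn new(splits: Vec<u32>) -> Self {
        Self { splits, blocks: Vec::new(), current: Vec::new() }
    }

    // Close the current block if it contains anything.
    fn finalize(&mut self) {
        if !self.current.is_empty() {
            self.blocks.push(std::mem::take(&mut self.current));
        }
    }

    fn visit_stmt(&mut self, stmt: Span) {
        // Close the current block for every split marker the statement has
        // reached, keyed on where the statement starts.
        while self.splits.first().map_or(false, |&split| stmt.start >= split) {
            self.finalize();
            self.splits.remove(0);
        }
        self.current.push(stmt);
    }

    fn finish(mut self) -> Vec<Vec<Span>> {
        self.finalize();
        self.blocks
    }
}

fn main() {
    // Two imports, a split marker at offset 20, then a third import:
    // the builder yields two blocks.
    let mut builder = BlockBuilderSketch::new(vec![20]);
    for stmt in [
        Span { start: 0, end: 8 },
        Span { start: 9, end: 18 },
        Span { start: 25, end: 33 },
    ] {
        builder.visit_stmt(stmt);
    }
    assert_eq!(builder.finish().len(), 2);
}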
diff --git a/crates/ruff/src/rules/isort/format.rs b/crates/ruff/src/rules/isort/format.rs index 4304d86f5dd32..ece938b2ac9cc 100644 --- a/crates/ruff/src/rules/isort/format.rs +++ b/crates/ruff/src/rules/isort/format.rs @@ -1,5 +1,6 @@ use ruff_python_ast::source_code::Stylist; -use unicode_width::UnicodeWidthStr; + +use crate::line_width::{LineLength, LineWidth}; use super::types::{AliasData, CommentSet, ImportFromData, Importable}; @@ -44,7 +45,8 @@ pub(crate) fn format_import_from( import_from: &ImportFromData, comments: &CommentSet, aliases: &[(AliasData, CommentSet)], - line_length: usize, + line_length: LineLength, + indentation_width: LineWidth, stylist: &Stylist, force_wrap_aliases: bool, is_first: bool, @@ -55,8 +57,14 @@ pub(crate) fn format_import_from( .iter() .all(|(alias, _)| alias.name == "*" && alias.asname.is_none()) { - let (single_line, ..) = - format_single_line(import_from, comments, aliases, is_first, stylist); + let (single_line, ..) = format_single_line( + import_from, + comments, + aliases, + is_first, + stylist, + indentation_width, + ); return single_line; } @@ -70,8 +78,14 @@ pub(crate) fn format_import_from( || aliases.len() == 1 || aliases.iter().all(|(alias, _)| alias.asname.is_none())) { - let (single_line, import_width) = - format_single_line(import_from, comments, aliases, is_first, stylist); + let (single_line, import_width) = format_single_line( + import_from, + comments, + aliases, + is_first, + stylist, + indentation_width, + ); if import_width <= line_length || aliases.iter().any(|(alias, _)| alias.name == "*") { return single_line; } @@ -89,9 +103,10 @@ fn format_single_line( aliases: &[(AliasData, CommentSet)], is_first: bool, stylist: &Stylist, -) -> (String, usize) { + indentation_width: LineWidth, +) -> (String, LineWidth) { let mut output = String::with_capacity(CAPACITY); - let mut line_width = 0; + let mut line_width = indentation_width; if !is_first && !comments.atop.is_empty() { output.push_str(&stylist.line_ending()); @@ -105,28 +120,28 @@ fn format_single_line( output.push_str("from "); output.push_str(&module_name); output.push_str(" import "); - line_width += 5 + module_name.width() + 8; + line_width = line_width.add_width(5).add_str(&module_name).add_width(8); for (index, (AliasData { name, asname }, comments)) in aliases.iter().enumerate() { if let Some(asname) = asname { output.push_str(name); output.push_str(" as "); output.push_str(asname); - line_width += name.width() + 4 + asname.width(); + line_width = line_width.add_str(name).add_width(4).add_str(asname); } else { output.push_str(name); - line_width += name.width(); + line_width = line_width.add_str(name); } if index < aliases.len() - 1 { output.push_str(", "); - line_width += 2; + line_width = line_width.add_width(2); } for comment in &comments.inline { output.push(' '); output.push(' '); output.push_str(comment); - line_width += 2 + comment.width(); + line_width = line_width.add_width(2).add_str(comment); } } @@ -134,7 +149,7 @@ fn format_single_line( output.push(' '); output.push(' '); output.push_str(comment); - line_width += 2 + comment.width(); + line_width = line_width.add_width(2).add_str(comment); } output.push_str(&stylist.line_ending()); diff --git a/crates/ruff/src/rules/isort/mod.rs b/crates/ruff/src/rules/isort/mod.rs index ad72fdbc2b678..251b69f6e48dd 100644 --- a/crates/ruff/src/rules/isort/mod.rs +++ b/crates/ruff/src/rules/isort/mod.rs @@ -4,23 +4,26 @@ use std::collections::BTreeSet; use std::path::{Path, PathBuf}; use annotate::annotate_imports; +use 
block::{Block, Trailer}; +pub(crate) use categorize::categorize; use categorize::categorize_imports; -pub use categorize::{categorize, ImportSection, ImportType}; +pub use categorize::{ImportSection, ImportType}; use comments::Comment; use normalize::normalize_imports; use order::order_imports; use ruff_python_ast::source_code::{Locator, Stylist}; use settings::RelativeImportsOrder; use sorting::cmp_either_import; -use track::{Block, Trailer}; use types::EitherImport::{Import, ImportFrom}; use types::{AliasData, EitherImport, TrailingComma}; +use crate::line_width::{LineLength, LineWidth}; use crate::rules::isort::categorize::KnownModules; use crate::rules::isort::types::ImportBlock; use crate::settings::types::PythonVersion; mod annotate; +pub(crate) mod block; mod categorize; mod comments; mod format; @@ -31,19 +34,18 @@ pub(crate) mod rules; pub mod settings; mod sorting; mod split; -pub(crate) mod track; mod types; #[derive(Debug)] -pub struct AnnotatedAliasData<'a> { - pub name: &'a str, - pub asname: Option<&'a str>, - pub atop: Vec>, - pub inline: Vec>, +pub(crate) struct AnnotatedAliasData<'a> { + pub(crate) name: &'a str, + pub(crate) asname: Option<&'a str>, + pub(crate) atop: Vec>, + pub(crate) inline: Vec>, } #[derive(Debug)] -pub enum AnnotatedImport<'a> { +pub(crate) enum AnnotatedImport<'a> { Import { names: Vec>, atop: Vec>, @@ -60,11 +62,12 @@ pub enum AnnotatedImport<'a> { } #[allow(clippy::too_many_arguments, clippy::fn_params_excessive_bools)] -pub fn format_imports( +pub(crate) fn format_imports( block: &Block, comments: Vec, locator: &Locator, - line_length: usize, + line_length: LineLength, + indentation_width: LineWidth, stylist: &Stylist, src: &[PathBuf], package: Option<&Path>, @@ -106,6 +109,7 @@ pub fn format_imports( let block_output = format_import_block( block, line_length, + indentation_width, stylist, src, package, @@ -161,7 +165,8 @@ pub fn format_imports( #[allow(clippy::too_many_arguments, clippy::fn_params_excessive_bools)] fn format_import_block( block: ImportBlock, - line_length: usize, + line_length: LineLength, + indentation_width: LineWidth, stylist: &Stylist, src: &[PathBuf], package: Option<&Path>, @@ -263,6 +268,7 @@ fn format_import_block( &comments, &aliases, line_length, + indentation_width, stylist, force_wrap_aliases, is_first_statement, @@ -282,12 +288,11 @@ mod tests { use std::path::Path; use anyhow::Result; - - use crate::message::Message; use rustc_hash::FxHashMap; use test_case::test_case; use crate::assert_messages; + use crate::message::Message; use crate::registry::Rule; use crate::rules::isort::categorize::{ImportSection, KnownModules}; use crate::settings::Settings; @@ -687,11 +692,16 @@ mod tests { Ok(()) } + #[test_case(Path::new("comment.py"))] #[test_case(Path::new("docstring.py"))] #[test_case(Path::new("docstring.pyi"))] #[test_case(Path::new("docstring_only.py"))] - #[test_case(Path::new("multiline_docstring.py"))] + #[test_case(Path::new("docstring_with_continuation.py"))] + #[test_case(Path::new("docstring_with_semicolon.py"))] #[test_case(Path::new("empty.py"))] + #[test_case(Path::new("existing_import.py"))] + #[test_case(Path::new("multiline_docstring.py"))] + #[test_case(Path::new("off.py"))] fn required_import(path: &Path) -> Result<()> { let snapshot = format!("required_import_{}", path.to_string_lossy()); let diagnostics = test_path( diff --git a/crates/ruff/src/rules/isort/normalize.rs b/crates/ruff/src/rules/isort/normalize.rs index b626d03ededc4..1c0e4312edfdd 100644 --- 
a/crates/ruff/src/rules/isort/normalize.rs +++ b/crates/ruff/src/rules/isort/normalize.rs @@ -1,6 +1,7 @@ -use crate::rules::isort::types::TrailingComma; use std::collections::BTreeSet; +use crate::rules::isort::types::TrailingComma; + use super::types::{AliasData, ImportBlock, ImportFromData}; use super::AnnotatedImport; diff --git a/crates/ruff/src/rules/isort/rules/add_required_imports.rs b/crates/ruff/src/rules/isort/rules/add_required_imports.rs index 9658b0c26e42b..08f56d2c318cc 100644 --- a/crates/ruff/src/rules/isort/rules/add_required_imports.rs +++ b/crates/ruff/src/rules/isort/rules/add_required_imports.rs @@ -11,7 +11,6 @@ use ruff_python_ast::source_code::{Locator, Stylist}; use crate::importer::Importer; use crate::registry::Rule; -use crate::rules::isort::track::Block; use crate::settings::Settings; /// ## What it does @@ -53,58 +52,48 @@ impl AlwaysAutofixableViolation for MissingRequiredImport { } } -fn contains(block: &Block, required_import: &AnyImport) -> bool { - block.imports.iter().any(|import| match required_import { - AnyImport::Import(required_import) => { +/// Return `true` if the [`Stmt`] includes the given [`AnyImport`]. +fn includes_import(stmt: &Stmt, target: &AnyImport) -> bool { + match target { + AnyImport::Import(target) => { let Stmt::Import(ast::StmtImport { names, range: _, - }) = &import else { + }) = &stmt else { return false; }; names.iter().any(|alias| { - &alias.name == required_import.name.name - && alias.asname.as_deref() == required_import.name.as_name + &alias.name == target.name.name && alias.asname.as_deref() == target.name.as_name }) } - AnyImport::ImportFrom(required_import) => { + AnyImport::ImportFrom(target) => { let Stmt::ImportFrom(ast::StmtImportFrom { module, names, level, range: _, - }) = &import else { + }) = &stmt else { return false; }; - module.as_deref() == required_import.module - && level.map(|level| level.to_u32()) == required_import.level + module.as_deref() == target.module + && level.map(|level| level.to_u32()) == target.level && names.iter().any(|alias| { - &alias.name == required_import.name.name - && alias.asname.as_deref() == required_import.name.as_name + &alias.name == target.name.name + && alias.asname.as_deref() == target.name.as_name }) } - }) + } } #[allow(clippy::too_many_arguments)] fn add_required_import( required_import: &AnyImport, - blocks: &[&Block], python_ast: &Suite, locator: &Locator, stylist: &Stylist, settings: &Settings, is_stub: bool, ) -> Option { - // If the import is already present in a top-level block, don't add it. - if blocks - .iter() - .filter(|block| !block.nested) - .any(|block| contains(block, required_import)) - { - return None; - } - // Don't add imports to semantically-empty files. if python_ast.iter().all(is_docstring_stmt) { return None; @@ -115,6 +104,14 @@ fn add_required_import( return None; } + // If the import is already present in a top-level block, don't add it. + if python_ast + .iter() + .any(|stmt| includes_import(stmt, required_import)) + { + return None; + } + // Always insert the diagnostic at top-of-file. 
let mut diagnostic = Diagnostic::new( MissingRequiredImport(required_import.to_string()), @@ -132,7 +129,6 @@ fn add_required_import( /// I002 pub(crate) fn add_required_imports( - blocks: &[&Block], python_ast: &Suite, locator: &Locator, stylist: &Stylist, @@ -174,7 +170,6 @@ pub(crate) fn add_required_imports( }, level: level.map(|level| level.to_u32()), }), - blocks, python_ast, locator, stylist, @@ -193,7 +188,6 @@ pub(crate) fn add_required_imports( as_name: name.asname.as_deref(), }, }), - blocks, python_ast, locator, stylist, diff --git a/crates/ruff/src/rules/isort/rules/organize_imports.rs b/crates/ruff/src/rules/isort/rules/organize_imports.rs index 15ef0c8d3d097..ab79ecbda081f 100644 --- a/crates/ruff/src/rules/isort/rules/organize_imports.rs +++ b/crates/ruff/src/rules/isort/rules/organize_imports.rs @@ -13,10 +13,11 @@ use ruff_python_ast::helpers::{ use ruff_python_ast::source_code::{Indexer, Locator, Stylist}; use ruff_python_ast::whitespace::leading_space; +use crate::line_width::LineWidth; use crate::registry::AsRule; use crate::settings::Settings; -use super::super::track::Block; +use super::super::block::Block; use super::super::{comments, format_imports}; /// ## What it does @@ -116,7 +117,8 @@ pub(crate) fn organize_imports( block, comments, locator, - settings.line_length - indentation.len(), + settings.line_length, + LineWidth::new(settings.tab_size).add_str(indentation), stylist, &settings.src, package, diff --git a/crates/ruff/src/rules/isort/snapshots/ruff__rules__isort__tests__fit_line_length_comment.py.snap b/crates/ruff/src/rules/isort/snapshots/ruff__rules__isort__tests__fit_line_length_comment.py.snap index 2dfbed797846c..20c420373dd83 100644 --- a/crates/ruff/src/rules/isort/snapshots/ruff__rules__isort__tests__fit_line_length_comment.py.snap +++ b/crates/ruff/src/rules/isort/snapshots/ruff__rules__isort__tests__fit_line_length_comment.py.snap @@ -7,17 +7,30 @@ fit_line_length_comment.py:1:1: I001 [*] Import block is un-sorted or un-formatt 2 | | # Don't take this comment into account when determining whether the next import can fit on one line. 3 | | from b import c 4 | | from d import e # Do take this comment into account when determining whether the next import can fit on one line. +5 | | # The next import fits on one line. +6 | | from f import g # 012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ +7 | | # The next import doesn't fit on one line. +8 | | from h import i # 012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9 | = help: Organize imports β„Ή Suggested fix -1 1 | import a - 2 |+ -2 3 | # Don't take this comment into account when determining whether the next import can fit on one line. -3 4 | from b import c -4 |-from d import e # Do take this comment into account when determining whether the next import can fit on one line. - 5 |+from d import ( - 6 |+ e, # Do take this comment into account when determining whether the next import can fit on one line. - 7 |+) +1 1 | import a + 2 |+ +2 3 | # Don't take this comment into account when determining whether the next import can fit on one line. +3 4 | from b import c +4 |-from d import e # Do take this comment into account when determining whether the next import can fit on one line. + 5 |+from d import ( + 6 |+ e, # Do take this comment into account when determining whether the next import can fit on one line. + 7 |+) + 8 |+ +5 9 | # The next import fits on one line. 
+6 10 | from f import g # 012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ + 11 |+ +7 12 | # The next import doesn't fit on one line. +8 |-from h import i # 012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9 + 13 |+from h import ( + 14 |+ i, # 012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9012ß9πŸ’£2ℝ9 + 15 |+) diff --git a/crates/ruff/src/rules/isort/snapshots/ruff__rules__isort__tests__required_import_comment.py.snap b/crates/ruff/src/rules/isort/snapshots/ruff__rules__isort__tests__required_import_comment.py.snap new file mode 100644 index 0000000000000..9606d7a44e867 --- /dev/null +++ b/crates/ruff/src/rules/isort/snapshots/ruff__rules__isort__tests__required_import_comment.py.snap @@ -0,0 +1,19 @@ +--- +source: crates/ruff/src/rules/isort/mod.rs +--- +comment.py:1:1: I002 [*] Missing required import: `from __future__ import annotations` + | +1 | #!/usr/bin/env python3 + | I002 +2 | +3 | x = 1 + | + = help: Insert required import: `from future import annotations` + +β„Ή Suggested fix +1 1 | #!/usr/bin/env python3 + 2 |+from __future__ import annotations +2 3 | +3 4 | x = 1 + + diff --git a/crates/ruff/src/rules/isort/snapshots/ruff__rules__isort__tests__required_import_docstring_with_continuation.py.snap b/crates/ruff/src/rules/isort/snapshots/ruff__rules__isort__tests__required_import_docstring_with_continuation.py.snap new file mode 100644 index 0000000000000..2e4dfc38c9300 --- /dev/null +++ b/crates/ruff/src/rules/isort/snapshots/ruff__rules__isort__tests__required_import_docstring_with_continuation.py.snap @@ -0,0 +1,17 @@ +--- +source: crates/ruff/src/rules/isort/mod.rs +--- +docstring_with_continuation.py:1:1: I002 [*] Missing required import: `from __future__ import annotations` + | +1 | """Hello, world!"""; x = \ + | I002 +2 | 1; y = 2 + | + = help: Insert required import: `from future import annotations` + +β„Ή Suggested fix +1 |-"""Hello, world!"""; x = \ + 1 |+"""Hello, world!"""; from __future__ import annotations; x = \ +2 2 | 1; y = 2 + + diff --git a/crates/ruff/src/rules/isort/snapshots/ruff__rules__isort__tests__required_import_docstring_with_semicolon.py.snap b/crates/ruff/src/rules/isort/snapshots/ruff__rules__isort__tests__required_import_docstring_with_semicolon.py.snap new file mode 100644 index 0000000000000..0a084316d8009 --- /dev/null +++ b/crates/ruff/src/rules/isort/snapshots/ruff__rules__isort__tests__required_import_docstring_with_semicolon.py.snap @@ -0,0 +1,15 @@ +--- +source: crates/ruff/src/rules/isort/mod.rs +--- +docstring_with_semicolon.py:1:1: I002 [*] Missing required import: `from __future__ import annotations` + | +1 | """Hello, world!"""; x = 1 + | I002 + | + = help: Insert required import: `from future import annotations` + +β„Ή Suggested fix +1 |-"""Hello, world!"""; x = 1 + 1 |+"""Hello, world!"""; from __future__ import annotations; x = 1 + + diff --git a/crates/ruff/src/rules/isort/snapshots/ruff__rules__isort__tests__required_import_existing_import.py.snap b/crates/ruff/src/rules/isort/snapshots/ruff__rules__isort__tests__required_import_existing_import.py.snap new file mode 100644 index 0000000000000..d094c96a6cd9c --- /dev/null +++ b/crates/ruff/src/rules/isort/snapshots/ruff__rules__isort__tests__required_import_existing_import.py.snap @@ -0,0 +1,17 @@ +--- +source: crates/ruff/src/rules/isort/mod.rs +--- +existing_import.py:1:1: I002 [*] Missing required import: `from __future__ import annotations` + | +1 | from __future__ import 
generator_stop + | I002 +2 | import os + | + = help: Insert required import: `from future import annotations` + +β„Ή Suggested fix + 1 |+from __future__ import annotations +1 2 | from __future__ import generator_stop +2 3 | import os + + diff --git a/crates/ruff/src/rules/isort/snapshots/ruff__rules__isort__tests__required_import_off.py.snap b/crates/ruff/src/rules/isort/snapshots/ruff__rules__isort__tests__required_import_off.py.snap new file mode 100644 index 0000000000000..0d506cdcab4b7 --- /dev/null +++ b/crates/ruff/src/rules/isort/snapshots/ruff__rules__isort__tests__required_import_off.py.snap @@ -0,0 +1,20 @@ +--- +source: crates/ruff/src/rules/isort/mod.rs +--- +off.py:1:1: I002 [*] Missing required import: `from __future__ import annotations` + | +1 | # isort: off + | I002 +2 | +3 | x = 1 + | + = help: Insert required import: `from future import annotations` + +β„Ή Suggested fix +1 1 | # isort: off + 2 |+from __future__ import annotations +2 3 | +3 4 | x = 1 +4 5 | # isort: on + + diff --git a/crates/ruff/src/rules/isort/snapshots/ruff__rules__isort__tests__split.py.snap b/crates/ruff/src/rules/isort/snapshots/ruff__rules__isort__tests__split.py.snap index 94aa1559b8bb6..4168c7420881a 100644 --- a/crates/ruff/src/rules/isort/snapshots/ruff__rules__isort__tests__split.py.snap +++ b/crates/ruff/src/rules/isort/snapshots/ruff__rules__isort__tests__split.py.snap @@ -1,4 +1,43 @@ --- source: crates/ruff/src/rules/isort/mod.rs --- +split.py:15:1: I001 [*] Import block is un-sorted or un-formatted + | +15 | if True: +16 | / import C +17 | | import A +18 | | + | |_^ I001 +19 | # isort: split + | + = help: Organize imports + +β„Ή Suggested fix +12 12 | import b +13 13 | +14 14 | if True: + 15 |+ import A +15 16 | import C +16 |- import A +17 17 | +18 18 | # isort: split +19 19 | + +split.py:20:1: I001 [*] Import block is un-sorted or un-formatted + | +20 | # isort: split +21 | +22 | / import D +23 | | import B + | + = help: Organize imports + +β„Ή Suggested fix +17 17 | +18 18 | # isort: split +19 19 | + 20 |+ import B +20 21 | import D +21 |- import B + diff --git a/crates/ruff/src/rules/isort/types.rs b/crates/ruff/src/rules/isort/types.rs index c99b50e40f2e6..a6e6247ff3831 100644 --- a/crates/ruff/src/rules/isort/types.rs +++ b/crates/ruff/src/rules/isort/types.rs @@ -5,7 +5,7 @@ use rustc_hash::FxHashMap; use ruff_python_ast::helpers::format_import_from; #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] -pub enum TrailingComma { +pub(crate) enum TrailingComma { Present, #[default] Absent, @@ -18,9 +18,9 @@ pub(crate) struct ImportFromData<'a> { } #[derive(Debug, Hash, Ord, PartialOrd, Eq, PartialEq)] -pub struct AliasData<'a> { - pub name: &'a str, - pub asname: Option<&'a str>, +pub(crate) struct AliasData<'a> { + pub(crate) name: &'a str, + pub(crate) asname: Option<&'a str>, } #[derive(Debug, Default, Clone)] diff --git a/crates/ruff/src/rules/mccabe/mod.rs b/crates/ruff/src/rules/mccabe/mod.rs index ad21d2e16c5fd..697859e26a52b 100644 --- a/crates/ruff/src/rules/mccabe/mod.rs +++ b/crates/ruff/src/rules/mccabe/mod.rs @@ -6,11 +6,10 @@ pub mod settings; mod tests { use std::path::Path; - use crate::assert_messages; use anyhow::Result; - use test_case::test_case; + use crate::assert_messages; use crate::registry::Rule; use crate::settings::Settings; use crate::test::test_path; diff --git a/crates/ruff/src/rules/mccabe/rules.rs b/crates/ruff/src/rules/mccabe/rules/function_is_too_complex.rs similarity index 100% rename from crates/ruff/src/rules/mccabe/rules.rs rename to 
crates/ruff/src/rules/mccabe/rules/function_is_too_complex.rs diff --git a/crates/ruff/src/rules/mccabe/rules/mod.rs b/crates/ruff/src/rules/mccabe/rules/mod.rs new file mode 100644 index 0000000000000..b6898b7da22a8 --- /dev/null +++ b/crates/ruff/src/rules/mccabe/rules/mod.rs @@ -0,0 +1,3 @@ +pub(crate) use function_is_too_complex::{function_is_too_complex, ComplexStructure}; + +mod function_is_too_complex; diff --git a/crates/ruff/src/rules/mccabe/settings.rs b/crates/ruff/src/rules/mccabe/settings.rs index 8f53f535e2e6b..eb8daa8dccc56 100644 --- a/crates/ruff/src/rules/mccabe/settings.rs +++ b/crates/ruff/src/rules/mccabe/settings.rs @@ -1,8 +1,9 @@ //! Settings for the `mccabe` plugin. -use ruff_macros::{CacheKey, CombineOptions, ConfigurationOptions}; use serde::{Deserialize, Serialize}; +use ruff_macros::{CacheKey, CombineOptions, ConfigurationOptions}; + #[derive( Debug, PartialEq, Eq, Serialize, Deserialize, Default, ConfigurationOptions, CombineOptions, )] diff --git a/crates/ruff/src/rules/numpy/mod.rs b/crates/ruff/src/rules/numpy/mod.rs index 865b116cd9d95..97a25000e517b 100644 --- a/crates/ruff/src/rules/numpy/mod.rs +++ b/crates/ruff/src/rules/numpy/mod.rs @@ -7,7 +7,6 @@ mod tests { use std::path::Path; use anyhow::Result; - use test_case::test_case; use crate::registry::Rule; diff --git a/crates/ruff/src/rules/numpy/rules/deprecated_type_alias.rs b/crates/ruff/src/rules/numpy/rules/deprecated_type_alias.rs index 8a136e292f531..834400e5eb79f 100644 --- a/crates/ruff/src/rules/numpy/rules/deprecated_type_alias.rs +++ b/crates/ruff/src/rules/numpy/rules/deprecated_type_alias.rs @@ -48,21 +48,26 @@ impl AlwaysAutofixableViolation for NumpyDeprecatedTypeAlias { /// NPY001 pub(crate) fn deprecated_type_alias(checker: &mut Checker, expr: &Expr) { - if let Some(type_name) = checker.ctx.resolve_call_path(expr).and_then(|call_path| { - if call_path.as_slice() == ["numpy", "bool"] - || call_path.as_slice() == ["numpy", "int"] - || call_path.as_slice() == ["numpy", "float"] - || call_path.as_slice() == ["numpy", "complex"] - || call_path.as_slice() == ["numpy", "object"] - || call_path.as_slice() == ["numpy", "str"] - || call_path.as_slice() == ["numpy", "long"] - || call_path.as_slice() == ["numpy", "unicode"] - { - Some(call_path[1]) - } else { - None - } - }) { + if let Some(type_name) = + checker + .semantic_model() + .resolve_call_path(expr) + .and_then(|call_path| { + if call_path.as_slice() == ["numpy", "bool"] + || call_path.as_slice() == ["numpy", "int"] + || call_path.as_slice() == ["numpy", "float"] + || call_path.as_slice() == ["numpy", "complex"] + || call_path.as_slice() == ["numpy", "object"] + || call_path.as_slice() == ["numpy", "str"] + || call_path.as_slice() == ["numpy", "long"] + || call_path.as_slice() == ["numpy", "unicode"] + { + Some(call_path[1]) + } else { + None + } + }) + { let mut diagnostic = Diagnostic::new( NumpyDeprecatedTypeAlias { type_name: type_name.to_string(), diff --git a/crates/ruff/src/rules/numpy/rules/numpy_legacy_random.rs b/crates/ruff/src/rules/numpy/rules/numpy_legacy_random.rs index d1e5aa055b631..0e52c3917625a 100644 --- a/crates/ruff/src/rules/numpy/rules/numpy_legacy_random.rs +++ b/crates/ruff/src/rules/numpy/rules/numpy_legacy_random.rs @@ -1,9 +1,10 @@ use rustpython_parser::ast::{Expr, Ranged}; -use crate::checkers::ast::Checker; use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; +use crate::checkers::ast::Checker; + /// ## What it does /// Checks for the use of legacy 
`np.random` function calls. /// @@ -57,9 +58,13 @@ impl Violation for NumpyLegacyRandom { /// NPY002 pub(crate) fn numpy_legacy_random(checker: &mut Checker, expr: &Expr) { - if let Some(method_name) = checker.ctx.resolve_call_path(expr).and_then(|call_path| { - // seeding state - if call_path.as_slice() == ["numpy", "random", "seed"] + if let Some(method_name) = + checker + .semantic_model() + .resolve_call_path(expr) + .and_then(|call_path| { + // seeding state + if call_path.as_slice() == ["numpy", "random", "seed"] || call_path.as_slice() == ["numpy", "random", "get_state"] || call_path.as_slice() == ["numpy", "random", "set_state"] // simple random data @@ -110,12 +115,13 @@ pub(crate) fn numpy_legacy_random(checker: &mut Checker, expr: &Expr) { || call_path.as_slice() == ["numpy", "random", "wald"] || call_path.as_slice() == ["numpy", "random", "weibull"] || call_path.as_slice() == ["numpy", "random", "zipf"] - { - Some(call_path[2]) - } else { - None - } - }) { + { + Some(call_path[2]) + } else { + None + } + }) + { checker.diagnostics.push(Diagnostic::new( NumpyLegacyRandom { method_name: method_name.to_string(), diff --git a/crates/ruff/src/rules/pandas_vet/helpers.rs b/crates/ruff/src/rules/pandas_vet/helpers.rs index 45db1a5d0b8c7..6783a960ddbd4 100644 --- a/crates/ruff/src/rules/pandas_vet/helpers.rs +++ b/crates/ruff/src/rules/pandas_vet/helpers.rs @@ -1,8 +1,9 @@ -use ruff_python_semantic::binding::{BindingKind, Importation}; -use ruff_python_semantic::context::Context; use rustpython_parser::ast; use rustpython_parser::ast::Expr; +use ruff_python_semantic::binding::{BindingKind, Importation}; +use ruff_python_semantic::model::SemanticModel; + pub(crate) enum Resolution { /// The expression resolves to an irrelevant expression type (e.g., a constant). IrrelevantExpression, @@ -15,7 +16,7 @@ pub(crate) enum Resolution { } /// Test an [`Expr`] for relevance to Pandas-related operations. -pub(crate) fn test_expression(expr: &Expr, context: &Context) -> Resolution { +pub(crate) fn test_expression(expr: &Expr, model: &SemanticModel) -> Resolution { match expr { Expr::Constant(_) | Expr::Tuple(_) @@ -27,7 +28,7 @@ pub(crate) fn test_expression(expr: &Expr, context: &Context) -> Resolution { | Expr::DictComp(_) | Expr::GeneratorExp(_) => Resolution::IrrelevantExpression, Expr::Name(ast::ExprName { id, .. }) => { - context + model .find_binding(id) .map_or(Resolution::IrrelevantBinding, |binding| { match binding.kind { diff --git a/crates/ruff/src/rules/pandas_vet/mod.rs b/crates/ruff/src/rules/pandas_vet/mod.rs index cc610c5ad6cdb..346caf8137123 100644 --- a/crates/ruff/src/rules/pandas_vet/mod.rs +++ b/crates/ruff/src/rules/pandas_vet/mod.rs @@ -8,7 +8,6 @@ mod tests { use std::path::Path; use anyhow::Result; - use rustpython_parser::lexer::LexResult; use test_case::test_case; use textwrap::dedent; diff --git a/crates/ruff/src/rules/pandas_vet/rules/attr.rs b/crates/ruff/src/rules/pandas_vet/rules/attr.rs index 67311ad8a0460..ccdb5616b159c 100644 --- a/crates/ruff/src/rules/pandas_vet/rules/attr.rs +++ b/crates/ruff/src/rules/pandas_vet/rules/attr.rs @@ -26,7 +26,7 @@ pub(crate) fn attr(checker: &mut Checker, attr: &str, value: &Expr, attr_expr: & }; // Avoid flagging on function calls (e.g., `df.values()`). 
- if let Some(parent) = checker.ctx.expr_parent() { + if let Some(parent) = checker.semantic_model().expr_parent() { if matches!(parent, Expr::Call(_)) { return; } @@ -35,7 +35,7 @@ pub(crate) fn attr(checker: &mut Checker, attr: &str, value: &Expr, attr_expr: & // Avoid flagging on non-DataFrames (e.g., `{"a": 1}.values`), and on irrelevant bindings // (like imports). if !matches!( - test_expression(value, &checker.ctx), + test_expression(value, checker.semantic_model()), Resolution::RelevantLocal ) { return; diff --git a/crates/ruff/src/rules/pandas_vet/rules/call.rs b/crates/ruff/src/rules/pandas_vet/rules/call.rs index 2d0396d1fc12a..ecea41b6f87f5 100644 --- a/crates/ruff/src/rules/pandas_vet/rules/call.rs +++ b/crates/ruff/src/rules/pandas_vet/rules/call.rs @@ -80,7 +80,7 @@ pub(crate) fn call(checker: &mut Checker, func: &Expr) { // Ignore irrelevant bindings (like imports). if !matches!( - test_expression(value, &checker.ctx), + test_expression(value, checker.semantic_model()), Resolution::RelevantLocal | Resolution::PandasModule ) { return; diff --git a/crates/ruff/src/rules/pandas_vet/rules/inplace_argument.rs b/crates/ruff/src/rules/pandas_vet/rules/inplace_argument.rs index 40421917bfe1b..bbb0c3e9a0441 100644 --- a/crates/ruff/src/rules/pandas_vet/rules/inplace_argument.rs +++ b/crates/ruff/src/rules/pandas_vet/rules/inplace_argument.rs @@ -1,8 +1,8 @@ -use ruff_python_semantic::binding::{BindingKind, Importation}; use rustpython_parser::ast::{self, Constant, Expr, Keyword, Ranged}; use ruff_diagnostics::{AutofixKind, Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; +use ruff_python_semantic::binding::{BindingKind, Importation}; use crate::checkers::ast::Checker; use crate::registry::AsRule; @@ -60,19 +60,22 @@ pub(crate) fn inplace_argument( let mut is_checkable = false; let mut is_pandas = false; - if let Some(call_path) = checker.ctx.resolve_call_path(func) { + if let Some(call_path) = checker.semantic_model().resolve_call_path(func) { is_checkable = true; let module = call_path[0]; - is_pandas = checker.ctx.find_binding(module).map_or(false, |binding| { - matches!( - binding.kind, - BindingKind::Importation(Importation { - full_name: "pandas", - .. - }) - ) - }); + is_pandas = checker + .semantic_model() + .find_binding(module) + .map_or(false, |binding| { + matches!( + binding.kind, + BindingKind::Importation(Importation { + full_name: "pandas", + .. + }) + ) + }); } for keyword in keywords.iter().rev() { @@ -99,9 +102,9 @@ pub(crate) fn inplace_argument( // but we don't currently restore expression stacks when parsing deferred nodes, // and so the parent is lost. 
let fixable = !seen_star - && checker.ctx.stmt().is_expr_stmt() - && checker.ctx.expr_parent().is_none() - && !checker.ctx.scope().kind.is_lambda(); + && checker.semantic_model().stmt().is_expr_stmt() + && checker.semantic_model().expr_parent().is_none() + && !checker.semantic_model().scope().kind.is_lambda(); let mut diagnostic = Diagnostic::new(PandasUseOfInplaceArgument, keyword.range()); if fixable && checker.patch(diagnostic.kind.rule()) { if let Some(fix) = convert_inplace_argument_to_assignment( diff --git a/crates/ruff/src/rules/pandas_vet/rules/subscript.rs b/crates/ruff/src/rules/pandas_vet/rules/subscript.rs index 1ee7147a0b51f..96430e0151573 100644 --- a/crates/ruff/src/rules/pandas_vet/rules/subscript.rs +++ b/crates/ruff/src/rules/pandas_vet/rules/subscript.rs @@ -54,7 +54,7 @@ pub(crate) fn subscript(checker: &mut Checker, value: &Expr, expr: &Expr) { // Avoid flagging on non-DataFrames (e.g., `{"a": 1}.at[0]`), and on irrelevant bindings // (like imports). if !matches!( - test_expression(value, &checker.ctx), + test_expression(value, checker.semantic_model()), Resolution::RelevantLocal ) { return; diff --git a/crates/ruff/src/rules/pep8_naming/helpers.rs b/crates/ruff/src/rules/pep8_naming/helpers.rs index 29b0a803f10c6..11e96981ca5a2 100644 --- a/crates/ruff/src/rules/pep8_naming/helpers.rs +++ b/crates/ruff/src/rules/pep8_naming/helpers.rs @@ -1,7 +1,7 @@ use itertools::Itertools; -use ruff_python_semantic::context::Context; use rustpython_parser::ast::{self, Expr, Stmt}; +use ruff_python_semantic::model::SemanticModel; use ruff_python_stdlib::str::{is_lower, is_upper}; pub(crate) fn is_camelcase(name: &str) -> bool { @@ -22,14 +22,14 @@ pub(crate) fn is_acronym(name: &str, asname: &str) -> bool { name.chars().filter(|c| c.is_uppercase()).join("") == asname } -pub(crate) fn is_named_tuple_assignment(context: &Context, stmt: &Stmt) -> bool { +pub(crate) fn is_named_tuple_assignment(model: &SemanticModel, stmt: &Stmt) -> bool { let Stmt::Assign(ast::StmtAssign { value, .. }) = stmt else { return false; }; let Expr::Call(ast::ExprCall {func, ..}) = value.as_ref() else { return false; }; - context.resolve_call_path(func).map_or(false, |call_path| { + model.resolve_call_path(func).map_or(false, |call_path| { matches!( call_path.as_slice(), ["collections", "namedtuple"] | ["typing", "NamedTuple"] @@ -37,35 +37,35 @@ pub(crate) fn is_named_tuple_assignment(context: &Context, stmt: &Stmt) -> bool }) } -pub(crate) fn is_typed_dict_assignment(context: &Context, stmt: &Stmt) -> bool { +pub(crate) fn is_typed_dict_assignment(model: &SemanticModel, stmt: &Stmt) -> bool { let Stmt::Assign(ast::StmtAssign { value, .. }) = stmt else { return false; }; let Expr::Call(ast::ExprCall {func, ..}) = value.as_ref() else { return false; }; - context.resolve_call_path(func).map_or(false, |call_path| { + model.resolve_call_path(func).map_or(false, |call_path| { call_path.as_slice() == ["typing", "TypedDict"] }) } -pub(crate) fn is_type_var_assignment(context: &Context, stmt: &Stmt) -> bool { +pub(crate) fn is_type_var_assignment(model: &SemanticModel, stmt: &Stmt) -> bool { let Stmt::Assign(ast::StmtAssign { value, .. 
}) = stmt else { return false; }; let Expr::Call(ast::ExprCall {func, ..}) = value.as_ref() else { return false; }; - context.resolve_call_path(func).map_or(false, |call_path| { + model.resolve_call_path(func).map_or(false, |call_path| { call_path.as_slice() == ["typing", "TypeVar"] || call_path.as_slice() == ["typing", "NewType"] }) } -pub(crate) fn is_typed_dict_class(context: &Context, bases: &[Expr]) -> bool { +pub(crate) fn is_typed_dict_class(model: &SemanticModel, bases: &[Expr]) -> bool { bases .iter() - .any(|base| context.match_typing_expr(base, "TypedDict")) + .any(|base| model.match_typing_expr(base, "TypedDict")) } #[cfg(test)] diff --git a/crates/ruff/src/rules/pep8_naming/mod.rs b/crates/ruff/src/rules/pep8_naming/mod.rs index 989376efd067b..8c4f53b04f408 100644 --- a/crates/ruff/src/rules/pep8_naming/mod.rs +++ b/crates/ruff/src/rules/pep8_naming/mod.rs @@ -8,7 +8,6 @@ mod tests { use std::path::Path; use anyhow::Result; - use test_case::test_case; use crate::registry::Rule; diff --git a/crates/ruff/src/rules/pep8_naming/rules/invalid_first_argument_name_for_class_method.rs b/crates/ruff/src/rules/pep8_naming/rules/invalid_first_argument_name_for_class_method.rs index 010d69dfb414b..df5e7c64bc07c 100644 --- a/crates/ruff/src/rules/pep8_naming/rules/invalid_first_argument_name_for_class_method.rs +++ b/crates/ruff/src/rules/pep8_naming/rules/invalid_first_argument_name_for_class_method.rs @@ -64,7 +64,7 @@ pub(crate) fn invalid_first_argument_name_for_class_method( ) -> Option { if !matches!( function_type::classify( - &checker.ctx, + checker.semantic_model(), scope, name, decorator_list, diff --git a/crates/ruff/src/rules/pep8_naming/rules/invalid_first_argument_name_for_method.rs b/crates/ruff/src/rules/pep8_naming/rules/invalid_first_argument_name_for_method.rs index 6a6c786035d63..c0e0cb3895e3f 100644 --- a/crates/ruff/src/rules/pep8_naming/rules/invalid_first_argument_name_for_method.rs +++ b/crates/ruff/src/rules/pep8_naming/rules/invalid_first_argument_name_for_method.rs @@ -61,7 +61,7 @@ pub(crate) fn invalid_first_argument_name_for_method( ) -> Option { if !matches!( function_type::classify( - &checker.ctx, + checker.semantic_model(), scope, name, decorator_list, diff --git a/crates/ruff/src/rules/pep8_naming/rules/invalid_function_name.rs b/crates/ruff/src/rules/pep8_naming/rules/invalid_function_name.rs index e8843a7a3f3f9..1b908c225626b 100644 --- a/crates/ruff/src/rules/pep8_naming/rules/invalid_function_name.rs +++ b/crates/ruff/src/rules/pep8_naming/rules/invalid_function_name.rs @@ -5,7 +5,7 @@ use ruff_macros::{derive_message_formats, violation}; use ruff_python_ast::helpers::identifier_range; use ruff_python_ast::source_code::Locator; use ruff_python_semantic::analyze::visibility; -use ruff_python_semantic::context::Context; +use ruff_python_semantic::model::SemanticModel; /// ## What it does /// Checks for functions names that do not follow the `snake_case` naming @@ -53,7 +53,7 @@ pub(crate) fn invalid_function_name( name: &str, decorator_list: &[Expr], ignore_names: &[String], - ctx: &Context, + model: &SemanticModel, locator: &Locator, ) -> Option { // Ignore any explicitly-ignored function names. @@ -68,7 +68,7 @@ pub(crate) fn invalid_function_name( // Ignore any functions that are explicitly `@override`. These are defined elsewhere, // so if they're first-party, we'll flag them at the definition site. 
- if visibility::is_override(ctx, decorator_list) { + if visibility::is_override(model, decorator_list) { return None; } diff --git a/crates/ruff/src/rules/pep8_naming/rules/invalid_module_name.rs b/crates/ruff/src/rules/pep8_naming/rules/invalid_module_name.rs index 165f3036e0097..aa42ba8a89af8 100644 --- a/crates/ruff/src/rules/pep8_naming/rules/invalid_module_name.rs +++ b/crates/ruff/src/rules/pep8_naming/rules/invalid_module_name.rs @@ -1,7 +1,8 @@ -use ruff_text_size::TextRange; use std::ffi::OsStr; use std::path::Path; +use ruff_text_size::TextRange; + use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; use ruff_python_stdlib::identifiers::{is_migration_name, is_module_name}; diff --git a/crates/ruff/src/rules/pep8_naming/rules/mixed_case_variable_in_class_scope.rs b/crates/ruff/src/rules/pep8_naming/rules/mixed_case_variable_in_class_scope.rs index 6834f0a09334f..2cb07937896e3 100644 --- a/crates/ruff/src/rules/pep8_naming/rules/mixed_case_variable_in_class_scope.rs +++ b/crates/ruff/src/rules/pep8_naming/rules/mixed_case_variable_in_class_scope.rs @@ -67,8 +67,8 @@ pub(crate) fn mixed_case_variable_in_class_scope( return; } if helpers::is_mixed_case(name) - && !helpers::is_named_tuple_assignment(&checker.ctx, stmt) - && !helpers::is_typed_dict_class(&checker.ctx, bases) + && !helpers::is_named_tuple_assignment(checker.semantic_model(), stmt) + && !helpers::is_typed_dict_class(checker.semantic_model(), bases) { checker.diagnostics.push(Diagnostic::new( MixedCaseVariableInClassScope { diff --git a/crates/ruff/src/rules/pep8_naming/rules/mixed_case_variable_in_global_scope.rs b/crates/ruff/src/rules/pep8_naming/rules/mixed_case_variable_in_global_scope.rs index fcad8aa628572..b0312c50d6e66 100644 --- a/crates/ruff/src/rules/pep8_naming/rules/mixed_case_variable_in_global_scope.rs +++ b/crates/ruff/src/rules/pep8_naming/rules/mixed_case_variable_in_global_scope.rs @@ -75,7 +75,9 @@ pub(crate) fn mixed_case_variable_in_global_scope( { return; } - if helpers::is_mixed_case(name) && !helpers::is_named_tuple_assignment(&checker.ctx, stmt) { + if helpers::is_mixed_case(name) + && !helpers::is_named_tuple_assignment(checker.semantic_model(), stmt) + { checker.diagnostics.push(Diagnostic::new( MixedCaseVariableInGlobalScope { name: name.to_string(), diff --git a/crates/ruff/src/rules/pep8_naming/rules/non_lowercase_variable_in_function.rs b/crates/ruff/src/rules/pep8_naming/rules/non_lowercase_variable_in_function.rs index 2b7e45286dfe8..ed7648fb34891 100644 --- a/crates/ruff/src/rules/pep8_naming/rules/non_lowercase_variable_in_function.rs +++ b/crates/ruff/src/rules/pep8_naming/rules/non_lowercase_variable_in_function.rs @@ -66,9 +66,9 @@ pub(crate) fn non_lowercase_variable_in_function( } if name.to_lowercase() != name - && !helpers::is_named_tuple_assignment(&checker.ctx, stmt) - && !helpers::is_typed_dict_assignment(&checker.ctx, stmt) - && !helpers::is_type_var_assignment(&checker.ctx, stmt) + && !helpers::is_named_tuple_assignment(checker.semantic_model(), stmt) + && !helpers::is_typed_dict_assignment(checker.semantic_model(), stmt) + && !helpers::is_type_var_assignment(checker.semantic_model(), stmt) { checker.diagnostics.push(Diagnostic::new( NonLowercaseVariableInFunction { diff --git a/crates/ruff/src/rules/pep8_naming/settings.rs b/crates/ruff/src/rules/pep8_naming/settings.rs index ffc77699fc0d8..cfacb63632ff1 100644 --- a/crates/ruff/src/rules/pep8_naming/settings.rs +++ b/crates/ruff/src/rules/pep8_naming/settings.rs @@ -1,8 
+1,9 @@ //! Settings for the `pep8-naming` plugin. -use ruff_macros::{CacheKey, CombineOptions, ConfigurationOptions}; use serde::{Deserialize, Serialize}; +use ruff_macros::{CacheKey, CombineOptions, ConfigurationOptions}; + const IGNORE_NAMES: [&str; 12] = [ "setUp", "tearDown", diff --git a/crates/ruff/src/rules/pycodestyle/helpers.rs b/crates/ruff/src/rules/pycodestyle/helpers.rs index 40b0314e9a3cf..519ec3040e381 100644 --- a/crates/ruff/src/rules/pycodestyle/helpers.rs +++ b/crates/ruff/src/rules/pycodestyle/helpers.rs @@ -1,10 +1,12 @@ use ruff_text_size::{TextLen, TextRange}; use rustpython_parser::ast::{self, Cmpop, Expr}; -use unicode_width::{UnicodeWidthChar, UnicodeWidthStr}; +use unicode_width::UnicodeWidthStr; use ruff_python_ast::newlines::Line; use ruff_python_ast::source_code::Generator; +use crate::line_width::{LineLength, LineWidth, TabSize}; + pub(crate) fn is_ambiguous_name(name: &str) -> bool { name == "l" || name == "I" || name == "O" } @@ -26,19 +28,19 @@ pub(crate) fn compare( pub(super) fn is_overlong( line: &Line, - limit: usize, + limit: LineLength, ignore_overlong_task_comments: bool, task_tags: &[String], + tab_size: TabSize, ) -> Option { let mut start_offset = line.start(); - let mut width = 0; + let mut width = LineWidth::new(tab_size); for c in line.chars() { if width < limit { start_offset += c.text_len(); } - - width += c.width().unwrap_or(0); + width = width.add_char(c); } if width <= limit { @@ -64,14 +66,14 @@ pub(super) fn is_overlong( // begins before the limit. let last_chunk = chunks.last().unwrap_or(second_chunk); if last_chunk.contains("://") { - if width - last_chunk.width() <= limit { + if width.get() - last_chunk.width() <= limit.get() { return None; } } Some(Overlong { range: TextRange::new(start_offset, line.end()), - width, + width: width.get(), }) } diff --git a/crates/ruff/src/rules/pycodestyle/mod.rs b/crates/ruff/src/rules/pycodestyle/mod.rs index 7b7f492426335..cc2570824631e 100644 --- a/crates/ruff/src/rules/pycodestyle/mod.rs +++ b/crates/ruff/src/rules/pycodestyle/mod.rs @@ -9,9 +9,9 @@ mod tests { use std::path::Path; use anyhow::Result; - use test_case::test_case; + use crate::line_width::LineLength; use crate::registry::Rule; use crate::test::test_path; use crate::{assert_messages, settings}; @@ -167,7 +167,7 @@ mod tests { Path::new("pycodestyle/W505.py"), &settings::Settings { pycodestyle: Settings { - max_doc_length: Some(50), + max_doc_length: Some(LineLength::from(50)), ..Settings::default() }, ..settings::Settings::for_rule(Rule::DocLineTooLong) @@ -176,4 +176,37 @@ mod tests { assert_messages!(diagnostics); Ok(()) } + + #[test] + fn max_doc_length_with_utf_8() -> Result<()> { + let diagnostics = test_path( + Path::new("pycodestyle/W505_utf_8.py"), + &settings::Settings { + pycodestyle: Settings { + max_doc_length: Some(LineLength::from(50)), + ..Settings::default() + }, + ..settings::Settings::for_rule(Rule::DocLineTooLong) + }, + )?; + assert_messages!(diagnostics); + Ok(()) + } + + #[test_case(1)] + #[test_case(2)] + #[test_case(4)] + #[test_case(8)] + fn tab_size(tab_size: u8) -> Result<()> { + let snapshot = format!("tab_size_{tab_size}"); + let diagnostics = test_path( + Path::new("pycodestyle/E501_2.py"), + &settings::Settings { + tab_size: tab_size.into(), + ..settings::Settings::for_rule(Rule::LineTooLong) + }, + )?; + assert_messages!(snapshot, diagnostics); + Ok(()) + } } diff --git a/crates/ruff/src/rules/pycodestyle/rules/ambiguous_class_name.rs b/crates/ruff/src/rules/pycodestyle/rules/ambiguous_class_name.rs 
index d03b69ac624b6..c5159f0119119 100644 --- a/crates/ruff/src/rules/pycodestyle/rules/ambiguous_class_name.rs +++ b/crates/ruff/src/rules/pycodestyle/rules/ambiguous_class_name.rs @@ -1,6 +1,7 @@ +use ruff_text_size::TextRange; + use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; -use ruff_text_size::TextRange; use crate::rules::pycodestyle::helpers::is_ambiguous_name; diff --git a/crates/ruff/src/rules/pycodestyle/rules/ambiguous_function_name.rs b/crates/ruff/src/rules/pycodestyle/rules/ambiguous_function_name.rs index c45bfda3a023b..653814af77ed6 100644 --- a/crates/ruff/src/rules/pycodestyle/rules/ambiguous_function_name.rs +++ b/crates/ruff/src/rules/pycodestyle/rules/ambiguous_function_name.rs @@ -1,6 +1,7 @@ +use ruff_text_size::TextRange; + use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; -use ruff_text_size::TextRange; use crate::rules::pycodestyle::helpers::is_ambiguous_name; diff --git a/crates/ruff/src/rules/pycodestyle/rules/ambiguous_variable_name.rs b/crates/ruff/src/rules/pycodestyle/rules/ambiguous_variable_name.rs index 4aa2ecdbe448d..4abeb9444ea2f 100644 --- a/crates/ruff/src/rules/pycodestyle/rules/ambiguous_variable_name.rs +++ b/crates/ruff/src/rules/pycodestyle/rules/ambiguous_variable_name.rs @@ -1,6 +1,7 @@ +use ruff_text_size::TextRange; + use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; -use ruff_text_size::TextRange; use crate::rules::pycodestyle::helpers::is_ambiguous_name; diff --git a/crates/ruff/src/rules/pycodestyle/rules/doc_line_too_long.rs b/crates/ruff/src/rules/pycodestyle/rules/doc_line_too_long.rs index 99b663a733650..74e94801631d7 100644 --- a/crates/ruff/src/rules/pycodestyle/rules/doc_line_too_long.rs +++ b/crates/ruff/src/rules/pycodestyle/rules/doc_line_too_long.rs @@ -48,6 +48,12 @@ pub(crate) fn doc_line_too_long(line: &Line, settings: &Settings) -> Option Option<(Vec, Expr)> { +fn extract_types(model: &SemanticModel, annotation: &Expr) -> Option<(Vec, Expr)> { let Expr::Subscript(ast::ExprSubscript { value, slice, .. }) = &annotation else { return None; }; @@ -135,9 +135,9 @@ fn extract_types(ctx: &Context, annotation: &Expr) -> Option<(Vec, Expr)> return None; } - if !ctx.resolve_call_path(value).map_or(false, |call_path| { + if !model.resolve_call_path(value).map_or(false, |call_path| { call_path.as_slice() == ["collections", "abc", "Callable"] - || ctx.match_typing_call_path(&call_path, "Callable") + || model.match_typing_call_path(&call_path, "Callable") }) { return None; } @@ -160,7 +160,7 @@ fn extract_types(ctx: &Context, annotation: &Expr) -> Option<(Vec, Expr)> } fn function( - ctx: &Context, + model: &SemanticModel, name: &str, args: &Arguments, body: &Expr, @@ -172,7 +172,7 @@ fn function( range: TextRange::default(), }); if let Some(annotation) = annotation { - if let Some((arg_types, return_type)) = extract_types(ctx, annotation) { + if let Some((arg_types, return_type)) = extract_types(model, annotation) { // A `lambda` expression can only have positional and positional-only // arguments. The order is always positional-only first, then positional. 
let new_posonlyargs = args diff --git a/crates/ruff/src/rules/pycodestyle/rules/line_too_long.rs b/crates/ruff/src/rules/pycodestyle/rules/line_too_long.rs index 4e34d25059c7e..309d2fe36e618 100644 --- a/crates/ruff/src/rules/pycodestyle/rules/line_too_long.rs +++ b/crates/ruff/src/rules/pycodestyle/rules/line_too_long.rs @@ -43,6 +43,7 @@ pub(crate) fn line_too_long(line: &Line, settings: &Settings) -> Option for EqCmpop { /// ## References /// - [PEP 8](https://peps.python.org/pep-0008/#programming-recommendations) #[violation] -pub struct NoneComparison(pub EqCmpop); +pub struct NoneComparison(EqCmpop); impl AlwaysAutofixableViolation for NoneComparison { #[derive_message_formats] @@ -97,7 +97,7 @@ impl AlwaysAutofixableViolation for NoneComparison { /// ## References /// - [PEP 8](https://peps.python.org/pep-0008/#programming-recommendations) #[violation] -pub struct TrueFalseComparison(pub bool, pub EqCmpop); +pub struct TrueFalseComparison(bool, EqCmpop); impl AlwaysAutofixableViolation for TrueFalseComparison { #[derive_message_formats] diff --git a/crates/ruff/src/rules/pycodestyle/rules/logical_lines/missing_whitespace.rs b/crates/ruff/src/rules/pycodestyle/rules/logical_lines/missing_whitespace.rs index ab289a7863364..b10e1b79fac0f 100644 --- a/crates/ruff/src/rules/pycodestyle/rules/logical_lines/missing_whitespace.rs +++ b/crates/ruff/src/rules/pycodestyle/rules/logical_lines/missing_whitespace.rs @@ -1,10 +1,13 @@ -use super::LogicalLine; -use crate::checkers::logical_lines::LogicalLinesContext; +use ruff_text_size::TextSize; + use ruff_diagnostics::Edit; use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Fix}; use ruff_macros::{derive_message_formats, violation}; use ruff_python_ast::token_kind::TokenKind; -use ruff_text_size::TextSize; + +use crate::checkers::logical_lines::LogicalLinesContext; + +use super::LogicalLine; #[violation] pub struct MissingWhitespace { diff --git a/crates/ruff/src/rules/pycodestyle/rules/logical_lines/missing_whitespace_after_keyword.rs b/crates/ruff/src/rules/pycodestyle/rules/logical_lines/missing_whitespace_after_keyword.rs index cb249accbc4c1..19ee6a1e4e201 100644 --- a/crates/ruff/src/rules/pycodestyle/rules/logical_lines/missing_whitespace_after_keyword.rs +++ b/crates/ruff/src/rules/pycodestyle/rules/logical_lines/missing_whitespace_after_keyword.rs @@ -1,9 +1,10 @@ -use crate::checkers::logical_lines::LogicalLinesContext; -use crate::rules::pycodestyle::rules::logical_lines::LogicalLine; use ruff_diagnostics::Violation; use ruff_macros::{derive_message_formats, violation}; use ruff_python_ast::token_kind::TokenKind; +use crate::checkers::logical_lines::LogicalLinesContext; +use crate::rules::pycodestyle::rules::logical_lines::LogicalLine; + #[violation] pub struct MissingWhitespaceAfterKeyword; diff --git a/crates/ruff/src/rules/pycodestyle/rules/logical_lines/space_around_operator.rs b/crates/ruff/src/rules/pycodestyle/rules/logical_lines/space_around_operator.rs index beb2e9552ef6f..5c111773def67 100644 --- a/crates/ruff/src/rules/pycodestyle/rules/logical_lines/space_around_operator.rs +++ b/crates/ruff/src/rules/pycodestyle/rules/logical_lines/space_around_operator.rs @@ -1,11 +1,13 @@ use ruff_text_size::TextRange; -use super::{LogicalLine, Whitespace}; -use crate::checkers::logical_lines::LogicalLinesContext; use ruff_diagnostics::Violation; use ruff_macros::{derive_message_formats, violation}; use ruff_python_ast::token_kind::TokenKind; +use crate::checkers::logical_lines::LogicalLinesContext; + +use 
super::{LogicalLine, Whitespace}; + /// ## What it does /// Checks for extraneous tabs before an operator. /// diff --git a/crates/ruff/src/rules/pycodestyle/rules/logical_lines/whitespace_around_keywords.rs b/crates/ruff/src/rules/pycodestyle/rules/logical_lines/whitespace_around_keywords.rs index a181c925a87aa..d65d1a169328c 100644 --- a/crates/ruff/src/rules/pycodestyle/rules/logical_lines/whitespace_around_keywords.rs +++ b/crates/ruff/src/rules/pycodestyle/rules/logical_lines/whitespace_around_keywords.rs @@ -1,8 +1,11 @@ -use super::{LogicalLine, Whitespace}; -use crate::checkers::logical_lines::LogicalLinesContext; +use ruff_text_size::TextRange; + use ruff_diagnostics::Violation; use ruff_macros::{derive_message_formats, violation}; -use ruff_text_size::TextRange; + +use crate::checkers::logical_lines::LogicalLinesContext; + +use super::{LogicalLine, Whitespace}; /// ## What it does /// Checks for extraneous whitespace after keywords. diff --git a/crates/ruff/src/rules/pycodestyle/rules/logical_lines/whitespace_around_named_parameter_equals.rs b/crates/ruff/src/rules/pycodestyle/rules/logical_lines/whitespace_around_named_parameter_equals.rs index 935373c726a09..080afb2d329a2 100644 --- a/crates/ruff/src/rules/pycodestyle/rules/logical_lines/whitespace_around_named_parameter_equals.rs +++ b/crates/ruff/src/rules/pycodestyle/rules/logical_lines/whitespace_around_named_parameter_equals.rs @@ -1,9 +1,11 @@ -use crate::checkers::logical_lines::LogicalLinesContext; -use crate::rules::pycodestyle::rules::logical_lines::{LogicalLine, LogicalLineToken}; +use ruff_text_size::{TextRange, TextSize}; + use ruff_diagnostics::Violation; use ruff_macros::{derive_message_formats, violation}; use ruff_python_ast::token_kind::TokenKind; -use ruff_text_size::{TextRange, TextSize}; + +use crate::checkers::logical_lines::LogicalLinesContext; +use crate::rules::pycodestyle::rules::logical_lines::{LogicalLine, LogicalLineToken}; #[violation] pub struct UnexpectedSpacesAroundKeywordParameterEquals; diff --git a/crates/ruff/src/rules/pycodestyle/rules/logical_lines/whitespace_before_comment.rs b/crates/ruff/src/rules/pycodestyle/rules/logical_lines/whitespace_before_comment.rs index 30cf2a149c601..455bd5f0f34d9 100644 --- a/crates/ruff/src/rules/pycodestyle/rules/logical_lines/whitespace_before_comment.rs +++ b/crates/ruff/src/rules/pycodestyle/rules/logical_lines/whitespace_before_comment.rs @@ -1,10 +1,12 @@ -use crate::checkers::logical_lines::LogicalLinesContext; -use crate::rules::pycodestyle::rules::logical_lines::LogicalLine; +use ruff_text_size::{TextLen, TextRange, TextSize}; + use ruff_diagnostics::Violation; use ruff_macros::{derive_message_formats, violation}; use ruff_python_ast::source_code::Locator; use ruff_python_ast::token_kind::TokenKind; -use ruff_text_size::{TextLen, TextRange, TextSize}; + +use crate::checkers::logical_lines::LogicalLinesContext; +use crate::rules::pycodestyle::rules::logical_lines::LogicalLine; /// ## What it does /// Checks if inline comments are separated by at least two spaces. 
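
The pycodestyle and isort hunks in this patch replace the plain `usize` line length with the new `LineLength`, `LineWidth`, and `TabSize` types, so that E501, W505, and import wrapping measure rendered width (tabs and wide Unicode characters) instead of raw character count — which is why the new `tab_size_*` and `W505_utf_8` snapshots differ. As a rough illustration only, here is a minimal, self-contained sketch of such a width accumulator built on the `unicode-width` crate that the diff already imports; the `line_width` name and the tab-stop rounding rule are assumptions for illustration, not Ruff's actual `LineWidth` implementation.

```rust
// Minimal sketch of a width-aware line measure (illustration only, not Ruff's LineWidth).
// Assumption: a tab advances to the next multiple of `tab_size`; every other character
// contributes its Unicode display width (emoji and fullwidth characters count as 2).
use unicode_width::UnicodeWidthChar;

fn line_width(line: &str, tab_size: usize) -> usize {
    let mut width = 0;
    for c in line.chars() {
        if c == '\t' {
            // Jump to the next tab stop.
            width += tab_size - (width % tab_size);
        } else {
            width += c.width().unwrap_or(0);
        }
    }
    width
}

fn main() {
    // "πŸ’£" has display width 2, so this line measures wider than its char count.
    let line = "x = \"πŸ’£πŸ’£πŸ’£\"";
    assert_eq!(line.chars().count(), 9);
    assert_eq!(line_width(line, 8), 12);
    // A leading tab consumes a full tab stop, which is why the E501_2.py snapshots
    // above report different overlong ranges for tab sizes 1, 2, 4, and 8.
    assert_eq!(line_width("\tx = 1", 4), 9);
    println!("ok");
}
```

A measure like this explains the snapshot churn: the reported width in "Line too long (N > 88 characters)" grows with the configured tab size even though the underlying text is unchanged.
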
diff --git a/crates/ruff/src/rules/pycodestyle/rules/logical_lines/whitespace_before_parameters.rs b/crates/ruff/src/rules/pycodestyle/rules/logical_lines/whitespace_before_parameters.rs index 0c5434509c260..a65220fa74db5 100644 --- a/crates/ruff/src/rules/pycodestyle/rules/logical_lines/whitespace_before_parameters.rs +++ b/crates/ruff/src/rules/pycodestyle/rules/logical_lines/whitespace_before_parameters.rs @@ -1,9 +1,11 @@ -use crate::checkers::logical_lines::LogicalLinesContext; -use crate::rules::pycodestyle::rules::logical_lines::LogicalLine; +use ruff_text_size::{TextRange, TextSize}; + use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit, Fix}; use ruff_macros::{derive_message_formats, violation}; use ruff_python_ast::token_kind::TokenKind; -use ruff_text_size::{TextRange, TextSize}; + +use crate::checkers::logical_lines::LogicalLinesContext; +use crate::rules::pycodestyle::rules::logical_lines::LogicalLine; #[violation] pub struct WhitespaceBeforeParameters { diff --git a/crates/ruff/src/rules/pycodestyle/rules/mixed_spaces_and_tabs.rs b/crates/ruff/src/rules/pycodestyle/rules/mixed_spaces_and_tabs.rs index c296adeb89bef..a2a7966a403d4 100644 --- a/crates/ruff/src/rules/pycodestyle/rules/mixed_spaces_and_tabs.rs +++ b/crates/ruff/src/rules/pycodestyle/rules/mixed_spaces_and_tabs.rs @@ -1,8 +1,9 @@ +use ruff_text_size::{TextLen, TextRange}; + use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; use ruff_python_ast::newlines::Line; use ruff_python_ast::whitespace::leading_space; -use ruff_text_size::{TextLen, TextRange}; /// ## What it does /// Checks for mixed tabs and spaces in indentation. diff --git a/crates/ruff/src/rules/pycodestyle/rules/type_comparison.rs b/crates/ruff/src/rules/pycodestyle/rules/type_comparison.rs index e4d93a02b360f..7cec3e0cb4670 100644 --- a/crates/ruff/src/rules/pycodestyle/rules/type_comparison.rs +++ b/crates/ruff/src/rules/pycodestyle/rules/type_comparison.rs @@ -1,10 +1,11 @@ use itertools::izip; use rustpython_parser::ast::{self, Cmpop, Constant, Expr, Ranged}; -use crate::checkers::ast::Checker; use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; +use crate::checkers::ast::Checker; + /// ## What it does /// Checks for object type comparisons without using isinstance(). /// @@ -51,7 +52,7 @@ pub(crate) fn type_comparison( Expr::Call(ast::ExprCall { func, args, .. }) => { if let Expr::Name(ast::ExprName { id, .. }) = func.as_ref() { // Ex) `type(False)` - if id == "type" && checker.ctx.is_builtin("type") { + if id == "type" && checker.semantic_model().is_builtin("type") { if let Some(arg) = args.first() { // Allow comparison for types which are not obvious. if !matches!( @@ -75,12 +76,12 @@ pub(crate) fn type_comparison( if let Expr::Name(ast::ExprName { id, .. }) = value.as_ref() { // Ex) `types.NoneType` if id == "types" - && checker - .ctx - .resolve_call_path(value) - .map_or(false, |call_path| { + && checker.semantic_model().resolve_call_path(value).map_or( + false, + |call_path| { call_path.first().map_or(false, |module| *module == "types") - }) + }, + ) { checker .diagnostics diff --git a/crates/ruff/src/rules/pycodestyle/settings.rs b/crates/ruff/src/rules/pycodestyle/settings.rs index 06204f29bba3c..7366930d109e4 100644 --- a/crates/ruff/src/rules/pycodestyle/settings.rs +++ b/crates/ruff/src/rules/pycodestyle/settings.rs @@ -1,8 +1,11 @@ //! Settings for the `pycodestyle` plugin. 
-use ruff_macros::{CacheKey, CombineOptions, ConfigurationOptions}; use serde::{Deserialize, Serialize}; +use ruff_macros::{CacheKey, CombineOptions, ConfigurationOptions}; + +use crate::line_width::LineLength; + #[derive( Debug, PartialEq, Eq, Serialize, Deserialize, Default, ConfigurationOptions, CombineOptions, )] @@ -18,7 +21,7 @@ pub struct Options { )] /// The maximum line length to allow for line-length violations within /// documentation (`W505`), including standalone comments. - pub max_doc_length: Option, + pub max_doc_length: Option, #[option( default = "false", value_type = "bool", @@ -34,7 +37,7 @@ pub struct Options { #[derive(Debug, Default, CacheKey)] pub struct Settings { - pub max_doc_length: Option, + pub max_doc_length: Option, pub ignore_overlong_task_comments: bool, } diff --git a/crates/ruff/src/rules/pycodestyle/snapshots/ruff__rules__pycodestyle__tests__E402_E402.py.snap b/crates/ruff/src/rules/pycodestyle/snapshots/ruff__rules__pycodestyle__tests__E402_E402.py.snap index c5ab26ba38029..b2e69a17baac0 100644 --- a/crates/ruff/src/rules/pycodestyle/snapshots/ruff__rules__pycodestyle__tests__E402_E402.py.snap +++ b/crates/ruff/src/rules/pycodestyle/snapshots/ruff__rules__pycodestyle__tests__E402_E402.py.snap @@ -3,7 +3,7 @@ source: crates/ruff/src/rules/pycodestyle/mod.rs --- E402.py:24:1: E402 Module level import not at top of file | -24 | y = x + 1 +24 | __some__magic = 1 25 | 26 | import f | ^^^^^^^^ E402 diff --git a/crates/ruff/src/rules/pycodestyle/snapshots/ruff__rules__pycodestyle__tests__max_doc_length_with_utf_8.snap b/crates/ruff/src/rules/pycodestyle/snapshots/ruff__rules__pycodestyle__tests__max_doc_length_with_utf_8.snap new file mode 100644 index 0000000000000..17d2ed3c77438 --- /dev/null +++ b/crates/ruff/src/rules/pycodestyle/snapshots/ruff__rules__pycodestyle__tests__max_doc_length_with_utf_8.snap @@ -0,0 +1,62 @@ +--- +source: crates/ruff/src/rules/pycodestyle/mod.rs +--- +W505_utf_8.py:2:50: W505 Doc line too long (57 > 50 characters) + | +2 | #!/usr/bin/env python3 +3 | """Here's a top-level ß9πŸ’£2ℝing that's over theß9πŸ’£2ℝ.""" + | ^^^^^^ W505 + | + +W505_utf_8.py:6:49: W505 Doc line too long (56 > 50 characters) + | +6 | def f1(): +7 | """Here's a ß9πŸ’£2ℝing that's also over theß9πŸ’£2ℝ.""" + | ^^^^^^ W505 +8 | +9 | x = 1 # Here's a comment that's over theß9πŸ’£2ℝ, but it's not standalone. + | + +W505_utf_8.py:10:51: W505 Doc line too long (56 > 50 characters) + | +10 | x = 1 # Here's a comment that's over theß9πŸ’£2ℝ, but it's not standalone. +11 | +12 | # Here's a standalone comment that's over theß9πŸ’£2ℝ. + | ^^^^^^ W505 +13 | +14 | x = 2 + | + +W505_utf_8.py:13:51: W505 Doc line too long (93 > 50 characters) + | +13 | x = 2 +14 | # Another standalone that is preceded by a newline and indent toke and is over theß9πŸ’£2ℝ. + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ W505 +15 | +16 | print("Here's a string that's over theß9πŸ’£2ℝ, but it's not a ß9πŸ’£2ℝing.") + | + +W505_utf_8.py:18:50: W505 Doc line too long (61 > 50 characters) + | +18 | "This is also considered a ß9πŸ’£2ℝing, and is over theß9πŸ’£2ℝ." + | ^^^^^^^^^^^ W505 + | + +W505_utf_8.py:24:50: W505 Doc line too long (82 > 50 characters) + | +24 | """Here's a multi-line ß9πŸ’£2ℝing. +25 | +26 | It's over theß9πŸ’£2ℝ on this line, which isn't the first line in the ß9πŸ’£2ℝing. + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ W505 +27 | """ + | + +W505_utf_8.py:31:50: W505 Doc line too long (85 > 50 characters) + | +31 | """Here's a multi-line ß9πŸ’£2ℝing. 
+32 | +33 | It's over theß9πŸ’£2ℝ on this line, which isn't the first line in the ß9πŸ’£2ℝing.""" + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ W505 + | + + diff --git a/crates/ruff/src/rules/pycodestyle/snapshots/ruff__rules__pycodestyle__tests__tab_size_1.snap b/crates/ruff/src/rules/pycodestyle/snapshots/ruff__rules__pycodestyle__tests__tab_size_1.snap new file mode 100644 index 0000000000000..674ac41bd6b5a --- /dev/null +++ b/crates/ruff/src/rules/pycodestyle/snapshots/ruff__rules__pycodestyle__tests__tab_size_1.snap @@ -0,0 +1,56 @@ +--- +source: crates/ruff/src/rules/pycodestyle/mod.rs +--- +E501_2.py:1:81: E501 Line too long (89 > 88 characters) + | +1 | a = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | ^ E501 +2 | a = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | + +E501_2.py:2:81: E501 Line too long (89 > 88 characters) + | +2 | a = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" +3 | a = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | ^ E501 +4 | +5 | b = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | + +E501_2.py:4:81: E501 Line too long (89 > 88 characters) + | +4 | a = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" +5 | +6 | b = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | ^ E501 +7 | b = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | + +E501_2.py:5:81: E501 Line too long (89 > 88 characters) + | +5 | b = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" +6 | b = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | ^ E501 +7 | +8 | c = """2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | + +E501_2.py:7:82: E501 Line too long (89 > 88 characters) + | + 7 | b = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + 8 | + 9 | c = """2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | ^ E501 +10 | c = """2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | + +E501_2.py:10:82: E501 Line too long (89 > 88 characters) + | +10 | c = """2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" +11 | +12 | d = """πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | ^ E501 +13 | d = """πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | + + diff --git a/crates/ruff/src/rules/pycodestyle/snapshots/ruff__rules__pycodestyle__tests__tab_size_2.snap b/crates/ruff/src/rules/pycodestyle/snapshots/ruff__rules__pycodestyle__tests__tab_size_2.snap new file mode 100644 index 0000000000000..674ac41bd6b5a --- /dev/null +++ b/crates/ruff/src/rules/pycodestyle/snapshots/ruff__rules__pycodestyle__tests__tab_size_2.snap @@ -0,0 +1,56 @@ +--- +source: crates/ruff/src/rules/pycodestyle/mod.rs +--- +E501_2.py:1:81: E501 Line too long (89 > 88 characters) + | +1 | a = 
"""ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | ^ E501 +2 | a = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | + +E501_2.py:2:81: E501 Line too long (89 > 88 characters) + | +2 | a = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" +3 | a = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | ^ E501 +4 | +5 | b = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | + +E501_2.py:4:81: E501 Line too long (89 > 88 characters) + | +4 | a = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" +5 | +6 | b = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | ^ E501 +7 | b = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | + +E501_2.py:5:81: E501 Line too long (89 > 88 characters) + | +5 | b = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" +6 | b = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | ^ E501 +7 | +8 | c = """2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | + +E501_2.py:7:82: E501 Line too long (89 > 88 characters) + | + 7 | b = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + 8 | + 9 | c = """2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | ^ E501 +10 | c = """2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | + +E501_2.py:10:82: E501 Line too long (89 > 88 characters) + | +10 | c = """2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" +11 | +12 | d = """πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | ^ E501 +13 | d = """πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | + + diff --git a/crates/ruff/src/rules/pycodestyle/snapshots/ruff__rules__pycodestyle__tests__tab_size_4.snap b/crates/ruff/src/rules/pycodestyle/snapshots/ruff__rules__pycodestyle__tests__tab_size_4.snap new file mode 100644 index 0000000000000..ef6c75a66d0da --- /dev/null +++ b/crates/ruff/src/rules/pycodestyle/snapshots/ruff__rules__pycodestyle__tests__tab_size_4.snap @@ -0,0 +1,65 @@ +--- +source: crates/ruff/src/rules/pycodestyle/mod.rs +--- +E501_2.py:1:81: E501 Line too long (89 > 88 characters) + | +1 | a = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | ^ E501 +2 | a = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | + +E501_2.py:2:77: E501 Line too long (93 > 88 characters) + | +2 | a = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" +3 | a = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | ^^^^^ E501 +4 | +5 | b = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | + +E501_2.py:4:81: E501 Line too long (89 > 88 characters) + | +4 | a = 
"""ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" +5 | +6 | b = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | ^ E501 +7 | b = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | + +E501_2.py:5:77: E501 Line too long (93 > 88 characters) + | +5 | b = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" +6 | b = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | ^^^^^ E501 +7 | +8 | c = """2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | + +E501_2.py:7:82: E501 Line too long (89 > 88 characters) + | + 7 | b = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + 8 | + 9 | c = """2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | ^ E501 +10 | c = """2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | + +E501_2.py:8:78: E501 Line too long (89 > 88 characters) + | + 8 | c = """2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + 9 | c = """2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | ^ E501 +10 | +11 | d = """πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | + +E501_2.py:10:82: E501 Line too long (89 > 88 characters) + | +10 | c = """2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" +11 | +12 | d = """πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | ^ E501 +13 | d = """πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | + + diff --git a/crates/ruff/src/rules/pycodestyle/snapshots/ruff__rules__pycodestyle__tests__tab_size_8.snap b/crates/ruff/src/rules/pycodestyle/snapshots/ruff__rules__pycodestyle__tests__tab_size_8.snap new file mode 100644 index 0000000000000..139405c99331d --- /dev/null +++ b/crates/ruff/src/rules/pycodestyle/snapshots/ruff__rules__pycodestyle__tests__tab_size_8.snap @@ -0,0 +1,72 @@ +--- +source: crates/ruff/src/rules/pycodestyle/mod.rs +--- +E501_2.py:1:81: E501 Line too long (89 > 88 characters) + | +1 | a = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | ^ E501 +2 | a = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | + +E501_2.py:2:70: E501 Line too long (101 > 88 characters) + | +2 | a = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" +3 | a = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | ^^^^^^^^^^^^^ E501 +4 | +5 | b = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | + +E501_2.py:4:81: E501 Line too long (89 > 88 characters) + | +4 | a = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" +5 | +6 | b = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | ^ E501 +7 | b = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | + 
+E501_2.py:5:70: E501 Line too long (101 > 88 characters) + | +5 | b = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" +6 | b = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | ^^^^^^^^^^^^^ E501 +7 | +8 | c = """2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | + +E501_2.py:7:82: E501 Line too long (89 > 88 characters) + | + 7 | b = """ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + 8 | + 9 | c = """2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | ^ E501 +10 | c = """2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | + +E501_2.py:8:71: E501 Line too long (97 > 88 characters) + | + 8 | c = """2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + 9 | c = """2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | ^^^^^^^^ E501 +10 | +11 | d = """πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | + +E501_2.py:10:82: E501 Line too long (89 > 88 characters) + | +10 | c = """2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" +11 | +12 | d = """πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | ^ E501 +13 | d = """πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | + +E501_2.py:11:70: E501 Line too long (89 > 88 characters) + | +11 | d = """πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" +12 | d = """πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A67ß9πŸ’£2ℝ4A6""" + | ^ E501 + | + + diff --git a/crates/ruff/src/rules/pydocstyle/helpers.rs b/crates/ruff/src/rules/pydocstyle/helpers.rs index c63964ba195ed..3925b0680ad8a 100644 --- a/crates/ruff/src/rules/pydocstyle/helpers.rs +++ b/crates/ruff/src/rules/pydocstyle/helpers.rs @@ -6,8 +6,7 @@ use ruff_python_ast::helpers::map_callable; use ruff_python_ast::newlines::StrExt; use ruff_python_ast::str::is_implicit_concatenation; use ruff_python_semantic::definition::{Definition, Member, MemberKind}; - -use crate::checkers::ast::Checker; +use ruff_python_semantic::model::SemanticModel; /// Return the index of the first logical line in a string. pub(crate) fn logical_line(content: &str) -> Option { @@ -37,7 +36,7 @@ pub(crate) fn normalize_word(first_word: &str) -> String { /// Check decorator list to see if function should be ignored. 
pub(crate) fn should_ignore_definition( - checker: &Checker, + model: &SemanticModel, definition: &Definition, ignore_decorators: &BTreeSet, ) -> bool { @@ -52,7 +51,7 @@ pub(crate) fn should_ignore_definition( }) = definition { for decorator in cast::decorator_list(stmt) { - if let Some(call_path) = checker.ctx.resolve_call_path(map_callable(decorator)) { + if let Some(call_path) = model.resolve_call_path(map_callable(decorator)) { if ignore_decorators .iter() .any(|decorator| from_qualified_name(decorator) == call_path) diff --git a/crates/ruff/src/rules/pydocstyle/mod.rs b/crates/ruff/src/rules/pydocstyle/mod.rs index 80ddf076f5d7b..86206741d3699 100644 --- a/crates/ruff/src/rules/pydocstyle/mod.rs +++ b/crates/ruff/src/rules/pydocstyle/mod.rs @@ -9,7 +9,6 @@ mod tests { use std::path::Path; use anyhow::Result; - use test_case::test_case; use crate::registry::Rule; diff --git a/crates/ruff/src/rules/pydocstyle/rules/blank_before_after_class.rs b/crates/ruff/src/rules/pydocstyle/rules/blank_before_after_class.rs index a07bf56ff8c26..1f8634cc1ab3c 100644 --- a/crates/ruff/src/rules/pydocstyle/rules/blank_before_after_class.rs +++ b/crates/ruff/src/rules/pydocstyle/rules/blank_before_after_class.rs @@ -1,9 +1,10 @@ +use ruff_text_size::{TextLen, TextRange}; +use rustpython_parser::ast::Ranged; + use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit, Fix}; use ruff_macros::{derive_message_formats, violation}; use ruff_python_ast::newlines::{StrExt, UniversalNewlineIterator}; use ruff_python_semantic::definition::{Definition, Member, MemberKind}; -use ruff_text_size::{TextLen, TextRange}; -use rustpython_parser::ast::Ranged; use crate::checkers::ast::Checker; use crate::docstrings::Docstring; @@ -67,11 +68,7 @@ pub(crate) fn blank_before_after_class(checker: &mut Checker, docstring: &Docstr return; }; - if checker - .settings - .rules - .enabled(Rule::OneBlankLineBeforeClass) - || checker.settings.rules.enabled(Rule::BlankLineBeforeClass) + if checker.enabled(Rule::OneBlankLineBeforeClass) || checker.enabled(Rule::BlankLineBeforeClass) { let before = checker .locator @@ -90,7 +87,7 @@ pub(crate) fn blank_before_after_class(checker: &mut Checker, docstring: &Docstr } } - if checker.settings.rules.enabled(Rule::BlankLineBeforeClass) { + if checker.enabled(Rule::BlankLineBeforeClass) { if blank_lines_before != 0 { let mut diagnostic = Diagnostic::new( BlankLineBeforeClass { @@ -109,11 +106,7 @@ pub(crate) fn blank_before_after_class(checker: &mut Checker, docstring: &Docstr checker.diagnostics.push(diagnostic); } } - if checker - .settings - .rules - .enabled(Rule::OneBlankLineBeforeClass) - { + if checker.enabled(Rule::OneBlankLineBeforeClass) { if blank_lines_before != 1 { let mut diagnostic = Diagnostic::new( OneBlankLineBeforeClass { @@ -135,7 +128,7 @@ pub(crate) fn blank_before_after_class(checker: &mut Checker, docstring: &Docstr } } - if checker.settings.rules.enabled(Rule::OneBlankLineAfterClass) { + if checker.enabled(Rule::OneBlankLineAfterClass) { let after = checker .locator .slice(TextRange::new(docstring.end(), stmt.end())); diff --git a/crates/ruff/src/rules/pydocstyle/rules/blank_before_after_function.rs b/crates/ruff/src/rules/pydocstyle/rules/blank_before_after_function.rs index 41469ec9d37dc..c72b882d41ef4 100644 --- a/crates/ruff/src/rules/pydocstyle/rules/blank_before_after_function.rs +++ b/crates/ruff/src/rules/pydocstyle/rules/blank_before_after_function.rs @@ -59,11 +59,7 @@ pub(crate) fn blank_before_after_function(checker: &mut Checker, docstring: &Doc 
return; }; - if checker - .settings - .rules - .enabled(Rule::NoBlankLineBeforeFunction) - { + if checker.enabled(Rule::NoBlankLineBeforeFunction) { let before = checker .locator .slice(TextRange::new(stmt.start(), docstring.start())); @@ -100,11 +96,7 @@ pub(crate) fn blank_before_after_function(checker: &mut Checker, docstring: &Doc } } - if checker - .settings - .rules - .enabled(Rule::NoBlankLineAfterFunction) - { + if checker.enabled(Rule::NoBlankLineAfterFunction) { let after = checker .locator .slice(TextRange::new(docstring.end(), stmt.end())); diff --git a/crates/ruff/src/rules/pydocstyle/rules/if_needed.rs b/crates/ruff/src/rules/pydocstyle/rules/if_needed.rs index 87cf83fee442b..1ef2016d5ae0f 100644 --- a/crates/ruff/src/rules/pydocstyle/rules/if_needed.rs +++ b/crates/ruff/src/rules/pydocstyle/rules/if_needed.rs @@ -27,7 +27,7 @@ pub(crate) fn if_needed(checker: &mut Checker, docstring: &Docstring) { }) = docstring.definition else { return; }; - if !is_overload(&checker.ctx, cast::decorator_list(stmt)) { + if !is_overload(checker.semantic_model(), cast::decorator_list(stmt)) { return; } checker.diagnostics.push(Diagnostic::new( diff --git a/crates/ruff/src/rules/pydocstyle/rules/indent.rs b/crates/ruff/src/rules/pydocstyle/rules/indent.rs index e76c3bf420ec2..0cec2598d5aab 100644 --- a/crates/ruff/src/rules/pydocstyle/rules/indent.rs +++ b/crates/ruff/src/rules/pydocstyle/rules/indent.rs @@ -1,9 +1,10 @@ +use ruff_text_size::{TextLen, TextRange}; + use ruff_diagnostics::{AlwaysAutofixableViolation, Violation}; use ruff_diagnostics::{Diagnostic, Edit, Fix}; use ruff_macros::{derive_message_formats, violation}; use ruff_python_ast::newlines::NewlineWithTrailingNewline; use ruff_python_ast::whitespace; -use ruff_text_size::{TextLen, TextRange}; use crate::checkers::ast::Checker; use crate::docstrings::Docstring; @@ -81,7 +82,7 @@ pub(crate) fn indent(checker: &mut Checker, docstring: &Docstring) { // yet. has_seen_tab = has_seen_tab || line_indent.contains('\t'); - if checker.settings.rules.enabled(Rule::UnderIndentation) { + if checker.enabled(Rule::UnderIndentation) { // We report under-indentation on every line. This isn't great, but enables // autofix. if (i == lines.len() - 1 || !is_blank) @@ -115,7 +116,7 @@ pub(crate) fn indent(checker: &mut Checker, docstring: &Docstring) { } } - if checker.settings.rules.enabled(Rule::IndentWithSpaces) { + if checker.enabled(Rule::IndentWithSpaces) { if has_seen_tab { checker .diagnostics @@ -123,7 +124,7 @@ pub(crate) fn indent(checker: &mut Checker, docstring: &Docstring) { } } - if checker.settings.rules.enabled(Rule::OverIndentation) { + if checker.enabled(Rule::OverIndentation) { // If every line (except the last) is over-indented... 
if is_over_indented { for over_indented in over_indented_lines { diff --git a/crates/ruff/src/rules/pydocstyle/rules/multi_line_summary_start.rs b/crates/ruff/src/rules/pydocstyle/rules/multi_line_summary_start.rs index c1ee8cf9a4188..80c260c5aa14e 100644 --- a/crates/ruff/src/rules/pydocstyle/rules/multi_line_summary_start.rs +++ b/crates/ruff/src/rules/pydocstyle/rules/multi_line_summary_start.rs @@ -60,11 +60,7 @@ pub(crate) fn multi_line_summary_start(checker: &mut Checker, docstring: &Docstr }; if is_triple_quote(&first_line) { - if checker - .settings - .rules - .enabled(Rule::MultiLineSummaryFirstLine) - { + if checker.enabled(Rule::MultiLineSummaryFirstLine) { let mut diagnostic = Diagnostic::new(MultiLineSummaryFirstLine, docstring.range()); if checker.patch(diagnostic.kind.rule()) { // Delete until first non-whitespace char. @@ -82,11 +78,7 @@ pub(crate) fn multi_line_summary_start(checker: &mut Checker, docstring: &Docstr checker.diagnostics.push(diagnostic); } } else { - if checker - .settings - .rules - .enabled(Rule::MultiLineSummarySecondLine) - { + if checker.enabled(Rule::MultiLineSummarySecondLine) { let mut diagnostic = Diagnostic::new(MultiLineSummarySecondLine, docstring.range()); if checker.patch(diagnostic.kind.rule()) { let mut indentation = String::from(docstring.indentation); diff --git a/crates/ruff/src/rules/pydocstyle/rules/newline_after_last_paragraph.rs b/crates/ruff/src/rules/pydocstyle/rules/newline_after_last_paragraph.rs index 34f3cf1047da4..2461d4df89d0b 100644 --- a/crates/ruff/src/rules/pydocstyle/rules/newline_after_last_paragraph.rs +++ b/crates/ruff/src/rules/pydocstyle/rules/newline_after_last_paragraph.rs @@ -1,9 +1,10 @@ +use ruff_text_size::{TextLen, TextSize}; +use rustpython_parser::ast::Ranged; + use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit, Fix}; use ruff_macros::{derive_message_formats, violation}; use ruff_python_ast::newlines::{NewlineWithTrailingNewline, StrExt}; use ruff_python_ast::whitespace; -use ruff_text_size::{TextLen, TextSize}; -use rustpython_parser::ast::Ranged; use crate::checkers::ast::Checker; use crate::docstrings::Docstring; diff --git a/crates/ruff/src/rules/pydocstyle/rules/no_surrounding_whitespace.rs b/crates/ruff/src/rules/pydocstyle/rules/no_surrounding_whitespace.rs index ea2e61a8b4a1b..8bd1682ffcfe1 100644 --- a/crates/ruff/src/rules/pydocstyle/rules/no_surrounding_whitespace.rs +++ b/crates/ruff/src/rules/pydocstyle/rules/no_surrounding_whitespace.rs @@ -1,7 +1,8 @@ +use ruff_text_size::{TextLen, TextRange}; + use ruff_diagnostics::{AutofixKind, Diagnostic, Edit, Fix, Violation}; use ruff_macros::{derive_message_formats, violation}; use ruff_python_ast::newlines::NewlineWithTrailingNewline; -use ruff_text_size::{TextLen, TextRange}; use crate::checkers::ast::Checker; use crate::docstrings::Docstring; diff --git a/crates/ruff/src/rules/pydocstyle/rules/non_imperative_mood.rs b/crates/ruff/src/rules/pydocstyle/rules/non_imperative_mood.rs index 39ec77b9ecc7f..a3df27817f36d 100644 --- a/crates/ruff/src/rules/pydocstyle/rules/non_imperative_mood.rs +++ b/crates/ruff/src/rules/pydocstyle/rules/non_imperative_mood.rs @@ -41,7 +41,7 @@ pub(crate) fn non_imperative_mood( if is_test(cast::name(stmt)) || is_property( - &checker.ctx, + checker.semantic_model(), cast::decorator_list(stmt), &property_decorators, ) diff --git a/crates/ruff/src/rules/pydocstyle/rules/not_empty.rs b/crates/ruff/src/rules/pydocstyle/rules/not_empty.rs index 804bea5ee06b6..c349336e7c1a6 100644 --- 
a/crates/ruff/src/rules/pydocstyle/rules/not_empty.rs +++ b/crates/ruff/src/rules/pydocstyle/rules/not_empty.rs @@ -21,7 +21,7 @@ pub(crate) fn not_empty(checker: &mut Checker, docstring: &Docstring) -> bool { return true; } - if checker.settings.rules.enabled(Rule::EmptyDocstring) { + if checker.enabled(Rule::EmptyDocstring) { checker .diagnostics .push(Diagnostic::new(EmptyDocstring, docstring.range())); diff --git a/crates/ruff/src/rules/pydocstyle/rules/not_missing.rs b/crates/ruff/src/rules/pydocstyle/rules/not_missing.rs index 82d29685c4a70..a4ecde3e008fc 100644 --- a/crates/ruff/src/rules/pydocstyle/rules/not_missing.rs +++ b/crates/ruff/src/rules/pydocstyle/rules/not_missing.rs @@ -1,3 +1,5 @@ +use ruff_text_size::TextRange; + use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; use ruff_python_ast::cast; @@ -6,10 +8,8 @@ use ruff_python_semantic::analyze::visibility::{ is_call, is_init, is_magic, is_new, is_overload, is_override, Visibility, }; use ruff_python_semantic::definition::{Definition, Member, MemberKind, Module, ModuleKind}; -use ruff_text_size::TextRange; use crate::checkers::ast::Checker; - use crate::registry::Rule; #[violation] @@ -107,11 +107,7 @@ pub(crate) fn not_missing( kind: ModuleKind::Module, .. }) => { - if checker - .settings - .rules - .enabled(Rule::UndocumentedPublicModule) - { + if checker.enabled(Rule::UndocumentedPublicModule) { checker.diagnostics.push(Diagnostic::new( UndocumentedPublicModule, TextRange::default(), @@ -123,11 +119,7 @@ pub(crate) fn not_missing( kind: ModuleKind::Package, .. }) => { - if checker - .settings - .rules - .enabled(Rule::UndocumentedPublicPackage) - { + if checker.enabled(Rule::UndocumentedPublicPackage) { checker.diagnostics.push(Diagnostic::new( UndocumentedPublicPackage, TextRange::default(), @@ -140,11 +132,7 @@ pub(crate) fn not_missing( stmt, .. }) => { - if checker - .settings - .rules - .enabled(Rule::UndocumentedPublicClass) - { + if checker.enabled(Rule::UndocumentedPublicClass) { checker.diagnostics.push(Diagnostic::new( UndocumentedPublicClass, identifier_range(stmt, checker.locator), @@ -157,11 +145,7 @@ pub(crate) fn not_missing( stmt, .. }) => { - if checker - .settings - .rules - .enabled(Rule::UndocumentedPublicNestedClass) - { + if checker.enabled(Rule::UndocumentedPublicNestedClass) { checker.diagnostics.push(Diagnostic::new( UndocumentedPublicNestedClass, identifier_range(stmt, checker.locator), @@ -174,14 +158,10 @@ pub(crate) fn not_missing( stmt, .. }) => { - if is_overload(&checker.ctx, cast::decorator_list(stmt)) { + if is_overload(checker.semantic_model(), cast::decorator_list(stmt)) { true } else { - if checker - .settings - .rules - .enabled(Rule::UndocumentedPublicFunction) - { + if checker.enabled(Rule::UndocumentedPublicFunction) { checker.diagnostics.push(Diagnostic::new( UndocumentedPublicFunction, identifier_range(stmt, checker.locator), @@ -195,12 +175,12 @@ pub(crate) fn not_missing( stmt, .. 
}) => { - if is_overload(&checker.ctx, cast::decorator_list(stmt)) - || is_override(&checker.ctx, cast::decorator_list(stmt)) + if is_overload(checker.semantic_model(), cast::decorator_list(stmt)) + || is_override(checker.semantic_model(), cast::decorator_list(stmt)) { true } else if is_init(cast::name(stmt)) { - if checker.settings.rules.enabled(Rule::UndocumentedPublicInit) { + if checker.enabled(Rule::UndocumentedPublicInit) { checker.diagnostics.push(Diagnostic::new( UndocumentedPublicInit, identifier_range(stmt, checker.locator), @@ -208,11 +188,7 @@ pub(crate) fn not_missing( } true } else if is_new(cast::name(stmt)) || is_call(cast::name(stmt)) { - if checker - .settings - .rules - .enabled(Rule::UndocumentedPublicMethod) - { + if checker.enabled(Rule::UndocumentedPublicMethod) { checker.diagnostics.push(Diagnostic::new( UndocumentedPublicMethod, identifier_range(stmt, checker.locator), @@ -220,11 +196,7 @@ pub(crate) fn not_missing( } true } else if is_magic(cast::name(stmt)) { - if checker - .settings - .rules - .enabled(Rule::UndocumentedMagicMethod) - { + if checker.enabled(Rule::UndocumentedMagicMethod) { checker.diagnostics.push(Diagnostic::new( UndocumentedMagicMethod, identifier_range(stmt, checker.locator), @@ -232,11 +204,7 @@ pub(crate) fn not_missing( } true } else { - if checker - .settings - .rules - .enabled(Rule::UndocumentedPublicMethod) - { + if checker.enabled(Rule::UndocumentedPublicMethod) { checker.diagnostics.push(Diagnostic::new( UndocumentedPublicMethod, identifier_range(stmt, checker.locator), diff --git a/crates/ruff/src/rules/pydocstyle/rules/sections.rs b/crates/ruff/src/rules/pydocstyle/rules/sections.rs index 2f4b5adc5d63d..73099280b29d7 100644 --- a/crates/ruff/src/rules/pydocstyle/rules/sections.rs +++ b/crates/ruff/src/rules/pydocstyle/rules/sections.rs @@ -363,11 +363,7 @@ fn blanks_and_section_underline( if dash_line_found { if blank_lines_after_header > 0 { - if checker - .settings - .rules - .enabled(Rule::SectionUnderlineAfterName) - { + if checker.enabled(Rule::SectionUnderlineAfterName) { let mut diagnostic = Diagnostic::new( SectionUnderlineAfterName { name: context.section_name().to_string(), @@ -392,11 +388,7 @@ fn blanks_and_section_underline( .count() != context.section_name().len() { - if checker - .settings - .rules - .enabled(Rule::SectionUnderlineMatchesSectionLength) - { + if checker.enabled(Rule::SectionUnderlineMatchesSectionLength) { let mut diagnostic = Diagnostic::new( SectionUnderlineMatchesSectionLength { name: context.section_name().to_string(), @@ -422,11 +414,7 @@ fn blanks_and_section_underline( } } - if checker - .settings - .rules - .enabled(Rule::SectionUnderlineNotOverIndented) - { + if checker.enabled(Rule::SectionUnderlineNotOverIndented) { let leading_space = whitespace::leading_space(&non_blank_line); if leading_space.len() > docstring.indentation.len() { let mut diagnostic = Diagnostic::new( @@ -465,7 +453,7 @@ fn blanks_and_section_underline( } if following_lines.peek().is_none() { - if checker.settings.rules.enabled(Rule::EmptyDocstringSection) { + if checker.enabled(Rule::EmptyDocstringSection) { checker.diagnostics.push(Diagnostic::new( EmptyDocstringSection { name: context.section_name().to_string(), @@ -473,11 +461,7 @@ fn blanks_and_section_underline( docstring.range(), )); } - } else if checker - .settings - .rules - .enabled(Rule::BlankLinesBetweenHeaderAndContent) - { + } else if checker.enabled(Rule::BlankLinesBetweenHeaderAndContent) { let mut diagnostic = Diagnostic::new( 
BlankLinesBetweenHeaderAndContent { name: context.section_name().to_string(), @@ -496,7 +480,7 @@ fn blanks_and_section_underline( } } } else { - if checker.settings.rules.enabled(Rule::EmptyDocstringSection) { + if checker.enabled(Rule::EmptyDocstringSection) { checker.diagnostics.push(Diagnostic::new( EmptyDocstringSection { name: context.section_name().to_string(), @@ -506,11 +490,7 @@ fn blanks_and_section_underline( } } } else { - if checker - .settings - .rules - .enabled(Rule::DashedUnderlineAfterSection) - { + if checker.enabled(Rule::DashedUnderlineAfterSection) { let mut diagnostic = Diagnostic::new( DashedUnderlineAfterSection { name: context.section_name().to_string(), @@ -534,11 +514,7 @@ fn blanks_and_section_underline( checker.diagnostics.push(diagnostic); } if blank_lines_after_header > 0 { - if checker - .settings - .rules - .enabled(Rule::BlankLinesBetweenHeaderAndContent) - { + if checker.enabled(Rule::BlankLinesBetweenHeaderAndContent) { let mut diagnostic = Diagnostic::new( BlankLinesBetweenHeaderAndContent { name: context.section_name().to_string(), @@ -559,11 +535,7 @@ fn blanks_and_section_underline( } // Nothing but blank lines after the section header. else { - if checker - .settings - .rules - .enabled(Rule::DashedUnderlineAfterSection) - { + if checker.enabled(Rule::DashedUnderlineAfterSection) { let mut diagnostic = Diagnostic::new( DashedUnderlineAfterSection { name: context.section_name().to_string(), @@ -587,7 +559,7 @@ fn blanks_and_section_underline( } checker.diagnostics.push(diagnostic); } - if checker.settings.rules.enabled(Rule::EmptyDocstringSection) { + if checker.enabled(Rule::EmptyDocstringSection) { checker.diagnostics.push(Diagnostic::new( EmptyDocstringSection { name: context.section_name().to_string(), @@ -604,7 +576,7 @@ fn common_section( context: &SectionContext, next: Option<&SectionContext>, ) { - if checker.settings.rules.enabled(Rule::CapitalizeSectionName) { + if checker.enabled(Rule::CapitalizeSectionName) { let capitalized_section_name = context.kind().as_str(); if context.section_name() != capitalized_section_name { let mut diagnostic = Diagnostic::new( @@ -627,7 +599,7 @@ fn common_section( } } - if checker.settings.rules.enabled(Rule::SectionNotOverIndented) { + if checker.enabled(Rule::SectionNotOverIndented) { let leading_space = whitespace::leading_space(context.summary_line()); if leading_space.len() > docstring.indentation.len() { let mut diagnostic = Diagnostic::new( @@ -656,11 +628,7 @@ fn common_section( let last_line = context.following_lines().last(); if last_line.map_or(true, |line| !line.trim().is_empty()) { if let Some(next) = next { - if checker - .settings - .rules - .enabled(Rule::NoBlankLineAfterSection) - { + if checker.enabled(Rule::NoBlankLineAfterSection) { let mut diagnostic = Diagnostic::new( NoBlankLineAfterSection { name: context.section_name().to_string(), @@ -678,11 +646,7 @@ fn common_section( checker.diagnostics.push(diagnostic); } } else { - if checker - .settings - .rules - .enabled(Rule::BlankLineAfterLastSection) - { + if checker.enabled(Rule::BlankLineAfterLastSection) { let mut diagnostic = Diagnostic::new( BlankLineAfterLastSection { name: context.section_name().to_string(), @@ -702,11 +666,7 @@ fn common_section( } } - if checker - .settings - .rules - .enabled(Rule::NoBlankLineBeforeSection) - { + if checker.enabled(Rule::NoBlankLineBeforeSection) { if !context.previous_line().map_or(false, str::is_empty) { let mut diagnostic = Diagnostic::new( NoBlankLineBeforeSection { @@ -760,7 +720,7 @@ fn 
missing_args(checker: &mut Checker, docstring: &Docstring, docstrings_args: & // If this is a non-static method, skip `cls` or `self`. usize::from( docstring.definition.is_method() - && !is_staticmethod(&checker.ctx, cast::decorator_list(stmt)), + && !is_staticmethod(checker.semantic_model(), cast::decorator_list(stmt)), ), ) { @@ -904,11 +864,7 @@ fn numpy_section( ) { common_section(checker, docstring, context, next); - if checker - .settings - .rules - .enabled(Rule::NewLineAfterSectionName) - { + if checker.enabled(Rule::NewLineAfterSectionName) { let suffix = context.summary_after_section_name(); if !suffix.is_empty() { @@ -931,7 +887,7 @@ fn numpy_section( } } - if checker.settings.rules.enabled(Rule::UndocumentedParam) { + if checker.enabled(Rule::UndocumentedParam) { if matches!(context.kind(), SectionKind::Parameters) { parameters_section(checker, docstring, context); } @@ -946,7 +902,7 @@ fn google_section( ) { common_section(checker, docstring, context, next); - if checker.settings.rules.enabled(Rule::SectionNameEndsInColon) { + if checker.enabled(Rule::SectionNameEndsInColon) { let suffix = context.summary_after_section_name(); if suffix != ":" { let mut diagnostic = Diagnostic::new( @@ -990,7 +946,7 @@ fn parse_google_sections( google_section(checker, docstring, &context, iterator.peek()); } - if checker.settings.rules.enabled(Rule::UndocumentedParam) { + if checker.enabled(Rule::UndocumentedParam) { let mut has_args = false; let mut documented_args: FxHashSet = FxHashSet::default(); for section_context in section_contexts { diff --git a/crates/ruff/src/rules/pydocstyle/settings.rs b/crates/ruff/src/rules/pydocstyle/settings.rs index e3ce5bd399f8f..1ec4a9a7fc3b2 100644 --- a/crates/ruff/src/rules/pydocstyle/settings.rs +++ b/crates/ruff/src/rules/pydocstyle/settings.rs @@ -1,10 +1,13 @@ //! Settings for the `pydocstyle` plugin. -use crate::registry::Rule; -use ruff_macros::{CacheKey, CombineOptions, ConfigurationOptions}; -use serde::{Deserialize, Serialize}; use std::collections::BTreeSet; +use serde::{Deserialize, Serialize}; + +use ruff_macros::{CacheKey, CombineOptions, ConfigurationOptions}; + +use crate::registry::Rule; + #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, CacheKey)] #[serde(deny_unknown_fields, rename_all = "kebab-case")] #[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] diff --git a/crates/ruff/src/rules/pyflakes/fixes.rs b/crates/ruff/src/rules/pyflakes/fixes.rs index 904eba68e7bf1..6bc0e2aae3b78 100644 --- a/crates/ruff/src/rules/pyflakes/fixes.rs +++ b/crates/ruff/src/rules/pyflakes/fixes.rs @@ -1,18 +1,18 @@ use anyhow::{bail, Ok, Result}; use libcst_native::{Codegen, CodegenState, DictElement, Expression}; use ruff_text_size::TextRange; +use rustpython_format::{ + FieldName, FieldNamePart, FieldType, FormatPart, FormatString, FromTemplate, +}; use rustpython_parser::ast::{Excepthandler, Expr, Ranged}; use rustpython_parser::{lexer, Mode, Tok}; use ruff_diagnostics::Edit; use ruff_python_ast::source_code::{Locator, Stylist}; use ruff_python_ast::str::raw_contents; -use rustpython_format::{ - FieldName, FieldNamePart, FieldType, FormatPart, FormatString, FromTemplate, -}; use crate::cst::matchers::{ - match_attribute, match_call, match_dict, match_expression, match_simple_string, + match_attribute, match_call_mut, match_dict, match_expression, match_simple_string, }; /// Generate a [`Edit`] to remove unused keys from format dict. 
@@ -52,7 +52,7 @@ pub(crate) fn remove_unused_keyword_arguments_from_format_call( ) -> Result { let module_text = locator.slice(location); let mut tree = match_expression(module_text)?; - let call = match_call(&mut tree)?; + let call = match_call_mut(&mut tree)?; call.args .retain(|e| !matches!(&e.keyword, Some(kw) if unused_arguments.contains(&kw.value))); @@ -135,7 +135,7 @@ pub(crate) fn remove_unused_positional_arguments_from_format_call( ) -> Result { let module_text = locator.slice(location); let mut tree = match_expression(module_text)?; - let call = match_call(&mut tree)?; + let call = match_call_mut(&mut tree)?; let mut index = 0; call.args.retain(|_| { diff --git a/crates/ruff/src/rules/pyflakes/mod.rs b/crates/ruff/src/rules/pyflakes/mod.rs index 648f93ba118e0..052c759047e87 100644 --- a/crates/ruff/src/rules/pyflakes/mod.rs +++ b/crates/ruff/src/rules/pyflakes/mod.rs @@ -9,13 +9,12 @@ mod tests { use std::path::Path; use anyhow::Result; - use regex::Regex; - use ruff_diagnostics::Diagnostic; use rustpython_parser::lexer::LexResult; use test_case::test_case; use textwrap::dedent; + use ruff_diagnostics::Diagnostic; use ruff_python_ast::source_code::{Indexer, Locator, Stylist}; use crate::linter::{check_path, LinterResult}; @@ -36,6 +35,9 @@ mod tests { #[test_case(Rule::UnusedImport, Path::new("F401_9.py"); "F401_9")] #[test_case(Rule::UnusedImport, Path::new("F401_10.py"); "F401_10")] #[test_case(Rule::UnusedImport, Path::new("F401_11.py"); "F401_11")] + #[test_case(Rule::UnusedImport, Path::new("F401_12.py"); "F401_12")] + #[test_case(Rule::UnusedImport, Path::new("F401_13.py"); "F401_13")] + #[test_case(Rule::UnusedImport, Path::new("F401_14.py"); "F401_14")] #[test_case(Rule::ImportShadowedByLoopVar, Path::new("F402.py"); "F402")] #[test_case(Rule::UndefinedLocalWithImportStar, Path::new("F403.py"); "F403")] #[test_case(Rule::LateFutureImport, Path::new("F404.py"); "F404")] @@ -97,6 +99,8 @@ mod tests { #[test_case(Rule::RedefinedWhileUnused, Path::new("F811_20.py"); "F811_20")] #[test_case(Rule::RedefinedWhileUnused, Path::new("F811_21.py"); "F811_21")] #[test_case(Rule::RedefinedWhileUnused, Path::new("F811_22.py"); "F811_22")] + #[test_case(Rule::RedefinedWhileUnused, Path::new("F811_23.py"); "F811_23")] + #[test_case(Rule::RedefinedWhileUnused, Path::new("F811_24.py"); "F811_24")] #[test_case(Rule::UndefinedName, Path::new("F821_0.py"); "F821_0")] #[test_case(Rule::UndefinedName, Path::new("F821_1.py"); "F821_1")] #[test_case(Rule::UndefinedName, Path::new("F821_2.py"); "F821_2")] @@ -470,6 +474,16 @@ mod tests { "#, &[Rule::UndefinedName], ); + flakes( + r#" + def f(): + __qualname__ = 1 + + class Foo: + __qualname__ + "#, + &[Rule::UnusedVariable], + ); } #[test] @@ -1148,6 +1162,40 @@ mod tests { "#, &[], ); + flakes( + r#" + class Test(object): + print(__class__.__name__) + + def __init__(self): + self.x = 1 + + t = Test() + "#, + &[Rule::UndefinedName], + ); + flakes( + r#" + class Test(object): + X = [__class__ for _ in range(10)] + + def __init__(self): + self.x = 1 + + t = Test() + "#, + &[Rule::UndefinedName], + ); + flakes( + r#" + def f(self): + print(__class__.__name__) + self.x = 1 + + f() + "#, + &[Rule::UndefinedName], + ); } /// See: @@ -3643,6 +3691,16 @@ mod tests { "#, &[], ); + flakes( + r#" + from typing import NewType + + def f(): + name = "x" + NewType(name, int) + "#, + &[], + ); } #[test] diff --git a/crates/ruff/src/rules/pyflakes/rules/imports.rs b/crates/ruff/src/rules/pyflakes/rules/imports.rs index f7651469ab8a4..373beefc6bb9d 100644 --- 
a/crates/ruff/src/rules/pyflakes/rules/imports.rs +++ b/crates/ruff/src/rules/pyflakes/rules/imports.rs @@ -10,16 +10,54 @@ use ruff_python_stdlib::future::ALL_FEATURE_NAMES; use crate::checkers::ast::Checker; #[derive(Debug, PartialEq, Eq, Copy, Clone)] -pub enum UnusedImportContext { +pub(crate) enum UnusedImportContext { ExceptHandler, Init, } +/// ## What it does +/// Checks for unused imports. +/// +/// ## Why is this bad? +/// Unused imports add a performance overhead at runtime, and risk creating +/// import cycles. They also increase the cognitive load of reading the code. +/// +/// If an import statement is used to check for the availability or existence +/// of a module, consider using `importlib.util.find_spec` instead. +/// +/// ## Example +/// ```python +/// import numpy as np # unused import +/// +/// +/// def area(radius): +/// return 3.14 * radius**2 +/// ``` +/// +/// Use instead: +/// ```python +/// def area(radius): +/// return 3.14 * radius**2 +/// ``` +/// +/// To check the availability of a module, use `importlib.util.find_spec`: +/// ```python +/// from importlib.util import find_spec +/// +/// if find_spec("numpy") is not None: +/// print("numpy is installed") +/// else: +/// print("numpy is not installed") +/// ``` +/// +/// ## References +/// - [Python documentation](https://docs.python.org/3/reference/simple_stmts.html#the-import-statement) +/// - [Python documentation](https://docs.python.org/3/library/importlib.html#importlib.util.find_spec) #[violation] pub struct UnusedImport { - pub name: String, - pub context: Option, - pub multiple: bool, + pub(crate) name: String, + pub(crate) context: Option, + pub(crate) multiple: bool, } impl Violation for UnusedImport { @@ -54,10 +92,38 @@ impl Violation for UnusedImport { }) } } + +/// ## What it does +/// Checks for import bindings that are shadowed by loop variables. +/// +/// ## Why is this bad? +/// Shadowing an import with loop variables makes the code harder to read and +/// reason about, as the identity of the imported binding is no longer clear. +/// It's also often indicative of a mistake, as it's unlikely that the loop +/// variable is intended to be used as the imported binding. +/// +/// Consider using a different name for the loop variable. +/// +/// ## Example +/// ```python +/// from os import path +/// +/// for path in files: +/// print(path) +/// ``` +/// +/// Use instead: +/// ```python +/// from os import path +/// +/// +/// for filename in files: +/// print(filename) +/// ``` #[violation] pub struct ImportShadowedByLoopVar { - pub name: String, - pub line: OneIndexed, + pub(crate) name: String, + pub(crate) line: OneIndexed, } impl Violation for ImportShadowedByLoopVar { @@ -68,9 +134,37 @@ impl Violation for ImportShadowedByLoopVar { } } +/// ## What it does +/// Checks for the use of wildcard imports. +/// +/// ## Why is this bad? +/// Wildcard imports (e.g., `from module import *`) make it hard to determine +/// which symbols are available in the current namespace, and from which module +/// they were imported. 
+/// +/// ## Example +/// ```python +/// from math import * +/// +/// +/// def area(radius): +/// return pi * radius**2 +/// ``` +/// +/// Use instead: +/// ```python +/// from math import pi +/// +/// +/// def area(radius): +/// return pi * radius**2 +/// ``` +/// +/// ## References +/// - [PEP 8](https://peps.python.org/pep-0008/#imports) #[violation] pub struct UndefinedLocalWithImportStar { - pub name: String, + pub(crate) name: String, } impl Violation for UndefinedLocalWithImportStar { @@ -81,6 +175,31 @@ impl Violation for UndefinedLocalWithImportStar { } } +/// ## What it does +/// Checks for `__future__` imports that are not located at the beginning of a +/// file. +/// +/// ## Why is this bad? +/// Imports from `__future__` must be placed at the beginning of the file, before any +/// other statements (apart from docstrings). The use of `__future__` imports +/// elsewhere is invalid and will result in a `SyntaxError`. +/// +/// ## Example +/// ```python +/// from pathlib import Path +/// +/// from __future__ import annotations +/// ``` +/// +/// Use instead: +/// ```python +/// from __future__ import annotations +/// +/// from pathlib import Path +/// ``` +/// +/// ## References +/// - [PEP 8](https://peps.python.org/pep-0008/#module-level-dunder-names) #[violation] pub struct LateFutureImport; @@ -91,10 +210,47 @@ impl Violation for LateFutureImport { } } +/// ## What it does +/// Checks for names that might be undefined, but may also be defined in a +/// wildcard import. +/// +/// ## Why is this bad? +/// Wildcard imports (e.g., `from module import *`) make it hard to determine +/// which symbols are available in the current namespace. If a module contains +/// a wildcard import, and a name in the current namespace has not been +/// explicitly defined or imported, then it's unclear whether the name is +/// undefined or was imported by the wildcard import. +/// +/// If the name _is_ defined via a wildcard import, that member should be +/// imported explicitly to avoid confusion. +/// +/// If the name is _not_ defined in a wildcard import, it should be defined or +/// imported. +/// +/// ## Example +/// ```python +/// from math import * +/// +/// +/// def area(radius): +/// return pi * radius**2 +/// ``` +/// +/// Use instead: +/// ```python +/// from math import pi +/// +/// +/// def area(radius): +/// return pi * radius**2 +/// ``` +/// +/// ## References +/// - [PEP 8](https://peps.python.org/pep-0008/#imports) #[violation] pub struct UndefinedLocalWithImportStarUsage { - pub name: String, - pub sources: Vec, + pub(crate) name: String, + pub(crate) sources: Vec, } impl Violation for UndefinedLocalWithImportStarUsage { @@ -109,9 +265,36 @@ impl Violation for UndefinedLocalWithImportStarUsage { } } +/// ## What it does +/// Checks for the use of wildcard imports outside of the module namespace. +/// +/// ## Why is this bad? +/// The use of wildcard imports outside of the module namespace (e.g., within +/// functions) can lead to confusion, as the import can shadow local variables. +/// +/// Though wildcard imports are discouraged, when necessary, they should be placed +/// in the module namespace (i.e., at the top-level of a module). +/// +/// ## Example +/// ```python +/// def foo(): +/// from math import * +/// ``` +/// +/// Use instead: +/// ```python +/// from math import * +/// +/// +/// def foo(): +/// ... 
+/// ``` +/// +/// ## References +/// - [PEP 8](https://peps.python.org/pep-0008/#imports) #[violation] pub struct UndefinedLocalWithNestedImportStarUsage { - pub name: String, + pub(crate) name: String, } impl Violation for UndefinedLocalWithNestedImportStarUsage { @@ -122,6 +305,16 @@ impl Violation for UndefinedLocalWithNestedImportStarUsage { } } +/// ## What it does +/// Checks for `__future__` imports that are not defined in the current Python +/// version. +/// +/// ## Why is this bad? +/// Importing undefined or unsupported members from the `__future__` module is +/// a `SyntaxError`. +/// +/// ## References +/// - [Python documentation](https://docs.python.org/3/library/__future__.html) #[violation] pub struct FutureFeatureNotDefined { name: String, diff --git a/crates/ruff/src/rules/pyflakes/rules/invalid_print_syntax.rs b/crates/ruff/src/rules/pyflakes/rules/invalid_print_syntax.rs index 7e47e176f6871..96c9996971144 100644 --- a/crates/ruff/src/rules/pyflakes/rules/invalid_print_syntax.rs +++ b/crates/ruff/src/rules/pyflakes/rules/invalid_print_syntax.rs @@ -23,7 +23,7 @@ pub(crate) fn invalid_print_syntax(checker: &mut Checker, left: &Expr) { if id != "print" { return; } - if !checker.ctx.is_builtin("print") { + if !checker.semantic_model().is_builtin("print") { return; }; checker diff --git a/crates/ruff/src/rules/pyflakes/rules/repeated_keys.rs b/crates/ruff/src/rules/pyflakes/rules/repeated_keys.rs index 85dbf62dcbdc5..b36393ae45159 100644 --- a/crates/ruff/src/rules/pyflakes/rules/repeated_keys.rs +++ b/crates/ruff/src/rules/pyflakes/rules/repeated_keys.rs @@ -96,11 +96,7 @@ pub(crate) fn repeated_keys(checker: &mut Checker, keys: &[Option], values if let Some(seen_values) = seen.get_mut(&dict_key) { match dict_key { DictionaryKey::Constant(..) 
=> { - if checker - .settings - .rules - .enabled(Rule::MultiValueRepeatedKeyLiteral) - { + if checker.enabled(Rule::MultiValueRepeatedKeyLiteral) { let comparable_value: ComparableExpr = (&values[i]).into(); let is_duplicate_value = seen_values.contains(&comparable_value); let mut diagnostic = Diagnostic::new( @@ -125,11 +121,7 @@ pub(crate) fn repeated_keys(checker: &mut Checker, keys: &[Option], values } } DictionaryKey::Variable(dict_key) => { - if checker - .settings - .rules - .enabled(Rule::MultiValueRepeatedKeyVariable) - { + if checker.enabled(Rule::MultiValueRepeatedKeyVariable) { let comparable_value: ComparableExpr = (&values[i]).into(); let is_duplicate_value = seen_values.contains(&comparable_value); let mut diagnostic = Diagnostic::new( diff --git a/crates/ruff/src/rules/pyflakes/rules/return_outside_function.rs b/crates/ruff/src/rules/pyflakes/rules/return_outside_function.rs index 41cecdc571e07..d88d08df0f16e 100644 --- a/crates/ruff/src/rules/pyflakes/rules/return_outside_function.rs +++ b/crates/ruff/src/rules/pyflakes/rules/return_outside_function.rs @@ -18,7 +18,7 @@ impl Violation for ReturnOutsideFunction { pub(crate) fn return_outside_function(checker: &mut Checker, stmt: &Stmt) { if matches!( - checker.ctx.scope().kind, + checker.semantic_model().scope().kind, ScopeKind::Class(_) | ScopeKind::Module ) { checker diff --git a/crates/ruff/src/rules/pyflakes/rules/strings.rs b/crates/ruff/src/rules/pyflakes/rules/strings.rs index 232401293734a..34df8a937fbb2 100644 --- a/crates/ruff/src/rules/pyflakes/rules/strings.rs +++ b/crates/ruff/src/rules/pyflakes/rules/strings.rs @@ -1,6 +1,6 @@ -use ruff_text_size::TextRange; use std::string::ToString; +use ruff_text_size::TextRange; use rustc_hash::FxHashSet; use rustpython_parser::ast::{self, Constant, Expr, Identifier, Keyword}; diff --git a/crates/ruff/src/rules/pyflakes/rules/undefined_export.rs b/crates/ruff/src/rules/pyflakes/rules/undefined_export.rs index 1ad222cbf1573..bce55367fcf93 100644 --- a/crates/ruff/src/rules/pyflakes/rules/undefined_export.rs +++ b/crates/ruff/src/rules/pyflakes/rules/undefined_export.rs @@ -1,7 +1,8 @@ +use ruff_text_size::TextRange; + use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; use ruff_python_semantic::scope::Scope; -use ruff_text_size::TextRange; #[violation] pub struct UndefinedExport { diff --git a/crates/ruff/src/rules/pyflakes/rules/undefined_local.rs b/crates/ruff/src/rules/pyflakes/rules/undefined_local.rs index 1d7a4b868f793..0a8ccaaede54a 100644 --- a/crates/ruff/src/rules/pyflakes/rules/undefined_local.rs +++ b/crates/ruff/src/rules/pyflakes/rules/undefined_local.rs @@ -21,7 +21,7 @@ impl Violation for UndefinedLocal { /// F823 pub(crate) fn undefined_local(checker: &mut Checker, name: &str) { // If the name hasn't already been defined in the current scope... - let current = checker.ctx.scope(); + let current = checker.semantic_model().scope(); if !current.kind.is_function() || current.defines(name) { return; } @@ -31,26 +31,46 @@ pub(crate) fn undefined_local(checker: &mut Checker, name: &str) { }; // For every function and module scope above us... - for scope in checker.ctx.scopes.ancestors(parent) { - if !(scope.kind.is_function() || scope.kind.is_module()) { - continue; - } - - // If the name was defined in that scope... - if let Some(binding) = scope.get(name).map(|index| &checker.ctx.bindings[*index]) { - // And has already been accessed in the current scope... 
- if let Some((scope_id, location)) = binding.runtime_usage { - if scope_id == checker.ctx.scope_id { + let local_access = checker + .semantic_model() + .scopes + .ancestors(parent) + .find_map(|scope| { + if !(scope.kind.is_function() || scope.kind.is_module()) { + return None; + } + + // If the name was defined in that scope... + if let Some(binding) = scope + .get(name) + .map(|binding_id| &checker.semantic_model().bindings[binding_id]) + { + // And has already been accessed in the current scope... + if let Some(range) = binding.references().find_map(|reference_id| { + let reference = checker.semantic_model().references.resolve(reference_id); + if checker + .semantic_model() + .is_current_scope(reference.scope_id()) + { + Some(reference.range()) + } else { + None + } + }) { // Then it's probably an error. - checker.diagnostics.push(Diagnostic::new( - UndefinedLocal { - name: name.to_string(), - }, - location, - )); - return; + return Some(range); } } - } + + None + }); + + if let Some(location) = local_access { + checker.diagnostics.push(Diagnostic::new( + UndefinedLocal { + name: name.to_string(), + }, + location, + )); } } diff --git a/crates/ruff/src/rules/pyflakes/rules/unused_annotation.rs b/crates/ruff/src/rules/pyflakes/rules/unused_annotation.rs index b29e52bbcf7c3..265b463888130 100644 --- a/crates/ruff/src/rules/pyflakes/rules/unused_annotation.rs +++ b/crates/ruff/src/rules/pyflakes/rules/unused_annotation.rs @@ -19,21 +19,26 @@ impl Violation for UnusedAnnotation { /// F842 pub(crate) fn unused_annotation(checker: &mut Checker, scope: ScopeId) { - let scope = &checker.ctx.scopes[scope]; - for (name, binding) in scope + let scope = &checker.semantic_model().scopes[scope]; + + let bindings: Vec<_> = scope .bindings() - .map(|(name, index)| (name, &checker.ctx.bindings[*index])) - { - if !binding.used() - && binding.kind.is_annotation() - && !checker.settings.dummy_variable_rgx.is_match(name) - { - checker.diagnostics.push(Diagnostic::new( - UnusedAnnotation { - name: (*name).to_string(), - }, - binding.range, - )); - } + .filter_map(|(name, binding_id)| { + let binding = &checker.semantic_model().bindings[binding_id]; + if binding.kind.is_annotation() + && !binding.is_used() + && !checker.settings.dummy_variable_rgx.is_match(name) + { + Some((name.to_string(), binding.range)) + } else { + None + } + }) + .collect(); + + for (name, range) in bindings { + checker + .diagnostics + .push(Diagnostic::new(UnusedAnnotation { name }, range)); } } diff --git a/crates/ruff/src/rules/pyflakes/rules/unused_variable.rs b/crates/ruff/src/rules/pyflakes/rules/unused_variable.rs index 29944b1186aa6..f9d4ab1d39d95 100644 --- a/crates/ruff/src/rules/pyflakes/rules/unused_variable.rs +++ b/crates/ruff/src/rules/pyflakes/rules/unused_variable.rs @@ -204,7 +204,7 @@ fn remove_unused_variable( if let Some(target) = targets.iter().find(|target| range == target.range()) { if target.is_name_expr() { return if targets.len() > 1 - || contains_effect(value, |id| checker.ctx.is_builtin(id)) + || contains_effect(value, |id| checker.semantic_model().is_builtin(id)) { // If the expression is complex (`x = foo()`), remove the assignment, // but preserve the right-hand side. @@ -219,7 +219,7 @@ fn remove_unused_variable( )) } else { // If (e.g.) assigning to a constant (`x = 1`), delete the entire statement. 
- let parent = checker.ctx.stmts.parent(stmt); + let parent = checker.semantic_model().stmts.parent(stmt); let deleted: Vec<&Stmt> = checker.deletions.iter().map(Into::into).collect(); match delete_stmt( stmt, @@ -249,7 +249,7 @@ fn remove_unused_variable( }) = stmt { if target.is_name_expr() { - return if contains_effect(value, |id| checker.ctx.is_builtin(id)) { + return if contains_effect(value, |id| checker.semantic_model().is_builtin(id)) { // If the expression is complex (`x = foo()`), remove the assignment, // but preserve the right-hand side. #[allow(deprecated)] @@ -262,7 +262,7 @@ fn remove_unused_variable( )) } else { // If assigning to a constant (`x = 1`), delete the entire statement. - let parent = checker.ctx.stmts.parent(stmt); + let parent = checker.semantic_model().stmts.parent(stmt); let deleted: Vec<&Stmt> = checker.deletions.iter().map(Into::into).collect(); match delete_stmt( stmt, @@ -313,42 +313,45 @@ fn remove_unused_variable( /// F841 pub(crate) fn unused_variable(checker: &mut Checker, scope: ScopeId) { - let scope = &checker.ctx.scopes[scope]; + let scope = &checker.semantic_model().scopes[scope]; if scope.uses_locals && matches!(scope.kind, ScopeKind::Function(..)) { return; } - for (name, binding) in scope + let bindings: Vec<_> = scope .bindings() - .map(|(name, index)| (name, &checker.ctx.bindings[*index])) - { - if !binding.used() - && (binding.kind.is_assignment() || binding.kind.is_named_expr_assignment()) - && !checker.settings.dummy_variable_rgx.is_match(name) - && name != &"__tracebackhide__" - && name != &"__traceback_info__" - && name != &"__traceback_supplement__" - && name != &"__debuggerskip__" - { - let mut diagnostic = Diagnostic::new( - UnusedVariable { - name: (*name).to_string(), - }, - binding.range, - ); - if checker.patch(diagnostic.kind.rule()) { - if let Some(source) = binding.source { - let stmt = checker.ctx.stmts[source]; - if let Some((kind, fix)) = remove_unused_variable(stmt, binding.range, checker) - { - if matches!(kind, DeletionKind::Whole) { - checker.deletions.insert(RefEquality(stmt)); - } - diagnostic.set_fix(fix); + .map(|(name, binding_id)| (name, &checker.semantic_model().bindings[binding_id])) + .filter_map(|(name, binding)| { + if (binding.kind.is_assignment() || binding.kind.is_named_expr_assignment()) + && !binding.is_used() + && !checker.settings.dummy_variable_rgx.is_match(name) + && name != "__tracebackhide__" + && name != "__traceback_info__" + && name != "__traceback_supplement__" + && name != "__debuggerskip__" + { + return Some((name.to_string(), binding.range, binding.source)); + } + + None + }) + .collect(); + + for (name, range, source) in bindings { + let mut diagnostic = Diagnostic::new(UnusedVariable { name }, range); + + if checker.patch(diagnostic.kind.rule()) { + if let Some(source) = source { + let stmt = checker.semantic_model().stmts[source]; + if let Some((kind, fix)) = remove_unused_variable(stmt, range, checker) { + if matches!(kind, DeletionKind::Whole) { + checker.deletions.insert(RefEquality(stmt)); } + diagnostic.set_fix(fix); } } - checker.diagnostics.push(diagnostic); } + + checker.diagnostics.push(diagnostic); } } diff --git a/crates/ruff/src/rules/pyflakes/rules/yield_outside_function.rs b/crates/ruff/src/rules/pyflakes/rules/yield_outside_function.rs index 2fb4291551844..879118b829668 100644 --- a/crates/ruff/src/rules/pyflakes/rules/yield_outside_function.rs +++ b/crates/ruff/src/rules/pyflakes/rules/yield_outside_function.rs @@ -40,7 +40,7 @@ impl Violation for YieldOutsideFunction { 
pub(crate) fn yield_outside_function(checker: &mut Checker, expr: &Expr) { if matches!( - checker.ctx.scope().kind, + checker.semantic_model().scope().kind, ScopeKind::Class(_) | ScopeKind::Module ) { let keyword = match expr { diff --git a/crates/ruff/src/rules/pyflakes/snapshots/ruff__rules__pyflakes__tests__F401_F401_12.py.snap b/crates/ruff/src/rules/pyflakes/snapshots/ruff__rules__pyflakes__tests__F401_F401_12.py.snap new file mode 100644 index 0000000000000..1976c4331d419 --- /dev/null +++ b/crates/ruff/src/rules/pyflakes/snapshots/ruff__rules__pyflakes__tests__F401_F401_12.py.snap @@ -0,0 +1,4 @@ +--- +source: crates/ruff/src/rules/pyflakes/mod.rs +--- + diff --git a/crates/ruff/src/rules/pyflakes/snapshots/ruff__rules__pyflakes__tests__F401_F401_13.py.snap b/crates/ruff/src/rules/pyflakes/snapshots/ruff__rules__pyflakes__tests__F401_F401_13.py.snap new file mode 100644 index 0000000000000..1976c4331d419 --- /dev/null +++ b/crates/ruff/src/rules/pyflakes/snapshots/ruff__rules__pyflakes__tests__F401_F401_13.py.snap @@ -0,0 +1,4 @@ +--- +source: crates/ruff/src/rules/pyflakes/mod.rs +--- + diff --git a/crates/ruff/src/rules/pyflakes/snapshots/ruff__rules__pyflakes__tests__F401_F401_14.py.snap b/crates/ruff/src/rules/pyflakes/snapshots/ruff__rules__pyflakes__tests__F401_F401_14.py.snap new file mode 100644 index 0000000000000..1976c4331d419 --- /dev/null +++ b/crates/ruff/src/rules/pyflakes/snapshots/ruff__rules__pyflakes__tests__F401_F401_14.py.snap @@ -0,0 +1,4 @@ +--- +source: crates/ruff/src/rules/pyflakes/mod.rs +--- + diff --git a/crates/ruff/src/rules/pyflakes/snapshots/ruff__rules__pyflakes__tests__F811_F811_23.py.snap b/crates/ruff/src/rules/pyflakes/snapshots/ruff__rules__pyflakes__tests__F811_F811_23.py.snap new file mode 100644 index 0000000000000..ddb037066fec8 --- /dev/null +++ b/crates/ruff/src/rules/pyflakes/snapshots/ruff__rules__pyflakes__tests__F811_F811_23.py.snap @@ -0,0 +1,11 @@ +--- +source: crates/ruff/src/rules/pyflakes/mod.rs +--- +F811_23.py:4:8: F811 Redefinition of unused `foo` from line 3 + | +4 | import foo as foo +5 | import bar as foo + | ^^^^^^^^^^ F811 + | + + diff --git a/crates/ruff/src/rules/pyflakes/snapshots/ruff__rules__pyflakes__tests__F811_F811_24.py.snap b/crates/ruff/src/rules/pyflakes/snapshots/ruff__rules__pyflakes__tests__F811_F811_24.py.snap new file mode 100644 index 0000000000000..1976c4331d419 --- /dev/null +++ b/crates/ruff/src/rules/pyflakes/snapshots/ruff__rules__pyflakes__tests__F811_F811_24.py.snap @@ -0,0 +1,4 @@ +--- +source: crates/ruff/src/rules/pyflakes/mod.rs +--- + diff --git a/crates/ruff/src/rules/pygrep_hooks/mod.rs b/crates/ruff/src/rules/pygrep_hooks/mod.rs index a82a3e42857b7..38b9333b135e5 100644 --- a/crates/ruff/src/rules/pygrep_hooks/mod.rs +++ b/crates/ruff/src/rules/pygrep_hooks/mod.rs @@ -6,7 +6,6 @@ mod tests { use std::path::Path; use anyhow::Result; - use test_case::test_case; use crate::registry::Rule; diff --git a/crates/ruff/src/rules/pygrep_hooks/rules/deprecated_log_warn.rs b/crates/ruff/src/rules/pygrep_hooks/rules/deprecated_log_warn.rs index 4dfeed38aa6ac..ba941d1d85510 100644 --- a/crates/ruff/src/rules/pygrep_hooks/rules/deprecated_log_warn.rs +++ b/crates/ruff/src/rules/pygrep_hooks/rules/deprecated_log_warn.rs @@ -44,7 +44,7 @@ impl Violation for DeprecatedLogWarn { /// PGH002 pub(crate) fn deprecated_log_warn(checker: &mut Checker, func: &Expr) { if checker - .ctx + .semantic_model() .resolve_call_path(func) .map_or(false, |call_path| { call_path.as_slice() == ["logging", "warn"] diff --git 
a/crates/ruff/src/rules/pygrep_hooks/rules/no_eval.rs b/crates/ruff/src/rules/pygrep_hooks/rules/no_eval.rs index 5eaccae08e193..40b224a22c595 100644 --- a/crates/ruff/src/rules/pygrep_hooks/rules/no_eval.rs +++ b/crates/ruff/src/rules/pygrep_hooks/rules/no_eval.rs @@ -46,7 +46,7 @@ pub(crate) fn no_eval(checker: &mut Checker, func: &Expr) { if id != "eval" { return; } - if !checker.ctx.is_builtin("eval") { + if !checker.semantic_model().is_builtin("eval") { return; } checker diff --git a/crates/ruff/src/rules/pylint/helpers.rs b/crates/ruff/src/rules/pylint/helpers.rs index 1c51bef4b0a4d..f45937046119c 100644 --- a/crates/ruff/src/rules/pylint/helpers.rs +++ b/crates/ruff/src/rules/pylint/helpers.rs @@ -1,11 +1,12 @@ use ruff_python_semantic::analyze::function_type; use ruff_python_semantic::analyze::function_type::FunctionType; +use ruff_python_semantic::model::SemanticModel; use ruff_python_semantic::scope::{FunctionDef, ScopeKind}; -use crate::checkers::ast::Checker; +use crate::settings::Settings; -pub(crate) fn in_dunder_init(checker: &Checker) -> bool { - let scope = checker.ctx.scope(); +pub(crate) fn in_dunder_init(model: &SemanticModel, settings: &Settings) -> bool { + let scope = model.scope(); let ScopeKind::Function(FunctionDef { name, decorator_list, @@ -16,18 +17,18 @@ pub(crate) fn in_dunder_init(checker: &Checker) -> bool { if name != "__init__" { return false; } - let Some(parent) = scope.parent.map(|scope_id| &checker.ctx.scopes[scope_id]) else { + let Some(parent) = scope.parent.map(|scope_id| &model.scopes[scope_id]) else { return false; }; if !matches!( function_type::classify( - &checker.ctx, + model, parent, name, decorator_list, - &checker.settings.pep8_naming.classmethod_decorators, - &checker.settings.pep8_naming.staticmethod_decorators, + &settings.pep8_naming.classmethod_decorators, + &settings.pep8_naming.staticmethod_decorators, ), FunctionType::Method ) { diff --git a/crates/ruff/src/rules/pylint/mod.rs b/crates/ruff/src/rules/pylint/mod.rs index eb703e349703c..0c0a4952a5e62 100644 --- a/crates/ruff/src/rules/pylint/mod.rs +++ b/crates/ruff/src/rules/pylint/mod.rs @@ -7,12 +7,11 @@ pub mod settings; mod tests { use std::path::Path; - use crate::assert_messages; use anyhow::Result; - use regex::Regex; use test_case::test_case; + use crate::assert_messages; use crate::registry::Rule; use crate::rules::pylint; use crate::settings::types::PythonVersion; @@ -21,7 +20,7 @@ mod tests { #[test_case(Rule::AwaitOutsideAsync, Path::new("await_outside_async.py"); "PLE1142")] #[test_case(Rule::AssertOnStringLiteral, Path::new("assert_on_string_literal.py"); "PLW0129")] - #[test_case(Rule::BadStrStripCall, Path::new("bad_str_strip_call.py"); "PLE01310")] + #[test_case(Rule::BadStrStripCall, Path::new("bad_str_strip_call.py"); "PLE1310")] #[test_case(Rule::BadStringFormatType, Path::new("bad_string_format_type.py"); "PLE1307")] #[test_case(Rule::BidirectionalUnicode, Path::new("bidirectional_unicode.py"); "PLE2502")] #[test_case(Rule::BinaryOpException, Path::new("binary_op_exception.py"); "PLW0711")] @@ -47,6 +46,7 @@ mod tests { #[test_case(Rule::InvalidAllFormat, Path::new("invalid_all_format.py"); "PLE0605")] #[test_case(Rule::InvalidAllObject, Path::new("invalid_all_object.py"); "PLE0604")] #[test_case(Rule::DuplicateBases, Path::new("duplicate_bases.py"); "PLE0241")] + #[test_case(Rule::DuplicateValue, Path::new("duplicate_value.py"); "PLW0130")] #[test_case(Rule::InvalidCharacterBackspace, Path::new("invalid_characters.py"); "PLE2510")] 
#[test_case(Rule::InvalidCharacterEsc, Path::new("invalid_characters.py"); "PLE2513")] #[test_case(Rule::InvalidCharacterNul, Path::new("invalid_characters.py"); "PLE2514")] @@ -57,6 +57,7 @@ mod tests { #[test_case(Rule::LoggingTooFewArgs, Path::new("logging_too_few_args.py"); "PLE1206")] #[test_case(Rule::LoggingTooManyArgs, Path::new("logging_too_many_args.py"); "PLE1205")] #[test_case(Rule::MagicValueComparison, Path::new("magic_value_comparison.py"); "PLR2004")] + #[test_case(Rule::NamedExprWithoutContext, Path::new("named_expr_without_context.py"); "PLW0131")] #[test_case(Rule::NonlocalWithoutBinding, Path::new("nonlocal_without_binding.py"); "PLE0117")] #[test_case(Rule::PropertyWithParameters, Path::new("property_with_parameters.py"); "PLR0206")] #[test_case(Rule::RedefinedLoopName, Path::new("redefined_loop_name.py"); "PLW2901")] diff --git a/crates/ruff/src/rules/pylint/rules/await_outside_async.rs b/crates/ruff/src/rules/pylint/rules/await_outside_async.rs index 6419e10a9983a..30d1605f845b6 100644 --- a/crates/ruff/src/rules/pylint/rules/await_outside_async.rs +++ b/crates/ruff/src/rules/pylint/rules/await_outside_async.rs @@ -46,7 +46,7 @@ impl Violation for AwaitOutsideAsync { /// PLE1142 pub(crate) fn await_outside_async(checker: &mut Checker, expr: &Expr) { if !checker - .ctx + .semantic_model() .scopes() .find_map(|scope| { if let ScopeKind::Function(FunctionDef { async_, .. }) = &scope.kind { diff --git a/crates/ruff/src/rules/pylint/rules/bad_string_format_type.rs b/crates/ruff/src/rules/pylint/rules/bad_string_format_type.rs index d141286f0f5c6..cbb8b10b4e09f 100644 --- a/crates/ruff/src/rules/pylint/rules/bad_string_format_type.rs +++ b/crates/ruff/src/rules/pylint/rules/bad_string_format_type.rs @@ -1,6 +1,6 @@ -use ruff_text_size::TextRange; use std::str::FromStr; +use ruff_text_size::TextRange; use rustc_hash::FxHashMap; use rustpython_format::cformat::{CFormatPart, CFormatSpec, CFormatStrOrBytes, CFormatString}; use rustpython_parser::ast::{self, Constant, Expr, Operator, Ranged}; diff --git a/crates/ruff/src/rules/pylint/rules/collapsible_else_if.rs b/crates/ruff/src/rules/pylint/rules/collapsible_else_if.rs index b99de7e37a621..d53fd72869b38 100644 --- a/crates/ruff/src/rules/pylint/rules/collapsible_else_if.rs +++ b/crates/ruff/src/rules/pylint/rules/collapsible_else_if.rs @@ -4,13 +4,45 @@ use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; use ruff_python_ast::source_code::Locator; +/// ## What it does +/// Checks for `else` blocks that consist of a single `if` statement. +/// +/// ## Why is this bad? +/// If an `else` block contains a single `if` statement, it can be collapsed +/// into an `elif`, thus reducing the indentation level. 
+/// +/// ## Example +/// ```python +/// def check_sign(value: int) -> None: +/// if value > 0: +/// print("Number is positive.") +/// else: +/// if value < 0: +/// print("Number is negative.") +/// else: +/// print("Number is zero.") +/// ``` +/// +/// Use instead: +/// ```python +/// def check_sign(value: int) -> None: +/// if value > 0: +/// print("Number is positive.") +/// elif value < 0: +/// print("Number is negative.") +/// else: +/// print("Number is zero.") +/// ``` +/// +/// ## References +/// - [Python documentation](https://docs.python.org/3/tutorial/controlflow.html#if-statements) #[violation] pub struct CollapsibleElseIf; impl Violation for CollapsibleElseIf { #[derive_message_formats] fn message(&self) -> String { - format!("Consider using `elif` instead of `else` then `if` to remove one indentation level") + format!("Use `elif` instead of `else` then `if`, to reduce indentation") } } diff --git a/crates/ruff/src/rules/pylint/rules/compare_to_empty_string.rs b/crates/ruff/src/rules/pylint/rules/compare_to_empty_string.rs index 1501c53253d7c..8cde351c82ef9 100644 --- a/crates/ruff/src/rules/pylint/rules/compare_to_empty_string.rs +++ b/crates/ruff/src/rules/pylint/rules/compare_to_empty_string.rs @@ -98,7 +98,7 @@ pub(crate) fn compare_to_empty_string( ) { // Omit string comparison rules within subscripts. This is mostly commonly used within // DataFrame and np.ndarray indexing. - for parent in checker.ctx.expr_ancestors() { + for parent in checker.semantic_model().expr_ancestors() { if matches!(parent, Expr::Subscript(_)) { return; } diff --git a/crates/ruff/src/rules/pylint/rules/comparison_of_constant.rs b/crates/ruff/src/rules/pylint/rules/comparison_of_constant.rs index 5897200301d91..76d4468f7ff8f 100644 --- a/crates/ruff/src/rules/pylint/rules/comparison_of_constant.rs +++ b/crates/ruff/src/rules/pylint/rules/comparison_of_constant.rs @@ -57,6 +57,26 @@ impl fmt::Display for ViolationsCmpop { } } +/// ## What it does +/// Checks for comparisons between constants. +/// +/// ## Why is this bad? +/// Comparing two constants will always resolve to the same value, so the +/// comparison is redundant. Instead, the expression should be replaced +/// with the result of the comparison. +/// +/// ## Example +/// ```python +/// foo = 1 == 1 +/// ``` +/// +/// Use instead: +/// ```python +/// foo = True +/// ``` +/// +/// ## References +/// - [Python documentation](https://docs.python.org/3/reference/expressions.html#comparisons) #[violation] pub struct ComparisonOfConstant { left_constant: String, diff --git a/crates/ruff/src/rules/pylint/rules/duplicate_bases.rs b/crates/ruff/src/rules/pylint/rules/duplicate_bases.rs index c49517b7491dd..6d6256f4722d1 100644 --- a/crates/ruff/src/rules/pylint/rules/duplicate_bases.rs +++ b/crates/ruff/src/rules/pylint/rules/duplicate_bases.rs @@ -8,6 +8,34 @@ use ruff_macros::{derive_message_formats, violation}; use crate::checkers::ast::Checker; +/// ## What it does +/// Checks for duplicate base classes in class definitions. +/// +/// ## Why is this bad? +/// Including duplicate base classes will raise a `TypeError` at runtime. 
+/// +/// ## Example +/// ```python +/// class Foo: +/// pass +/// +/// +/// class Bar(Foo, Foo): +/// pass +/// ``` +/// +/// Use instead: +/// ```python +/// class Foo: +/// pass +/// +/// +/// class Bar(Foo): +/// pass +/// ``` +/// +/// ## References +/// - [Python documentation](https://docs.python.org/3/reference/compound_stmts.html#class-definitions) #[violation] pub struct DuplicateBases { base: String, diff --git a/crates/ruff/src/rules/pylint/rules/duplicate_value.rs b/crates/ruff/src/rules/pylint/rules/duplicate_value.rs new file mode 100644 index 0000000000000..46ba673cc88a1 --- /dev/null +++ b/crates/ruff/src/rules/pylint/rules/duplicate_value.rs @@ -0,0 +1,57 @@ +use rustc_hash::FxHashSet; +use rustpython_parser::ast::{self, Expr, Ranged}; + +use ruff_diagnostics::{Diagnostic, Violation}; +use ruff_macros::{derive_message_formats, violation}; +use ruff_python_ast::comparable::ComparableExpr; + +use crate::checkers::ast::Checker; + +/// ## What it does +/// Checks for set literals that contain duplicate values. +/// +/// ## Why is this bad? +/// In Python, sets are unordered collections of unique elements. Including a +/// duplicate value in a set literal is redundant, and may be indicative of a +/// mistake. +/// +/// ## Example +/// ```python +/// {1, 2, 3, 1} +/// ``` +/// +/// Use instead: +/// ```python +/// {1, 2, 3} +/// ``` +#[violation] +pub struct DuplicateValue { + value: String, +} + +impl Violation for DuplicateValue { + #[derive_message_formats] + fn message(&self) -> String { + let DuplicateValue { value } = self; + format!("Duplicate value `{value}` in set") + } +} + +/// PLW0130 +pub(crate) fn duplicate_value(checker: &mut Checker, elts: &Vec) { + let mut seen_values: FxHashSet = FxHashSet::default(); + for elt in elts { + if let Expr::Constant(ast::ExprConstant { value, .. 
}) = elt { + let comparable_value: ComparableExpr = elt.into(); + + if !seen_values.insert(comparable_value) { + checker.diagnostics.push(Diagnostic::new( + DuplicateValue { + value: checker.generator().constant(value), + }, + elt.range(), + )); + } + }; + } +} diff --git a/crates/ruff/src/rules/pylint/rules/global_statement.rs b/crates/ruff/src/rules/pylint/rules/global_statement.rs index 9ec1147331c47..c65bf96d57473 100644 --- a/crates/ruff/src/rules/pylint/rules/global_statement.rs +++ b/crates/ruff/src/rules/pylint/rules/global_statement.rs @@ -1,6 +1,7 @@ +use rustpython_parser::ast::Ranged; + use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; -use rustpython_parser::ast::Ranged; use crate::checkers::ast::Checker; @@ -54,11 +55,11 @@ impl Violation for GlobalStatement { /// PLW0603 pub(crate) fn global_statement(checker: &mut Checker, name: &str) { - let scope = checker.ctx.scope(); - if let Some(index) = scope.get(name) { - let binding = &checker.ctx.bindings[*index]; + let scope = checker.semantic_model().scope(); + if let Some(binding_id) = scope.get(name) { + let binding = &checker.semantic_model().bindings[binding_id]; if binding.kind.is_global() { - let source = checker.ctx.stmts[binding + let source = checker.semantic_model().stmts[binding .source .expect("`global` bindings should always have a `source`")]; let diagnostic = Diagnostic::new( diff --git a/crates/ruff/src/rules/pylint/rules/invalid_envvar_default.rs b/crates/ruff/src/rules/pylint/rules/invalid_envvar_default.rs index d09d1b35f7f29..a70ae406d61ae 100644 --- a/crates/ruff/src/rules/pylint/rules/invalid_envvar_default.rs +++ b/crates/ruff/src/rules/pylint/rules/invalid_envvar_default.rs @@ -84,7 +84,7 @@ pub(crate) fn invalid_envvar_default( keywords: &[Keyword], ) { if checker - .ctx + .semantic_model() .resolve_call_path(func) .map_or(false, |call_path| call_path.as_slice() == ["os", "getenv"]) { diff --git a/crates/ruff/src/rules/pylint/rules/invalid_envvar_value.rs b/crates/ruff/src/rules/pylint/rules/invalid_envvar_value.rs index a5f853e3bb899..2377de846615f 100644 --- a/crates/ruff/src/rules/pylint/rules/invalid_envvar_value.rs +++ b/crates/ruff/src/rules/pylint/rules/invalid_envvar_value.rs @@ -81,7 +81,7 @@ pub(crate) fn invalid_envvar_value( keywords: &[Keyword], ) { if checker - .ctx + .semantic_model() .resolve_call_path(func) .map_or(false, |call_path| call_path.as_slice() == ["os", "getenv"]) { diff --git a/crates/ruff/src/rules/pylint/rules/load_before_global_declaration.rs b/crates/ruff/src/rules/pylint/rules/load_before_global_declaration.rs index 025d6384f634a..e205f55a5b310 100644 --- a/crates/ruff/src/rules/pylint/rules/load_before_global_declaration.rs +++ b/crates/ruff/src/rules/pylint/rules/load_before_global_declaration.rs @@ -55,7 +55,7 @@ impl Violation for LoadBeforeGlobalDeclaration { } /// PLE0118 pub(crate) fn load_before_global_declaration(checker: &mut Checker, name: &str, expr: &Expr) { - let globals = match &checker.ctx.scope().kind { + let globals = match &checker.semantic_model().scope().kind { ScopeKind::Class(class_def) => &class_def.globals, ScopeKind::Function(function_def) => &function_def.globals, _ => return, diff --git a/crates/ruff/src/rules/pylint/rules/logging.rs b/crates/ruff/src/rules/pylint/rules/logging.rs index 65fcf9e64ad5e..2cf9930caa44a 100644 --- a/crates/ruff/src/rules/pylint/rules/logging.rs +++ b/crates/ruff/src/rules/pylint/rules/logging.rs @@ -102,7 +102,7 @@ pub(crate) fn logging_call( return; } - if 
!logging::is_logger_candidate(&checker.ctx, func) { + if !logging::is_logger_candidate(func, checker.semantic_model()) { return; } @@ -124,7 +124,7 @@ pub(crate) fn logging_call( let message_args = call_args.args.len() - 1; - if checker.settings.rules.enabled(Rule::LoggingTooManyArgs) { + if checker.enabled(Rule::LoggingTooManyArgs) { if summary.num_positional < message_args { checker .diagnostics @@ -132,7 +132,7 @@ pub(crate) fn logging_call( } } - if checker.settings.rules.enabled(Rule::LoggingTooFewArgs) { + if checker.enabled(Rule::LoggingTooFewArgs) { if message_args > 0 && call_args.kwargs.is_empty() && summary.num_positional > message_args diff --git a/crates/ruff/src/rules/pylint/rules/magic_value_comparison.rs b/crates/ruff/src/rules/pylint/rules/magic_value_comparison.rs index 3128777707744..60a6a8e4c2a86 100644 --- a/crates/ruff/src/rules/pylint/rules/magic_value_comparison.rs +++ b/crates/ruff/src/rules/pylint/rules/magic_value_comparison.rs @@ -7,6 +7,35 @@ use ruff_macros::{derive_message_formats, violation}; use crate::checkers::ast::Checker; use crate::rules::pylint::settings::ConstantType; +/// ## What it does +/// Checks for the use of unnamed numerical constants ("magic") values in +/// comparisons. +/// +/// ## Why is this bad? +/// The use of "magic" can make code harder to read and maintain, as readers +/// will have to infer the meaning of the value from the context. +/// +/// For convenience, this rule excludes a variety of common values from the +/// "magic" value definition, such as `0`, `1`, `""`, and `"__main__"`. +/// +/// ## Example +/// ```python +/// def calculate_discount(price: float) -> float: +/// return price * (1 - 0.2) +/// ``` +/// +/// Use instead: +/// ```python +/// DISCOUNT_RATE = 0.2 +/// +/// +/// def calculate_discount(price: float) -> float: +/// return price * (1 - DISCOUNT_RATE) +/// ``` +/// +/// ## References +/// - [Wikipedia](https://en.wikipedia.org/wiki/Magic_number_(programming)#Unnamed_numerical_constants) +/// - [PEP 8](https://peps.python.org/pep-0008/#constants) #[violation] pub struct MagicValueComparison { value: String, diff --git a/crates/ruff/src/rules/pylint/rules/manual_import_from.rs b/crates/ruff/src/rules/pylint/rules/manual_import_from.rs index 30f8e218e0dcb..2274a3e435dde 100644 --- a/crates/ruff/src/rules/pylint/rules/manual_import_from.rs +++ b/crates/ruff/src/rules/pylint/rules/manual_import_from.rs @@ -7,6 +7,25 @@ use ruff_macros::{derive_message_formats, violation}; use crate::checkers::ast::Checker; use crate::registry::AsRule; +/// ## What it does +/// Checks for submodule imports that are aliased to the submodule name. +/// +/// ## Why is this bad? +/// Using the `from` keyword to import the submodule is more concise and +/// readable. 
+/// +/// ## Example +/// ```python +/// import concurrent.futures as futures +/// ``` +/// +/// Use instead: +/// ```python +/// from concurrent import futures +/// ``` +/// +/// ## References +/// - [Python documentation](https://docs.python.org/3/reference/import.html#submodules) #[violation] pub struct ManualFromImport { module: String, diff --git a/crates/ruff/src/rules/pylint/rules/mod.rs b/crates/ruff/src/rules/pylint/rules/mod.rs index 04a444ce209a6..a75c21d0765d2 100644 --- a/crates/ruff/src/rules/pylint/rules/mod.rs +++ b/crates/ruff/src/rules/pylint/rules/mod.rs @@ -9,6 +9,7 @@ pub(crate) use compare_to_empty_string::{compare_to_empty_string, CompareToEmpty pub(crate) use comparison_of_constant::{comparison_of_constant, ComparisonOfConstant}; pub(crate) use continue_in_finally::{continue_in_finally, ContinueInFinally}; pub(crate) use duplicate_bases::{duplicate_bases, DuplicateBases}; +pub(crate) use duplicate_value::{duplicate_value, DuplicateValue}; pub(crate) use global_statement::{global_statement, GlobalStatement}; pub(crate) use global_variable_not_assigned::GlobalVariableNotAssigned; pub(crate) use import_self::{import_from_self, import_self, ImportSelf}; @@ -26,6 +27,7 @@ pub(crate) use load_before_global_declaration::{ pub(crate) use logging::{logging_call, LoggingTooFewArgs, LoggingTooManyArgs}; pub(crate) use magic_value_comparison::{magic_value_comparison, MagicValueComparison}; pub(crate) use manual_import_from::{manual_from_import, ManualFromImport}; +pub(crate) use named_expr_without_context::{named_expr_without_context, NamedExprWithoutContext}; pub(crate) use nested_min_max::{nested_min_max, NestedMinMax}; pub(crate) use nonlocal_without_binding::NonlocalWithoutBinding; pub(crate) use property_with_parameters::{property_with_parameters, PropertyWithParameters}; @@ -59,6 +61,7 @@ mod compare_to_empty_string; mod comparison_of_constant; mod continue_in_finally; mod duplicate_bases; +mod duplicate_value; mod global_statement; mod global_variable_not_assigned; mod import_self; @@ -71,6 +74,7 @@ mod load_before_global_declaration; mod logging; mod magic_value_comparison; mod manual_import_from; +mod named_expr_without_context; mod nested_min_max; mod nonlocal_without_binding; mod property_with_parameters; diff --git a/crates/ruff/src/rules/pylint/rules/named_expr_without_context.rs b/crates/ruff/src/rules/pylint/rules/named_expr_without_context.rs new file mode 100644 index 0000000000000..3576d36ea393c --- /dev/null +++ b/crates/ruff/src/rules/pylint/rules/named_expr_without_context.rs @@ -0,0 +1,44 @@ +use rustpython_parser::ast::{self, Expr}; + +use ruff_diagnostics::{Diagnostic, Violation}; +use ruff_macros::{derive_message_formats, violation}; + +use crate::checkers::ast::Checker; + +/// ## What it does +/// Checks for usages of named expressions (e.g., `a := 42`) that can be +/// replaced by regular assignment statements (e.g., `a = 42`). +/// +/// ## Why is this bad? +/// While a top-level named expression is syntactically and semantically valid, +/// it's less clear than a regular assignment statement. Named expressions are +/// intended to be used in comprehensions and generator expressions, where +/// assignment statements are not allowed. 
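The distinction drawn above, a walrus expression standing alone versus one used where an assignment statement cannot appear, is easy to see in a few lines of Python; the names below are invented purely for illustration.

```python
# Flagged by the new rule (PLW0131): the named expression is a bare statement,
# so a regular assignment would be clearer.
(threshold := 0.5)

# Not flagged: inside a comprehension filter an assignment statement is not
# allowed, so the walrus operator is the idiomatic way to bind `x`.
big_squares = [x for n in range(10) if (x := n * n) > threshold]
print(big_squares)
```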
+/// +/// ## Example +/// ```python +/// (a := 42) +/// ``` +/// +/// Use instead: +/// ```python +/// a = 42 +/// ``` +#[violation] +pub struct NamedExprWithoutContext; + +impl Violation for NamedExprWithoutContext { + #[derive_message_formats] + fn message(&self) -> String { + format!("Named expression used without context") + } +} + +/// PLW0131 +pub(crate) fn named_expr_without_context(checker: &mut Checker, value: &Expr) { + if let Expr::NamedExpr(ast::ExprNamedExpr { range, .. }) = value { + checker + .diagnostics + .push(Diagnostic::new(NamedExprWithoutContext, *range)); + } +} diff --git a/crates/ruff/src/rules/pylint/rules/nested_min_max.rs b/crates/ruff/src/rules/pylint/rules/nested_min_max.rs index 7c34721325a04..a35f588b43a44 100644 --- a/crates/ruff/src/rules/pylint/rules/nested_min_max.rs +++ b/crates/ruff/src/rules/pylint/rules/nested_min_max.rs @@ -4,7 +4,7 @@ use rustpython_parser::ast::{self, Expr, Keyword, Ranged}; use ruff_diagnostics::{AutofixKind, Diagnostic, Edit, Fix, Violation}; use ruff_macros::{derive_message_formats, violation}; use ruff_python_ast::helpers::has_comments; -use ruff_python_semantic::context::Context; +use ruff_python_semantic::model::SemanticModel; use crate::{checkers::ast::Checker, registry::AsRule}; @@ -59,16 +59,16 @@ impl Violation for NestedMinMax { impl MinMax { /// Converts a function call [`Expr`] into a [`MinMax`] if it is a call to `min` or `max`. - fn try_from_call(func: &Expr, keywords: &[Keyword], context: &Context) -> Option { + fn try_from_call(func: &Expr, keywords: &[Keyword], model: &SemanticModel) -> Option { if !keywords.is_empty() { return None; } let Expr::Name(ast::ExprName { id, .. }) = func else { return None; }; - if id.as_str() == "min" && context.is_builtin("min") { + if id.as_str() == "min" && model.is_builtin("min") { Some(MinMax::Min) - } else if id.as_str() == "max" && context.is_builtin("max") { + } else if id.as_str() == "max" && model.is_builtin("max") { Some(MinMax::Max) } else { None @@ -87,8 +87,8 @@ impl std::fmt::Display for MinMax { /// Collect a new set of arguments to by either accepting existing args as-is or /// collecting child arguments, if it's a call to the same function. 
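To make the effect of this helper concrete, here is a minimal Python sketch of the rewrite it enables; the literal values are arbitrary.

```python
# Nested calls to the same builtin...
lowest = min(1, min(2, 3))
highest = max(max(4, 5), 6, max(7, 8))

# ...are equivalent to a single flattened call, which is the shape the autofix
# builds by collecting the arguments of the inner `min`/`max` calls.
assert lowest == min(1, 2, 3)
assert highest == max(4, 5, 6, 7, 8)
print(lowest, highest)
```

As the rest of the hunk shows, the fix is only offered when `has_comments` finds nothing inside the expression, since flattening could otherwise discard a comment.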
-fn collect_nested_args(context: &Context, min_max: MinMax, args: &[Expr]) -> Vec { - fn inner(context: &Context, min_max: MinMax, args: &[Expr], new_args: &mut Vec) { +fn collect_nested_args(model: &SemanticModel, min_max: MinMax, args: &[Expr]) -> Vec { + fn inner(model: &SemanticModel, min_max: MinMax, args: &[Expr], new_args: &mut Vec) { for arg in args { if let Expr::Call(ast::ExprCall { func, @@ -106,8 +106,8 @@ fn collect_nested_args(context: &Context, min_max: MinMax, args: &[Expr]) -> Vec new_args.push(new_arg); continue; } - if MinMax::try_from_call(func, keywords, context) == Some(min_max) { - inner(context, min_max, args, new_args); + if MinMax::try_from_call(func, keywords, model) == Some(min_max) { + inner(model, min_max, args, new_args); continue; } } @@ -116,7 +116,7 @@ fn collect_nested_args(context: &Context, min_max: MinMax, args: &[Expr]) -> Vec } let mut new_args = Vec::with_capacity(args.len()); - inner(context, min_max, args, &mut new_args); + inner(model, min_max, args, &mut new_args); new_args } @@ -128,7 +128,7 @@ pub(crate) fn nested_min_max( args: &[Expr], keywords: &[Keyword], ) { - let Some(min_max) = MinMax::try_from_call(func, keywords, &checker.ctx) else { + let Some(min_max) = MinMax::try_from_call(func, keywords, checker.semantic_model()) else { return; }; @@ -136,14 +136,15 @@ pub(crate) fn nested_min_max( let Expr::Call(ast::ExprCall { func, keywords, ..} )= arg else { return false; }; - MinMax::try_from_call(func.as_ref(), keywords.as_ref(), &checker.ctx) == Some(min_max) + MinMax::try_from_call(func.as_ref(), keywords.as_ref(), checker.semantic_model()) + == Some(min_max) }) { let fixable = !has_comments(expr, checker.locator); let mut diagnostic = Diagnostic::new(NestedMinMax { func: min_max }, expr.range()); if fixable && checker.patch(diagnostic.kind.rule()) { let flattened_expr = Expr::Call(ast::ExprCall { func: Box::new(func.clone()), - args: collect_nested_args(&checker.ctx, min_max, args), + args: collect_nested_args(checker.semantic_model(), min_max, args), keywords: keywords.to_owned(), range: TextRange::default(), }); diff --git a/crates/ruff/src/rules/pylint/rules/property_with_parameters.rs b/crates/ruff/src/rules/pylint/rules/property_with_parameters.rs index 7d28d1cfb7e6a..79b06331a4d55 100644 --- a/crates/ruff/src/rules/pylint/rules/property_with_parameters.rs +++ b/crates/ruff/src/rules/pylint/rules/property_with_parameters.rs @@ -6,6 +6,36 @@ use ruff_python_ast::helpers::identifier_range; use crate::checkers::ast::Checker; +/// ## What it does +/// Checks for property definitions that accept function parameters. +/// +/// ## Why is this bad? +/// Properties cannot be called with parameters. +/// +/// If you need to pass parameters to a property, create a method with the +/// desired parameters and call that method instead. +/// +/// ## Example +/// ```python +/// class Cat: +/// @property +/// def purr(self, volume): +/// ... +/// ``` +/// +/// Use instead: +/// ```python +/// class Cat: +/// @property +/// def purr(self): +/// ... +/// +/// def purr_volume(self, volume): +/// ... 
+/// ``` +/// +/// ## References +/// - [Python documentation](https://docs.python.org/3/library/functions.html#property) #[violation] pub struct PropertyWithParameters; @@ -29,7 +59,7 @@ pub(crate) fn property_with_parameters( { return; } - if checker.ctx.is_builtin("property") + if checker.semantic_model().is_builtin("property") && args .args .iter() diff --git a/crates/ruff/src/rules/pylint/rules/redefined_loop_name.rs b/crates/ruff/src/rules/pylint/rules/redefined_loop_name.rs index 6e89473b70c59..8964206d1c75d 100644 --- a/crates/ruff/src/rules/pylint/rules/redefined_loop_name.rs +++ b/crates/ruff/src/rules/pylint/rules/redefined_loop_name.rs @@ -6,10 +6,9 @@ use rustpython_parser::ast::{self, Expr, ExprContext, Ranged, Stmt, Withitem}; use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; use ruff_python_ast::comparable::ComparableExpr; - use ruff_python_ast::statement_visitor::{walk_stmt, StatementVisitor}; use ruff_python_ast::types::Node; -use ruff_python_semantic::context::Context; +use ruff_python_semantic::model::SemanticModel; use crate::checkers::ast::Checker; @@ -139,16 +138,13 @@ struct ExprWithInnerBindingKind<'a> { binding_kind: InnerBindingKind, } -struct InnerForWithAssignTargetsVisitor<'a> { - context: &'a Context<'a>, +struct InnerForWithAssignTargetsVisitor<'a, 'b> { + context: &'a SemanticModel<'b>, dummy_variable_rgx: &'a Regex, assignment_targets: Vec>, } -impl<'a, 'b> StatementVisitor<'b> for InnerForWithAssignTargetsVisitor<'a> -where - 'b: 'a, -{ +impl<'a, 'b> StatementVisitor<'b> for InnerForWithAssignTargetsVisitor<'a, 'b> { fn visit_stmt(&mut self, stmt: &'b Stmt) { // Collect target expressions. match stmt { @@ -240,7 +236,7 @@ where /// /// x = cast(int, x) /// ``` -fn assignment_is_cast_expr(context: &Context, value: &Expr, target: &Expr) -> bool { +fn assignment_is_cast_expr(model: &SemanticModel, value: &Expr, target: &Expr) -> bool { let Expr::Call(ast::ExprCall { func, args, .. }) = value else { return false; }; @@ -256,7 +252,7 @@ fn assignment_is_cast_expr(context: &Context, value: &Expr, target: &Expr) -> bo if arg_id != target_id { return false; } - context.match_typing_expr(func, "cast") + model.match_typing_expr(func, "cast") } fn assignment_targets_from_expr<'a, U>( @@ -349,7 +345,7 @@ pub(crate) fn redefined_loop_name<'a, 'b>(checker: &'a mut Checker<'b>, node: &N }) .collect(); let mut visitor = InnerForWithAssignTargetsVisitor { - context: &checker.ctx, + context: checker.semantic_model(), dummy_variable_rgx: &checker.settings.dummy_variable_rgx, assignment_targets: vec![], }; @@ -369,7 +365,7 @@ pub(crate) fn redefined_loop_name<'a, 'b>(checker: &'a mut Checker<'b>, node: &N }) .collect(); let mut visitor = InnerForWithAssignTargetsVisitor { - context: &checker.ctx, + context: checker.semantic_model(), dummy_variable_rgx: &checker.settings.dummy_variable_rgx, assignment_targets: vec![], }; @@ -385,13 +381,15 @@ pub(crate) fn redefined_loop_name<'a, 'b>(checker: &'a mut Checker<'b>, node: &N Node::Expr(_) => panic!("redefined_loop_name called on Node that is not a Statement"), }; + let mut diagnostics = Vec::new(); + for outer_assignment_target in &outer_assignment_targets { for inner_assignment_target in &inner_assignment_targets { // Compare the targets structurally. 
if ComparableExpr::from(outer_assignment_target.expr) .eq(&(ComparableExpr::from(inner_assignment_target.expr))) { - checker.diagnostics.push(Diagnostic::new( + diagnostics.push(Diagnostic::new( RedefinedLoopName { name: checker.generator().expr(outer_assignment_target.expr), outer_kind: outer_assignment_target.binding_kind, @@ -402,4 +400,6 @@ pub(crate) fn redefined_loop_name<'a, 'b>(checker: &'a mut Checker<'b>, node: &N } } } + + checker.diagnostics.extend(diagnostics); } diff --git a/crates/ruff/src/rules/pylint/rules/repeated_isinstance_calls.rs b/crates/ruff/src/rules/pylint/rules/repeated_isinstance_calls.rs index f8bb082c836e1..002d31a071815 100644 --- a/crates/ruff/src/rules/pylint/rules/repeated_isinstance_calls.rs +++ b/crates/ruff/src/rules/pylint/rules/repeated_isinstance_calls.rs @@ -8,6 +8,34 @@ use ruff_python_ast::hashable::HashableExpr; use crate::checkers::ast::Checker; +/// ## What it does +/// Checks for repeated `isinstance` calls on the same object. +/// +/// ## Why is this bad? +/// Repeated `isinstance` calls on the same object can be merged into a +/// single call. +/// +/// ## Example +/// ```python +/// def is_number(x): +/// return isinstance(x, int) or isinstance(x, float) or isinstance(x, complex) +/// ``` +/// +/// Use instead: +/// ```python +/// def is_number(x): +/// return isinstance(x, (int, float, complex)) +/// ``` +/// +/// Or, for Python 3.10 and later: +/// +/// ```python +/// def is_number(x): +/// return isinstance(x, int | float | complex) +/// ``` +/// +/// ## References +/// - [Python documentation](https://docs.python.org/3/library/functions.html#isinstance) #[violation] pub struct RepeatedIsinstanceCalls { obj: String, @@ -30,7 +58,7 @@ pub(crate) fn repeated_isinstance_calls( op: Boolop, values: &[Expr], ) { - if !matches!(op, Boolop::Or) || !checker.ctx.is_builtin("isinstance") { + if !matches!(op, Boolop::Or) || !checker.semantic_model().is_builtin("isinstance") { return; } diff --git a/crates/ruff/src/rules/pylint/rules/return_in_init.rs b/crates/ruff/src/rules/pylint/rules/return_in_init.rs index d5f981b54517f..0bd7cd77583be 100644 --- a/crates/ruff/src/rules/pylint/rules/return_in_init.rs +++ b/crates/ruff/src/rules/pylint/rules/return_in_init.rs @@ -62,7 +62,7 @@ pub(crate) fn return_in_init(checker: &mut Checker, stmt: &Stmt) { } } - if in_dunder_init(checker) { + if in_dunder_init(checker.semantic_model(), checker.settings) { checker .diagnostics .push(Diagnostic::new(ReturnInInit, stmt.range())); diff --git a/crates/ruff/src/rules/pylint/rules/sys_exit_alias.rs b/crates/ruff/src/rules/pylint/rules/sys_exit_alias.rs index 83eeefa0fbb2d..7cdfd8ba1b921 100644 --- a/crates/ruff/src/rules/pylint/rules/sys_exit_alias.rs +++ b/crates/ruff/src/rules/pylint/rules/sys_exit_alias.rs @@ -3,10 +3,38 @@ use rustpython_parser::ast::{self, Expr, Ranged}; use ruff_diagnostics::{AutofixKind, Diagnostic, Edit, Fix, Violation}; use ruff_macros::{derive_message_formats, violation}; -use crate::autofix::actions::get_or_import_symbol; use crate::checkers::ast::Checker; use crate::registry::AsRule; +/// ## What it does +/// Checks for uses of the `exit()` and `quit()`. +/// +/// ## Why is this bad? +/// `exit` and `quit` come from the `site` module, which is typically imported +/// automatically during startup. However, it is not _guaranteed_ to be +/// imported, and so using these functions may result in a `NameError` at +/// runtime. Generally, these constants are intended to be used in an interactive +/// interpreter, and not in programs. 
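One quick way to see that this is not guaranteed is to start CPython with `site` initialization disabled; a small script along these lines (file name hypothetical) makes the difference visible.

```python
# Run once as `python demo.py` and once as `python -S demo.py`.
# With -S the `site` module is skipped, so the interactive helpers it normally
# injects into builtins (`exit`, `quit`, `help`, ...) are never installed.
import builtins
import sys

print("exit available:", hasattr(builtins, "exit"))
sys.exit(0)  # `sys` is always importable, so this line works in both runs
```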
+/// +/// Prefer `sys.exit()`, as the `sys` module is guaranteed to exist in all +/// contexts. +/// +/// ## Example +/// ```python +/// if __name__ == "__main__": +/// exit() +/// ``` +/// +/// Use instead: +/// ```python +/// import sys +/// +/// if __name__ == "__main__": +/// sys.exit() +/// ``` +/// +/// ## References +/// - [Python documentation](https://docs.python.org/3/library/constants.html#constants-added-by-the-site-module) #[violation] pub struct SysExitAlias { name: String, @@ -36,7 +64,7 @@ pub(crate) fn sys_exit_alias(checker: &mut Checker, func: &Expr) { if id != name { continue; } - if !checker.ctx.is_builtin(name) { + if !checker.semantic_model().is_builtin(name) { continue; } let mut diagnostic = Diagnostic::new( @@ -47,13 +75,11 @@ pub(crate) fn sys_exit_alias(checker: &mut Checker, func: &Expr) { ); if checker.patch(diagnostic.kind.rule()) { diagnostic.try_set_fix(|| { - let (import_edit, binding) = get_or_import_symbol( + let (import_edit, binding) = checker.importer.get_or_import_symbol( "sys", "exit", func.start(), - &checker.ctx, - &checker.importer, - checker.locator, + checker.semantic_model(), )?; let reference_edit = Edit::range_replacement(binding, func.range()); #[allow(deprecated)] diff --git a/crates/ruff/src/rules/pylint/rules/too_many_arguments.rs b/crates/ruff/src/rules/pylint/rules/too_many_arguments.rs index 8266aa290ede7..bd06b69c99560 100644 --- a/crates/ruff/src/rules/pylint/rules/too_many_arguments.rs +++ b/crates/ruff/src/rules/pylint/rules/too_many_arguments.rs @@ -6,6 +6,43 @@ use ruff_python_ast::helpers::identifier_range; use crate::checkers::ast::Checker; +/// ## What it does +/// Checks for function definitions that include too many arguments. +/// +/// By default, this rule allows up to five arguments, as configured by the +/// `pylint.max-args` option. +/// +/// ## Why is this bad? +/// Functions with many arguments are harder to understand, maintain, and call. +/// Consider refactoring functions with many arguments into smaller functions +/// with fewer arguments, or using objects to group related arguments. +/// +/// ## Example +/// ```python +/// def calculate_position(x_pos, y_pos, z_pos, x_vel, y_vel, z_vel, time): +/// new_x = x_pos + x_vel * time +/// new_y = y_pos + y_vel * time +/// new_z = z_pos + z_vel * time +/// return new_x, new_y, new_z +/// ``` +/// +/// Use instead: +/// ```python +/// from typing import NamedTuple +/// +/// +/// class Vector(NamedTuple): +/// x: float +/// y: float +/// z: float +/// +/// +/// def calculate_position(pos: Vector, vel: Vector, time: float) -> Vector: +/// return Vector(*(p + v * time for p, v in zip(pos, vel))) +/// ``` +/// +/// ## Options +/// - `pylint.max-args` #[violation] pub struct TooManyArguments { c_args: usize, diff --git a/crates/ruff/src/rules/pylint/rules/too_many_branches.rs b/crates/ruff/src/rules/pylint/rules/too_many_branches.rs index 8edede1586ad6..b597cbeae6722 100644 --- a/crates/ruff/src/rules/pylint/rules/too_many_branches.rs +++ b/crates/ruff/src/rules/pylint/rules/too_many_branches.rs @@ -5,6 +5,70 @@ use ruff_macros::{derive_message_formats, violation}; use ruff_python_ast::helpers::identifier_range; use ruff_python_ast::source_code::Locator; +/// ## What it does +/// Checks for functions or methods with too many branches. +/// +/// By default, this rule allows up to 12 branches. This can be configured +/// using the `max-branches` option. +/// +/// ## Why is this bad? 
+/// Functions or methods with many branches are harder to understand +/// and maintain than functions or methods with fewer branches. +/// +/// ## Example +/// ```python +/// def capital(country): +/// if country == "Australia": +/// return "Canberra" +/// elif country == "Brazil": +/// return "Brasilia" +/// elif country == "Canada": +/// return "Ottawa" +/// elif country == "England": +/// return "London" +/// elif country == "France": +/// return "Paris" +/// elif country == "Germany": +/// return "Berlin" +/// elif country == "Poland": +/// return "Warsaw" +/// elif country == "Romania": +/// return "Bucharest" +/// elif country == "Spain": +/// return "Madrid" +/// elif country == "Thailand": +/// return "Bangkok" +/// elif country == "Turkey": +/// return "Ankara" +/// elif country == "United States": +/// return "Washington" +/// else: +/// return "Unknown" # 13th branch +/// ``` +/// +/// Use instead: +/// ```python +/// def capital(country): +/// capitals = { +/// "Australia": "Canberra", +/// "Brazil": "Brasilia", +/// "Canada": "Ottawa", +/// "England": "London", +/// "France": "Paris", +/// "Germany": "Berlin", +/// "Poland": "Warsaw", +/// "Romania": "Bucharest", +/// "Spain": "Madrid", +/// "Thailand": "Bangkok", +/// "Turkey": "Ankara", +/// "United States": "Washington", +/// } +/// city = capitals.get(country, "Unknown") +/// return city +/// ``` +/// +/// ## References +/// - [Ruff configuration documentation](https://beta.ruff.rs/docs/settings/#max-branches) #[violation] pub struct TooManyBranches { branches: usize, diff --git a/crates/ruff/src/rules/pylint/rules/too_many_return_statements.rs b/crates/ruff/src/rules/pylint/rules/too_many_return_statements.rs index bdb59212dd71d..9ba5dd2202dcf 100644 --- a/crates/ruff/src/rules/pylint/rules/too_many_return_statements.rs +++ b/crates/ruff/src/rules/pylint/rules/too_many_return_statements.rs @@ -6,6 +6,51 @@ use ruff_python_ast::helpers::{identifier_range, ReturnStatementVisitor}; use ruff_python_ast::source_code::Locator; use ruff_python_ast::statement_visitor::StatementVisitor; +/// ## What it does +/// Checks for functions or methods with too many return statements. +/// +/// By default, this rule allows up to six return statements, as configured by +/// the `pylint.max-returns` option. +/// +/// ## Why is this bad? +/// Functions or methods with many return statements are harder to understand +/// and maintain, and often indicative of complex logic. 
+/// +/// ## Example +/// ```python +/// def capital(country: str) -> str | None: +/// if country == "England": +/// return "London" +/// elif country == "France": +/// return "Paris" +/// elif country == "Poland": +/// return "Warsaw" +/// elif country == "Romania": +/// return "Bucharest" +/// elif country == "Spain": +/// return "Madrid" +/// elif country == "Thailand": +/// return "Bangkok" +/// else: +/// return None +/// ``` +/// +/// Use instead: +/// ```python +/// def capital(country: str) -> str | None: +/// capitals = { +/// "England": "London", +/// "France": "Paris", +/// "Poland": "Warsaw", +/// "Romania": "Bucharest", +/// "Spain": "Madrid", +/// "Thailand": "Bangkok", +/// } +/// return capitals.get(country) +/// ``` +/// +/// ## Options +/// - `pylint.max-returns` #[violation] pub struct TooManyReturnStatements { returns: usize, diff --git a/crates/ruff/src/rules/pylint/rules/too_many_statements.rs b/crates/ruff/src/rules/pylint/rules/too_many_statements.rs index 829f85e471ca3..6912ca0c102b6 100644 --- a/crates/ruff/src/rules/pylint/rules/too_many_statements.rs +++ b/crates/ruff/src/rules/pylint/rules/too_many_statements.rs @@ -5,6 +5,47 @@ use ruff_macros::{derive_message_formats, violation}; use ruff_python_ast::helpers::identifier_range; use ruff_python_ast::source_code::Locator; +/// ## What it does +/// Checks for functions or methods with too many statements. +/// +/// By default, this rule allows up to 50 statements, as configured by the +/// `pylint.max-statements` option. +/// +/// ## Why is this bad? +/// Functions or methods with many statements are harder to understand +/// and maintain. +/// +/// Instead, consider refactoring the function or method into smaller +/// functions or methods, or identifying generalizable patterns and +/// replacing them with generic logic or abstractions. +/// +/// ## Example +/// ```python +/// def is_even(number: int) -> bool: +/// if number == 0: +/// return True +/// elif number == 1: +/// return False +/// elif number == 2: +/// return True +/// elif number == 3: +/// return False +/// elif number == 4: +/// return True +/// elif number == 5: +/// return False +/// else: +/// ... 
+/// ``` +/// +/// Use instead: +/// ```python +/// def is_even(number: int) -> bool: +/// return number % 2 == 0 +/// ``` +/// +/// ## Options +/// - `pylint.max-statements` #[violation] pub struct TooManyStatements { statements: usize, diff --git a/crates/ruff/src/rules/pylint/rules/unexpected_special_method_signature.rs b/crates/ruff/src/rules/pylint/rules/unexpected_special_method_signature.rs index 8204f44afcf4c..94b8cdbf0a7ed 100644 --- a/crates/ruff/src/rules/pylint/rules/unexpected_special_method_signature.rs +++ b/crates/ruff/src/rules/pylint/rules/unexpected_special_method_signature.rs @@ -145,7 +145,7 @@ pub(crate) fn unexpected_special_method_signature( args: &Arguments, locator: &Locator, ) { - if !checker.ctx.scope().kind.is_class() { + if !checker.semantic_model().scope().kind.is_class() { return; } @@ -163,7 +163,7 @@ pub(crate) fn unexpected_special_method_signature( let optional_params = args.defaults.len(); let mandatory_params = actual_params - optional_params; - let Some(expected_params) = ExpectedParams::from_method(name, is_staticmethod(&checker.ctx, decorator_list)) else { + let Some(expected_params) = ExpectedParams::from_method(name, is_staticmethod(checker.semantic_model(), decorator_list)) else { return; }; diff --git a/crates/ruff/src/rules/pylint/rules/yield_in_init.rs b/crates/ruff/src/rules/pylint/rules/yield_in_init.rs index f9f08650d463f..9ee87e70a712a 100644 --- a/crates/ruff/src/rules/pylint/rules/yield_in_init.rs +++ b/crates/ruff/src/rules/pylint/rules/yield_in_init.rs @@ -39,7 +39,7 @@ impl Violation for YieldInInit { /// PLE0100 pub(crate) fn yield_in_init(checker: &mut Checker, expr: &Expr) { - if in_dunder_init(checker) { + if in_dunder_init(checker.semantic_model(), checker.settings) { checker .diagnostics .push(Diagnostic::new(YieldInInit, expr.range())); diff --git a/crates/ruff/src/rules/pylint/settings.rs b/crates/ruff/src/rules/pylint/settings.rs index 2525d2f564ea2..bc40d7b6a4123 100644 --- a/crates/ruff/src/rules/pylint/settings.rs +++ b/crates/ruff/src/rules/pylint/settings.rs @@ -1,10 +1,11 @@ //! Settings for the `pylint` plugin. 
use anyhow::anyhow; -use ruff_macros::{CacheKey, CombineOptions, ConfigurationOptions}; use rustpython_parser::ast::Constant; use serde::{Deserialize, Serialize}; +use ruff_macros::{CacheKey, CombineOptions, ConfigurationOptions}; + #[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, CacheKey)] #[serde(deny_unknown_fields, rename_all = "kebab-case")] #[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] diff --git a/crates/ruff/src/rules/pylint/snapshots/ruff__rules__pylint__tests__PLR5501_collapsible_else_if.py.snap b/crates/ruff/src/rules/pylint/snapshots/ruff__rules__pylint__tests__PLR5501_collapsible_else_if.py.snap index 59d16efdead37..567c03d257f78 100644 --- a/crates/ruff/src/rules/pylint/snapshots/ruff__rules__pylint__tests__PLR5501_collapsible_else_if.py.snap +++ b/crates/ruff/src/rules/pylint/snapshots/ruff__rules__pylint__tests__PLR5501_collapsible_else_if.py.snap @@ -1,7 +1,7 @@ --- source: crates/ruff/src/rules/pylint/mod.rs --- -collapsible_else_if.py:38:9: PLR5501 Consider using `elif` instead of `else` then `if` to remove one indentation level +collapsible_else_if.py:38:9: PLR5501 Use `elif` instead of `else` then `if`, to reduce indentation | 38 | pass 39 | else: @@ -11,7 +11,7 @@ collapsible_else_if.py:38:9: PLR5501 Consider using `elif` instead of `else` the | |________________^ PLR5501 | -collapsible_else_if.py:46:9: PLR5501 Consider using `elif` instead of `else` then `if` to remove one indentation level +collapsible_else_if.py:46:9: PLR5501 Use `elif` instead of `else` then `if`, to reduce indentation | 46 | pass 47 | else: diff --git a/crates/ruff/src/rules/pylint/snapshots/ruff__rules__pylint__tests__PLW0130_duplicate_value.py.snap b/crates/ruff/src/rules/pylint/snapshots/ruff__rules__pylint__tests__PLW0130_duplicate_value.py.snap new file mode 100644 index 0000000000000..6fdf6149d7202 --- /dev/null +++ b/crates/ruff/src/rules/pylint/snapshots/ruff__rules__pylint__tests__PLW0130_duplicate_value.py.snap @@ -0,0 +1,23 @@ +--- +source: crates/ruff/src/rules/pylint/mod.rs +--- +duplicate_value.py:4:35: PLW0130 Duplicate value `"value1"` in set + | +4 | # Errors. 
+5 | ### +6 | incorrect_set = {"value1", 23, 5, "value1"} + | ^^^^^^^^ PLW0130 +7 | incorrect_set = {1, 1} + | + +duplicate_value.py:5:21: PLW0130 Duplicate value `1` in set + | +5 | ### +6 | incorrect_set = {"value1", 23, 5, "value1"} +7 | incorrect_set = {1, 1} + | ^ PLW0130 +8 | +9 | ### + | + + diff --git a/crates/ruff/src/rules/pylint/snapshots/ruff__rules__pylint__tests__PLW0131_named_expr_without_context.py.snap b/crates/ruff/src/rules/pylint/snapshots/ruff__rules__pylint__tests__PLW0131_named_expr_without_context.py.snap new file mode 100644 index 0000000000000..141525c79698b --- /dev/null +++ b/crates/ruff/src/rules/pylint/snapshots/ruff__rules__pylint__tests__PLW0131_named_expr_without_context.py.snap @@ -0,0 +1,28 @@ +--- +source: crates/ruff/src/rules/pylint/mod.rs +--- +named_expr_without_context.py:2:2: PLW0131 Named expression used without context + | +2 | # Errors +3 | (a := 42) + | ^^^^^^^ PLW0131 +4 | if True: +5 | (b := 1) + | + +named_expr_without_context.py:4:6: PLW0131 Named expression used without context + | +4 | (a := 42) +5 | if True: +6 | (b := 1) + | ^^^^^^ PLW0131 + | + +named_expr_without_context.py:8:6: PLW0131 Named expression used without context + | +8 | class Foo: +9 | (c := 1) + | ^^^^^^ PLW0131 + | + + diff --git a/crates/ruff/src/rules/pyupgrade/fixes.rs b/crates/ruff/src/rules/pyupgrade/fixes.rs index d29555109af05..ce242175f1b8f 100644 --- a/crates/ruff/src/rules/pyupgrade/fixes.rs +++ b/crates/ruff/src/rules/pyupgrade/fixes.rs @@ -1,8 +1,5 @@ -use anyhow::{bail, Result}; -use libcst_native::{ - Codegen, CodegenState, CompoundStatement, Expression, ParenthesizableWhitespace, - SmallStatement, Statement, Suite, -}; +use anyhow::Result; +use libcst_native::{Codegen, CodegenState, ParenthesizableWhitespace}; use ruff_text_size::{TextRange, TextSize}; use rustpython_parser::ast::{Expr, Ranged}; use rustpython_parser::{lexer, Mode, Tok}; @@ -10,7 +7,9 @@ use rustpython_parser::{lexer, Mode, Tok}; use ruff_diagnostics::Edit; use ruff_python_ast::source_code::{Locator, Stylist}; -use crate::cst::matchers::match_module; +use crate::cst::matchers::{ + match_call_mut, match_expression, match_function_def, match_indented_block, match_statement, +}; /// Safely adjust the indentation of the indented block at [`TextRange`]. pub(crate) fn adjust_indentation( @@ -23,15 +22,11 @@ pub(crate) fn adjust_indentation( let module_text = format!("def f():{}{contents}", stylist.line_ending().as_str()); - let mut tree = match_module(&module_text)?; + let mut tree = match_statement(&module_text)?; - let [Statement::Compound(CompoundStatement::FunctionDef(embedding))] = &mut *tree.body else { - bail!("Expected statement to be embedded in a function definition") - }; + let embedding = match_function_def(&mut tree)?; - let Suite::IndentedBlock(indented_block) = &mut embedding.body else { - bail!("Expected indented block") - }; + let indented_block = match_indented_block(&mut embedding.body)?; indented_block.indent = Some(indentation); let mut state = CodegenState { @@ -58,17 +53,9 @@ pub(crate) fn remove_super_arguments( let range = expr.range(); let contents = locator.slice(range); - let mut tree = libcst_native::parse_module(contents, None).ok()?; + let mut tree = match_expression(contents).ok()?; - let Statement::Simple(body) = tree.body.first_mut()? else { - return None; - }; - let SmallStatement::Expr(body) = body.body.first_mut()? 
else { - return None; - }; - let Expression::Call(body) = &mut body.value else { - return None; - }; + let body = match_call_mut(&mut tree).ok()?; body.args = vec![]; body.whitespace_before_args = ParenthesizableWhitespace::default(); diff --git a/crates/ruff/src/rules/pyupgrade/mod.rs b/crates/ruff/src/rules/pyupgrade/mod.rs index 22c30c7e7bf6c..e702f75279404 100644 --- a/crates/ruff/src/rules/pyupgrade/mod.rs +++ b/crates/ruff/src/rules/pyupgrade/mod.rs @@ -9,7 +9,6 @@ mod tests { use std::path::Path; use anyhow::Result; - use test_case::test_case; use crate::registry::Rule; @@ -59,6 +58,7 @@ mod tests { #[test_case(Rule::PrintfStringFormatting, Path::new("UP031_1.py"); "UP031_1")] #[test_case(Rule::FString, Path::new("UP032_0.py"); "UP032_0")] #[test_case(Rule::FString, Path::new("UP032_1.py"); "UP032_1")] + #[test_case(Rule::FString, Path::new("UP032_2.py"); "UP032_2")] #[test_case(Rule::LRUCacheWithMaxsizeNone, Path::new("UP033_0.py"); "UP033_0")] #[test_case(Rule::LRUCacheWithMaxsizeNone, Path::new("UP033_1.py"); "UP033_1")] #[test_case(Rule::ExtraneousParentheses, Path::new("UP034.py"); "UP034")] diff --git a/crates/ruff/src/rules/pyupgrade/rules/convert_named_tuple_functional_to_class.rs b/crates/ruff/src/rules/pyupgrade/rules/convert_named_tuple_functional_to_class.rs index 0143fa4a2e91c..f5f386492b44d 100644 --- a/crates/ruff/src/rules/pyupgrade/rules/convert_named_tuple_functional_to_class.rs +++ b/crates/ruff/src/rules/pyupgrade/rules/convert_named_tuple_functional_to_class.rs @@ -5,8 +5,8 @@ use rustpython_parser::ast::{self, Constant, Expr, ExprContext, Keyword, Ranged, use ruff_diagnostics::{AutofixKind, Diagnostic, Edit, Fix, Violation}; use ruff_macros::{derive_message_formats, violation}; - use ruff_python_ast::source_code::Generator; +use ruff_python_semantic::model::SemanticModel; use ruff_python_stdlib::identifiers::is_identifier; use crate::checkers::ast::Checker; @@ -35,7 +35,7 @@ impl Violation for ConvertNamedTupleFunctionalToClass { /// Return the typename, args, keywords, and base class. 
fn match_named_tuple_assign<'a>( - checker: &Checker, + model: &SemanticModel, targets: &'a [Expr], value: &'a Expr, ) -> Option<(&'a str, &'a [Expr], &'a [Keyword], &'a Expr)> { @@ -51,13 +51,9 @@ fn match_named_tuple_assign<'a>( }) = value else { return None; }; - if !checker - .ctx - .resolve_call_path(func) - .map_or(false, |call_path| { - call_path.as_slice() == ["typing", "NamedTuple"] - }) - { + if !model.resolve_call_path(func).map_or(false, |call_path| { + call_path.as_slice() == ["typing", "NamedTuple"] + }) { return None; } Some((typename, args, keywords, func)) @@ -188,7 +184,7 @@ pub(crate) fn convert_named_tuple_functional_to_class( value: &Expr, ) { let Some((typename, args, keywords, base_class)) = - match_named_tuple_assign(checker, targets, value) else + match_named_tuple_assign(checker.semantic_model(), targets, value) else { return; }; diff --git a/crates/ruff/src/rules/pyupgrade/rules/convert_typed_dict_functional_to_class.rs b/crates/ruff/src/rules/pyupgrade/rules/convert_typed_dict_functional_to_class.rs index 79921376127b0..3e51f00c6c8e9 100644 --- a/crates/ruff/src/rules/pyupgrade/rules/convert_typed_dict_functional_to_class.rs +++ b/crates/ruff/src/rules/pyupgrade/rules/convert_typed_dict_functional_to_class.rs @@ -5,8 +5,8 @@ use rustpython_parser::ast::{self, Constant, Expr, ExprContext, Keyword, Ranged, use ruff_diagnostics::{AutofixKind, Diagnostic, Edit, Fix, Violation}; use ruff_macros::{derive_message_formats, violation}; - use ruff_python_ast::source_code::Generator; +use ruff_python_semantic::model::SemanticModel; use ruff_python_stdlib::identifiers::is_identifier; use crate::checkers::ast::Checker; @@ -36,7 +36,7 @@ impl Violation for ConvertTypedDictFunctionalToClass { /// Return the class name, arguments, keywords and base class for a `TypedDict` /// assignment. 
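Both matchers feed the same kind of rewrite, from the functional call form to a class definition. A rough Python before and after, using invented type names, looks like this.

```python
from typing import NamedTuple, TypedDict

# Functional forms that `match_named_tuple_assign` and `match_typed_dict_assign`
# look for:
Point = NamedTuple("Point", [("x", int), ("y", int)])
Movie = TypedDict("Movie", {"title": str, "year": int})


# The class-based equivalents that the two rules rewrite them to:
class Point(NamedTuple):
    x: int
    y: int


class Movie(TypedDict):
    title: str
    year: int


print(Point(x=1, y=2), Movie(title="Alien", year=1979))
```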
fn match_typed_dict_assign<'a>( - checker: &Checker, + model: &SemanticModel, targets: &'a [Expr], value: &'a Expr, ) -> Option<(&'a str, &'a [Expr], &'a [Keyword], &'a Expr)> { @@ -52,13 +52,9 @@ fn match_typed_dict_assign<'a>( }) = value else { return None; }; - if !checker - .ctx - .resolve_call_path(func) - .map_or(false, |call_path| { - call_path.as_slice() == ["typing", "TypedDict"] - }) - { + if !model.resolve_call_path(func).map_or(false, |call_path| { + call_path.as_slice() == ["typing", "TypedDict"] + }) { return None; } Some((class_name, args, keywords, func)) @@ -244,7 +240,7 @@ pub(crate) fn convert_typed_dict_functional_to_class( value: &Expr, ) { let Some((class_name, args, keywords, base_class)) = - match_typed_dict_assign(checker, targets, value) else + match_typed_dict_assign(checker.semantic_model(), targets, value) else { return; }; diff --git a/crates/ruff/src/rules/pyupgrade/rules/datetime_utc_alias.rs b/crates/ruff/src/rules/pyupgrade/rules/datetime_utc_alias.rs index 01055b6ba091f..16aa05a1454dc 100644 --- a/crates/ruff/src/rules/pyupgrade/rules/datetime_utc_alias.rs +++ b/crates/ruff/src/rules/pyupgrade/rules/datetime_utc_alias.rs @@ -28,7 +28,7 @@ impl Violation for DatetimeTimezoneUTC { /// UP017 pub(crate) fn datetime_utc_alias(checker: &mut Checker, expr: &Expr) { if checker - .ctx + .semantic_model() .resolve_call_path(expr) .map_or(false, |call_path| { call_path.as_slice() == ["datetime", "timezone", "utc"] diff --git a/crates/ruff/src/rules/pyupgrade/rules/deprecated_mock_import.rs b/crates/ruff/src/rules/pyupgrade/rules/deprecated_mock_import.rs index 6745134fd4aee..d99c272926d71 100644 --- a/crates/ruff/src/rules/pyupgrade/rules/deprecated_mock_import.rs +++ b/crates/ruff/src/rules/pyupgrade/rules/deprecated_mock_import.rs @@ -13,7 +13,7 @@ use ruff_python_ast::source_code::{Locator, Stylist}; use ruff_python_ast::whitespace::indentation; use crate::checkers::ast::Checker; -use crate::cst::matchers::{match_import, match_import_from, match_module}; +use crate::cst::matchers::{match_import, match_import_from, match_statement}; use crate::registry::{AsRule, Rule}; #[derive(Debug, PartialEq, Eq, Copy, Clone)] @@ -126,7 +126,7 @@ fn format_import( stylist: &Stylist, ) -> Result { let module_text = locator.slice(stmt.range()); - let mut tree = match_module(module_text)?; + let mut tree = match_statement(module_text)?; let mut import = match_import(&mut tree)?; let Import { names, .. 
} = import.clone(); @@ -160,7 +160,7 @@ fn format_import_from( stylist: &Stylist, ) -> Result { let module_text = locator.slice(stmt.range()); - let mut tree = match_module(module_text).unwrap(); + let mut tree = match_statement(module_text).unwrap(); let mut import = match_import_from(&mut tree)?; if let ImportFrom { diff --git a/crates/ruff/src/rules/pyupgrade/rules/f_strings.rs b/crates/ruff/src/rules/pyupgrade/rules/f_strings.rs index e21bbf0869988..2c23330d2dd9b 100644 --- a/crates/ruff/src/rules/pyupgrade/rules/f_strings.rs +++ b/crates/ruff/src/rules/pyupgrade/rules/f_strings.rs @@ -1,3 +1,5 @@ +use std::borrow::Cow; + use ruff_text_size::TextRange; use rustc_hash::FxHashMap; use rustpython_format::{ @@ -7,6 +9,7 @@ use rustpython_parser::ast::{self, Constant, Expr, Keyword, Ranged}; use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit, Fix}; use ruff_macros::{derive_message_formats, violation}; +use ruff_python_ast::source_code::Locator; use ruff_python_ast::str::{is_implicit_concatenation, leading_quote, trailing_quote}; use crate::checkers::ast::Checker; @@ -34,21 +37,20 @@ impl AlwaysAutofixableViolation for FString { /// respectively. #[derive(Debug)] struct FormatSummaryValues<'a> { - args: Vec, - kwargs: FxHashMap<&'a str, String>, + args: Vec<&'a Expr>, + kwargs: FxHashMap<&'a str, &'a Expr>, } impl<'a> FormatSummaryValues<'a> { - fn try_from_expr(checker: &'a Checker, expr: &'a Expr) -> Option { - let mut extracted_args: Vec = Vec::new(); - let mut extracted_kwargs: FxHashMap<&str, String> = FxHashMap::default(); + fn try_from_expr(expr: &'a Expr, locator: &'a Locator) -> Option { + let mut extracted_args: Vec<&Expr> = Vec::new(); + let mut extracted_kwargs: FxHashMap<&str, &Expr> = FxHashMap::default(); if let Expr::Call(ast::ExprCall { args, keywords, .. }) = expr { for arg in args { - let arg = checker.locator.slice(arg.range()); - if contains_invalids(arg) { + if contains_invalids(locator.slice(arg.range())) { return None; } - extracted_args.push(arg.to_string()); + extracted_args.push(arg); } for keyword in keywords { let Keyword { @@ -57,11 +59,10 @@ impl<'a> FormatSummaryValues<'a> { range: _, } = keyword; if let Some(key) = arg { - let kwarg = checker.locator.slice(value.range()); - if contains_invalids(kwarg) { + if contains_invalids(locator.slice(value.range())) { return None; } - extracted_kwargs.insert(key, kwarg.to_string()); + extracted_kwargs.insert(key, value); } } } @@ -76,7 +77,7 @@ impl<'a> FormatSummaryValues<'a> { }) } - fn consume_next(&mut self) -> Option { + fn consume_next(&mut self) -> Option<&Expr> { if self.args.is_empty() { None } else { @@ -84,7 +85,7 @@ impl<'a> FormatSummaryValues<'a> { } } - fn consume_arg(&mut self, index: usize) -> Option { + fn consume_arg(&mut self, index: usize) -> Option<&Expr> { if self.args.len() > index { Some(self.args.remove(index)) } else { @@ -92,13 +93,13 @@ impl<'a> FormatSummaryValues<'a> { } } - fn consume_kwarg(&mut self, key: &str) -> Option { + fn consume_kwarg(&mut self, key: &str) -> Option<&Expr> { self.kwargs.remove(key) } } -/// Return `true` if the string contains characters that are forbidden in -/// argument identifier. +/// Return `true` if the string contains characters that are forbidden by +/// argument identifiers. 
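The `FormatContext` logic introduced just below decides when a substituted argument must be wrapped in parentheses. The following is a hand-written illustration of the conversions the `FString` rule (UP032) aims for, not actual autofix output.

```python
first, second = "spam", "eggs"

# A plain replacement field needs no extra parentheses:
#   "{} and {}".format(first, second)  ->  f"{first} and {second}"
print(f"{first} and {second}")

# When the field goes on to index or access the value, a compound argument has
# to be parenthesized so the f-string parses the same way:
#   "{0[0]}".format(first + second)    ->  f"{(first + second)[0]}"
print(f"{(first + second)[0]}")

# Integer literals need the same treatment for attribute access:
#   "{0.real}".format(12)              ->  f"{(12).real}"
print(f"{(12).real}")
```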
fn contains_invalids(string: &str) -> bool { string.contains('*') || string.contains('\'') @@ -106,8 +107,61 @@ fn contains_invalids(string: &str) -> bool { || string.contains("await") } +enum FormatContext { + /// The expression is used as a bare format spec (e.g., `{x}`). + Bare, + /// The expression is used with conversion flags, or attribute or subscript access + /// (e.g., `{x!r}`, `{x.y}`, `{x[y]}`). + Accessed, +} + +/// Given an [`Expr`], format it for use in a formatted expression within an f-string. +fn formatted_expr<'a>(expr: &Expr, context: FormatContext, locator: &Locator<'a>) -> Cow<'a, str> { + let text = locator.slice(expr.range()); + let parenthesize = match (context, expr) { + // E.g., `x + y` should be parenthesized in `f"{(x + y)[0]}"`. + ( + FormatContext::Accessed, + Expr::BinOp(_) + | Expr::UnaryOp(_) + | Expr::BoolOp(_) + | Expr::NamedExpr(_) + | Expr::Compare(_) + | Expr::IfExp(_) + | Expr::Lambda(_) + | Expr::Await(_) + | Expr::Yield(_) + | Expr::YieldFrom(_) + | Expr::Starred(_), + ) => true, + // E.g., `12` should be parenthesized in `f"{(12).real}"`. + ( + FormatContext::Accessed, + Expr::Constant(ast::ExprConstant { + value: Constant::Int(..), + .. + }), + ) => text.chars().all(|c| c.is_ascii_digit()), + // E.g., `{x, y}` should be parenthesized in `f"{(x, y)}"`. + ( + _, + Expr::GeneratorExp(_) + | Expr::Dict(_) + | Expr::Set(_) + | Expr::SetComp(_) + | Expr::DictComp(_), + ) => true, + _ => false, + }; + if parenthesize && !text.starts_with('(') && !text.ends_with(')') { + Cow::Owned(format!("({text})")) + } else { + Cow::Borrowed(text) + } +} + /// Generate an f-string from an [`Expr`]. -fn try_convert_to_f_string(checker: &Checker, expr: &Expr) -> Option { +fn try_convert_to_f_string(expr: &Expr, locator: &Locator) -> Option { let Expr::Call(ast::ExprCall { func, .. }) = expr else { return None; }; @@ -124,11 +178,11 @@ fn try_convert_to_f_string(checker: &Checker, expr: &Expr) -> Option { return None; }; - let Some(mut summary) = FormatSummaryValues::try_from_expr(checker, expr) else { + let Some(mut summary) = FormatSummaryValues::try_from_expr( expr, locator) else { return None; }; - let contents = checker.locator.slice(value.range()); + let contents = locator.slice(value.range()); // Skip implicit string concatenations. 
if is_implicit_concatenation(contents) { @@ -171,26 +225,20 @@ fn try_convert_to_f_string(checker: &Checker, expr: &Expr) -> Option { converted.push('{'); let field = FieldName::parse(&field_name).ok()?; - match field.field_type { - FieldType::Auto => { - let Some(arg) = summary.consume_next() else { - return None; - }; - converted.push_str(&arg); - } - FieldType::Index(index) => { - let Some(arg) = summary.consume_arg(index) else { - return None; - }; - converted.push_str(&arg); - } - FieldType::Keyword(name) => { - let Some(arg) = summary.consume_kwarg(&name) else { - return None; - }; - converted.push_str(&arg); - } - } + let arg = match field.field_type { + FieldType::Auto => summary.consume_next(), + FieldType::Index(index) => summary.consume_arg(index), + FieldType::Keyword(name) => summary.consume_kwarg(&name), + }?; + converted.push_str(&formatted_expr( + arg, + if field.parts.is_empty() { + FormatContext::Bare + } else { + FormatContext::Accessed + }, + locator, + )); for part in field.parts { match part { @@ -258,7 +306,7 @@ pub(crate) fn f_strings(checker: &mut Checker, summary: &FormatSummary, expr: &E // Currently, the only issue we know of is in LibCST: // https://github.com/Instagram/LibCST/issues/846 - let Some(mut contents) = try_convert_to_f_string(checker, expr) else { + let Some(mut contents) = try_convert_to_f_string( expr, checker.locator) else { return; }; diff --git a/crates/ruff/src/rules/pyupgrade/rules/format_literals.rs b/crates/ruff/src/rules/pyupgrade/rules/format_literals.rs index 802d63d91f4be..5352f3c008ff3 100644 --- a/crates/ruff/src/rules/pyupgrade/rules/format_literals.rs +++ b/crates/ruff/src/rules/pyupgrade/rules/format_literals.rs @@ -1,5 +1,5 @@ use anyhow::{anyhow, bail, Result}; -use libcst_native::{Arg, Codegen, CodegenState, Expression}; +use libcst_native::{Arg, Codegen, CodegenState}; use once_cell::sync::Lazy; use regex::Regex; use rustpython_parser::ast::{Expr, Ranged}; @@ -9,7 +9,7 @@ use ruff_macros::{derive_message_formats, violation}; use ruff_python_ast::source_code::{Locator, Stylist}; use crate::checkers::ast::Checker; -use crate::cst::matchers::{match_call, match_expression}; +use crate::cst::matchers::{match_attribute, match_call_mut, match_expression}; use crate::registry::AsRule; use crate::rules::pyflakes::format::FormatSummary; @@ -89,7 +89,7 @@ fn generate_call( ) -> Result { let module_text = locator.slice(expr.range()); let mut expression = match_expression(module_text)?; - let mut call = match_call(&mut expression)?; + let mut call = match_call_mut(&mut expression)?; // Fix the call arguments. if !is_sequential(correct_order) { @@ -97,9 +97,7 @@ fn generate_call( } // Fix the string itself. 
- let Expression::Attribute(item) = &*call.func else { - panic!("Expected: Expression::Attribute") - }; + let item = match_attribute(&mut call.func)?; let mut state = CodegenState { default_newline: &stylist.line_ending(), diff --git a/crates/ruff/src/rules/pyupgrade/rules/lru_cache_with_maxsize_none.rs b/crates/ruff/src/rules/pyupgrade/rules/lru_cache_with_maxsize_none.rs index 1eb96710d551b..d8753fd85f5eb 100644 --- a/crates/ruff/src/rules/pyupgrade/rules/lru_cache_with_maxsize_none.rs +++ b/crates/ruff/src/rules/pyupgrade/rules/lru_cache_with_maxsize_none.rs @@ -4,7 +4,6 @@ use rustpython_parser::ast::{self, Constant, Expr, Keyword, Ranged}; use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit, Fix}; use ruff_macros::{derive_message_formats, violation}; -use crate::autofix::actions::get_or_import_symbol; use crate::checkers::ast::Checker; use crate::registry::AsRule; @@ -38,7 +37,7 @@ pub(crate) fn lru_cache_with_maxsize_none(checker: &mut Checker, decorator_list: if args.is_empty() && keywords.len() == 1 && checker - .ctx + .semantic_model() .resolve_call_path(func) .map_or(false, |call_path| { call_path.as_slice() == ["functools", "lru_cache"] @@ -65,13 +64,11 @@ pub(crate) fn lru_cache_with_maxsize_none(checker: &mut Checker, decorator_list: ); if checker.patch(diagnostic.kind.rule()) { diagnostic.try_set_fix(|| { - let (import_edit, binding) = get_or_import_symbol( + let (import_edit, binding) = checker.importer.get_or_import_symbol( "functools", "cache", expr.start(), - &checker.ctx, - &checker.importer, - checker.locator, + checker.semantic_model(), )?; let reference_edit = Edit::range_replacement(binding, expr.range()); #[allow(deprecated)] diff --git a/crates/ruff/src/rules/pyupgrade/rules/lru_cache_without_parameters.rs b/crates/ruff/src/rules/pyupgrade/rules/lru_cache_without_parameters.rs index 3675ecfc09ffc..f276d8f8e526b 100644 --- a/crates/ruff/src/rules/pyupgrade/rules/lru_cache_without_parameters.rs +++ b/crates/ruff/src/rules/pyupgrade/rules/lru_cache_without_parameters.rs @@ -37,7 +37,7 @@ pub(crate) fn lru_cache_without_parameters(checker: &mut Checker, decorator_list if args.is_empty() && keywords.is_empty() && checker - .ctx + .semantic_model() .resolve_call_path(func) .map_or(false, |call_path| { call_path.as_slice() == ["functools", "lru_cache"] diff --git a/crates/ruff/src/rules/pyupgrade/rules/native_literals.rs b/crates/ruff/src/rules/pyupgrade/rules/native_literals.rs index 4d154bb761197..5c5172791bfdc 100644 --- a/crates/ruff/src/rules/pyupgrade/rules/native_literals.rs +++ b/crates/ruff/src/rules/pyupgrade/rules/native_literals.rs @@ -4,7 +4,6 @@ use rustpython_parser::ast::{self, Constant, Expr, Keyword, Ranged}; use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit, Fix}; use ruff_macros::{derive_message_formats, violation}; - use ruff_python_ast::str::is_implicit_concatenation; use crate::checkers::ast::Checker; @@ -57,7 +56,7 @@ pub(crate) fn native_literals( return; } - if (id == "str" || id == "bytes") && checker.ctx.is_builtin(id) { + if (id == "str" || id == "bytes") && checker.semantic_model().is_builtin(id) { let Some(arg) = args.get(0) else { let mut diagnostic = Diagnostic::new(NativeLiterals{literal_type:if id == "str" { LiteralType::Str diff --git a/crates/ruff/src/rules/pyupgrade/rules/open_alias.rs b/crates/ruff/src/rules/pyupgrade/rules/open_alias.rs index a7f8372a62fcc..b51c115271b2d 100644 --- a/crates/ruff/src/rules/pyupgrade/rules/open_alias.rs +++ b/crates/ruff/src/rules/pyupgrade/rules/open_alias.rs @@ 
-25,12 +25,12 @@ impl Violation for OpenAlias { /// UP020 pub(crate) fn open_alias(checker: &mut Checker, expr: &Expr, func: &Expr) { if checker - .ctx + .semantic_model() .resolve_call_path(func) .map_or(false, |call_path| call_path.as_slice() == ["io", "open"]) { let fixable = checker - .ctx + .semantic_model() .find_binding("open") .map_or(true, |binding| binding.kind.is_builtin()); let mut diagnostic = Diagnostic::new(OpenAlias, expr.range()); diff --git a/crates/ruff/src/rules/pyupgrade/rules/os_error_alias.rs b/crates/ruff/src/rules/pyupgrade/rules/os_error_alias.rs index c269474b2843d..82a3da68ba452 100644 --- a/crates/ruff/src/rules/pyupgrade/rules/os_error_alias.rs +++ b/crates/ruff/src/rules/pyupgrade/rules/os_error_alias.rs @@ -4,8 +4,7 @@ use rustpython_parser::ast::{self, Excepthandler, Expr, ExprContext, Ranged}; use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit, Fix}; use ruff_macros::{derive_message_formats, violation}; use ruff_python_ast::call_path::compose_call_path; - -use ruff_python_semantic::context::Context; +use ruff_python_semantic::model::SemanticModel; use crate::checkers::ast::Checker; use crate::registry::AsRule; @@ -40,8 +39,8 @@ const ALIASES: &[(&str, &str)] = &[ ]; /// Return `true` if an [`Expr`] is an alias of `OSError`. -fn is_alias(context: &Context, expr: &Expr) -> bool { - context.resolve_call_path(expr).map_or(false, |call_path| { +fn is_alias(model: &SemanticModel, expr: &Expr) -> bool { + model.resolve_call_path(expr).map_or(false, |call_path| { ALIASES .iter() .any(|(module, member)| call_path.as_slice() == [*module, *member]) @@ -49,8 +48,8 @@ fn is_alias(context: &Context, expr: &Expr) -> bool { } /// Return `true` if an [`Expr`] is `OSError`. -fn is_os_error(context: &Context, expr: &Expr) -> bool { - context +fn is_os_error(model: &SemanticModel, expr: &Expr) -> bool { + model .resolve_call_path(expr) .map_or(false, |call_path| call_path.as_slice() == ["", "OSError"]) } @@ -94,7 +93,10 @@ fn tuple_diagnostic(checker: &mut Checker, target: &Expr, aliases: &[&Expr]) { .collect(); // If `OSError` itself isn't already in the tuple, add it. - if elts.iter().all(|elt| !is_os_error(&checker.ctx, elt)) { + if elts + .iter() + .all(|elt| !is_os_error(checker.semantic_model(), elt)) + { let node = ast::ExprName { id: "OSError".into(), ctx: ExprContext::Load, @@ -134,7 +136,7 @@ pub(crate) fn os_error_alias_handlers(checker: &mut Checker, handlers: &[Excepth }; match expr.as_ref() { Expr::Name(_) | Expr::Attribute(_) => { - if is_alias(&checker.ctx, expr) { + if is_alias(checker.semantic_model(), expr) { atom_diagnostic(checker, expr); } } @@ -142,7 +144,7 @@ pub(crate) fn os_error_alias_handlers(checker: &mut Checker, handlers: &[Excepth // List of aliases to replace with `OSError`. 
let mut aliases: Vec<&Expr> = vec![]; for elt in elts { - if is_alias(&checker.ctx, elt) { + if is_alias(checker.semantic_model(), elt) { aliases.push(elt); } } @@ -157,7 +159,7 @@ pub(crate) fn os_error_alias_handlers(checker: &mut Checker, handlers: &[Excepth /// UP024 pub(crate) fn os_error_alias_call(checker: &mut Checker, func: &Expr) { - if is_alias(&checker.ctx, func) { + if is_alias(checker.semantic_model(), func) { atom_diagnostic(checker, func); } } @@ -165,7 +167,7 @@ pub(crate) fn os_error_alias_call(checker: &mut Checker, func: &Expr) { /// UP024 pub(crate) fn os_error_alias_raise(checker: &mut Checker, expr: &Expr) { if matches!(expr, Expr::Name(_) | Expr::Attribute(_)) { - if is_alias(&checker.ctx, expr) { + if is_alias(checker.semantic_model(), expr) { atom_diagnostic(checker, expr); } } diff --git a/crates/ruff/src/rules/pyupgrade/rules/outdated_version_block.rs b/crates/ruff/src/rules/pyupgrade/rules/outdated_version_block.rs index 6419c73ed1e44..cbeaa40f29e84 100644 --- a/crates/ruff/src/rules/pyupgrade/rules/outdated_version_block.rs +++ b/crates/ruff/src/rules/pyupgrade/rules/outdated_version_block.rs @@ -164,8 +164,8 @@ fn fix_py2_block( // of its parent, so avoid passing in the parent at all. Otherwise, // `delete_stmt` will erroneously include a `pass`. let deleted: Vec<&Stmt> = checker.deletions.iter().map(Into::into).collect(); - let defined_by = checker.ctx.stmt(); - let defined_in = checker.ctx.stmt_parent(); + let defined_by = checker.semantic_model().stmt(); + let defined_in = checker.semantic_model().stmt_parent(); return match delete_stmt( defined_by, if block.starter == Tok::If { @@ -323,7 +323,7 @@ pub(crate) fn outdated_version_block( }; if !checker - .ctx + .semantic_model() .resolve_call_path(left) .map_or(false, |call_path| { call_path.as_slice() == ["sys", "version_info"] diff --git a/crates/ruff/src/rules/pyupgrade/rules/printf_string_formatting.rs b/crates/ruff/src/rules/pyupgrade/rules/printf_string_formatting.rs index 08aa5fda1e2da..5bcf6c764daec 100644 --- a/crates/ruff/src/rules/pyupgrade/rules/printf_string_formatting.rs +++ b/crates/ruff/src/rules/pyupgrade/rules/printf_string_formatting.rs @@ -1,6 +1,6 @@ -use ruff_text_size::TextRange; use std::str::FromStr; +use ruff_text_size::TextRange; use rustpython_format::cformat::{ CConversionFlags, CFormatPart, CFormatPrecision, CFormatQuantity, CFormatString, }; diff --git a/crates/ruff/src/rules/pyupgrade/rules/quoted_annotation.rs b/crates/ruff/src/rules/pyupgrade/rules/quoted_annotation.rs index 967abefee0a30..1a8a29e0a6c1b 100644 --- a/crates/ruff/src/rules/pyupgrade/rules/quoted_annotation.rs +++ b/crates/ruff/src/rules/pyupgrade/rules/quoted_annotation.rs @@ -1,6 +1,7 @@ +use ruff_text_size::TextRange; + use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit, Fix}; use ruff_macros::{derive_message_formats, violation}; -use ruff_text_size::TextRange; use crate::checkers::ast::Checker; use crate::registry::Rule; diff --git a/crates/ruff/src/rules/pyupgrade/rules/redundant_open_modes.rs b/crates/ruff/src/rules/pyupgrade/rules/redundant_open_modes.rs index b7afd6a51c468..9dabf61d244b8 100644 --- a/crates/ruff/src/rules/pyupgrade/rules/redundant_open_modes.rs +++ b/crates/ruff/src/rules/pyupgrade/rules/redundant_open_modes.rs @@ -173,7 +173,7 @@ fn create_remove_param_fix(locator: &Locator, expr: &Expr, mode_param: &Expr) -> /// UP015 pub(crate) fn redundant_open_modes(checker: &mut Checker, expr: &Expr) { // If `open` has been rebound, skip this check entirely. 
- if !checker.ctx.is_builtin(OPEN_FUNC_NAME) { + if !checker.semantic_model().is_builtin(OPEN_FUNC_NAME) { return; } let (mode_param, keywords): (Option<&Expr>, Vec) = match_open(expr); diff --git a/crates/ruff/src/rules/pyupgrade/rules/replace_stdout_stderr.rs b/crates/ruff/src/rules/pyupgrade/rules/replace_stdout_stderr.rs index 2c0c42e956b04..91f943b0adb9a 100644 --- a/crates/ruff/src/rules/pyupgrade/rules/replace_stdout_stderr.rs +++ b/crates/ruff/src/rules/pyupgrade/rules/replace_stdout_stderr.rs @@ -61,7 +61,7 @@ pub(crate) fn replace_stdout_stderr( keywords: &[Keyword], ) { if checker - .ctx + .semantic_model() .resolve_call_path(func) .map_or(false, |call_path| { call_path.as_slice() == ["subprocess", "run"] @@ -77,13 +77,13 @@ pub(crate) fn replace_stdout_stderr( // Verify that they're both set to `subprocess.PIPE`. if !checker - .ctx + .semantic_model() .resolve_call_path(&stdout.value) .map_or(false, |call_path| { call_path.as_slice() == ["subprocess", "PIPE"] }) || !checker - .ctx + .semantic_model() .resolve_call_path(&stderr.value) .map_or(false, |call_path| { call_path.as_slice() == ["subprocess", "PIPE"] diff --git a/crates/ruff/src/rules/pyupgrade/rules/replace_universal_newlines.rs b/crates/ruff/src/rules/pyupgrade/rules/replace_universal_newlines.rs index 7df30f8b620c0..23c0e49de0181 100644 --- a/crates/ruff/src/rules/pyupgrade/rules/replace_universal_newlines.rs +++ b/crates/ruff/src/rules/pyupgrade/rules/replace_universal_newlines.rs @@ -25,7 +25,7 @@ impl AlwaysAutofixableViolation for ReplaceUniversalNewlines { /// UP021 pub(crate) fn replace_universal_newlines(checker: &mut Checker, func: &Expr, kwargs: &[Keyword]) { if checker - .ctx + .semantic_model() .resolve_call_path(func) .map_or(false, |call_path| { call_path.as_slice() == ["subprocess", "run"] diff --git a/crates/ruff/src/rules/pyupgrade/rules/super_call_with_parameters.rs b/crates/ruff/src/rules/pyupgrade/rules/super_call_with_parameters.rs index 9d3fb07fb02e2..c175297910aeb 100644 --- a/crates/ruff/src/rules/pyupgrade/rules/super_call_with_parameters.rs +++ b/crates/ruff/src/rules/pyupgrade/rules/super_call_with_parameters.rs @@ -43,14 +43,14 @@ pub(crate) fn super_call_with_parameters( if !is_super_call_with_arguments(func, args) { return; } - let scope = checker.ctx.scope(); + let scope = checker.semantic_model().scope(); // Check: are we in a Function scope? 
if !matches!(scope.kind, ScopeKind::Function(_)) { return; } - let mut parents = checker.ctx.parents(); + let mut parents = checker.semantic_model().parents(); // For a `super` invocation to be unnecessary, the first argument needs to match // the enclosing class, and the second argument needs to match the first @@ -97,6 +97,7 @@ pub(crate) fn super_call_with_parameters( return; } + drop(parents); let mut diagnostic = Diagnostic::new(SuperCallWithParameters, expr.range()); if checker.patch(diagnostic.kind.rule()) { if let Some(edit) = fixes::remove_super_arguments(checker.locator, checker.stylist, expr) { diff --git a/crates/ruff/src/rules/pyupgrade/rules/type_of_primitive.rs b/crates/ruff/src/rules/pyupgrade/rules/type_of_primitive.rs index 9c80dd55799d1..7f2c9a59641bd 100644 --- a/crates/ruff/src/rules/pyupgrade/rules/type_of_primitive.rs +++ b/crates/ruff/src/rules/pyupgrade/rules/type_of_primitive.rs @@ -32,7 +32,7 @@ pub(crate) fn type_of_primitive(checker: &mut Checker, expr: &Expr, func: &Expr, return; } if !checker - .ctx + .semantic_model() .resolve_call_path(func) .map_or(false, |call_path| call_path.as_slice() == ["", "type"]) { diff --git a/crates/ruff/src/rules/pyupgrade/rules/typing_text_str_alias.rs b/crates/ruff/src/rules/pyupgrade/rules/typing_text_str_alias.rs index dd165ca1c6388..5b1a867cca35e 100644 --- a/crates/ruff/src/rules/pyupgrade/rules/typing_text_str_alias.rs +++ b/crates/ruff/src/rules/pyupgrade/rules/typing_text_str_alias.rs @@ -23,7 +23,7 @@ impl AlwaysAutofixableViolation for TypingTextStrAlias { /// UP019 pub(crate) fn typing_text_str_alias(checker: &mut Checker, expr: &Expr) { if checker - .ctx + .semantic_model() .resolve_call_path(expr) .map_or(false, |call_path| { call_path.as_slice() == ["typing", "Text"] diff --git a/crates/ruff/src/rules/pyupgrade/rules/unnecessary_builtin_import.rs b/crates/ruff/src/rules/pyupgrade/rules/unnecessary_builtin_import.rs index 2f037a111614a..4c7415de11d84 100644 --- a/crates/ruff/src/rules/pyupgrade/rules/unnecessary_builtin_import.rs +++ b/crates/ruff/src/rules/pyupgrade/rules/unnecessary_builtin_import.rs @@ -106,8 +106,8 @@ pub(crate) fn unnecessary_builtin_import( if checker.patch(diagnostic.kind.rule()) { let deleted: Vec<&Stmt> = checker.deletions.iter().map(Into::into).collect(); - let defined_by = checker.ctx.stmt(); - let defined_in = checker.ctx.stmt_parent(); + let defined_by = checker.semantic_model().stmt(); + let defined_in = checker.semantic_model().stmt_parent(); let unused_imports: Vec = unused_imports .iter() .map(|alias| format!("{module}.{}", alias.name)) diff --git a/crates/ruff/src/rules/pyupgrade/rules/unnecessary_future_import.rs b/crates/ruff/src/rules/pyupgrade/rules/unnecessary_future_import.rs index 7924b55b16905..e9da4149d0352 100644 --- a/crates/ruff/src/rules/pyupgrade/rules/unnecessary_future_import.rs +++ b/crates/ruff/src/rules/pyupgrade/rules/unnecessary_future_import.rs @@ -86,8 +86,8 @@ pub(crate) fn unnecessary_future_import(checker: &mut Checker, stmt: &Stmt, name if checker.patch(diagnostic.kind.rule()) { let deleted: Vec<&Stmt> = checker.deletions.iter().map(Into::into).collect(); - let defined_by = checker.ctx.stmt(); - let defined_in = checker.ctx.stmt_parent(); + let defined_by = checker.semantic_model().stmt(); + let defined_in = checker.semantic_model().stmt_parent(); let unused_imports: Vec = unused_imports .iter() .map(|alias| format!("__future__.{}", alias.name)) diff --git a/crates/ruff/src/rules/pyupgrade/rules/use_pep585_annotation.rs 
b/crates/ruff/src/rules/pyupgrade/rules/use_pep585_annotation.rs index b216e11e1e9ad..4f1703bca9c34 100644 --- a/crates/ruff/src/rules/pyupgrade/rules/use_pep585_annotation.rs +++ b/crates/ruff/src/rules/pyupgrade/rules/use_pep585_annotation.rs @@ -5,7 +5,6 @@ use ruff_macros::{derive_message_formats, violation}; use ruff_python_ast::call_path::compose_call_path; use ruff_python_semantic::analyze::typing::ModuleMember; -use crate::autofix::actions::get_or_import_symbol; use crate::checkers::ast::Checker; use crate::registry::AsRule; @@ -46,12 +45,12 @@ pub(crate) fn use_pep585_annotation( }, expr.range(), ); - let fixable = !checker.ctx.in_complex_string_type_definition(); + let fixable = !checker.semantic_model().in_complex_string_type_definition(); if fixable && checker.patch(diagnostic.kind.rule()) { match replacement { ModuleMember::BuiltIn(name) => { // Built-in type, like `list`. - if checker.ctx.is_builtin(name) { + if checker.semantic_model().is_builtin(name) { diagnostic.set_fix(Fix::automatic(Edit::range_replacement( (*name).to_string(), expr.range(), @@ -61,13 +60,11 @@ pub(crate) fn use_pep585_annotation( ModuleMember::Member(module, member) => { // Imported type, like `collections.deque`. diagnostic.try_set_fix(|| { - let (import_edit, binding) = get_or_import_symbol( + let (import_edit, binding) = checker.importer.get_or_import_symbol( module, member, expr.start(), - &checker.ctx, - &checker.importer, - checker.locator, + checker.semantic_model(), )?; let reference_edit = Edit::range_replacement(binding, expr.range()); Ok(Fix::suggested_edits(import_edit, [reference_edit])) diff --git a/crates/ruff/src/rules/pyupgrade/rules/use_pep604_annotation.rs b/crates/ruff/src/rules/pyupgrade/rules/use_pep604_annotation.rs index 75149e13493e2..789e89b68545d 100644 --- a/crates/ruff/src/rules/pyupgrade/rules/use_pep604_annotation.rs +++ b/crates/ruff/src/rules/pyupgrade/rules/use_pep604_annotation.rs @@ -3,7 +3,6 @@ use rustpython_parser::ast::{self, Constant, Expr, Operator, Ranged}; use ruff_diagnostics::{AutofixKind, Diagnostic, Edit, Fix, Violation}; use ruff_macros::{derive_message_formats, violation}; - use ruff_python_semantic::analyze::typing::Pep604Operator; use crate::checkers::ast::Checker; @@ -59,8 +58,8 @@ pub(crate) fn use_pep604_annotation( operator: Pep604Operator, ) { // Avoid fixing forward references, or types not in an annotation. 
- let fixable = - checker.ctx.in_type_definition() && !checker.ctx.in_complex_string_type_definition(); + let fixable = checker.semantic_model().in_type_definition() + && !checker.semantic_model().in_complex_string_type_definition(); match operator { Pep604Operator::Optional => { let mut diagnostic = Diagnostic::new(NonPEP604Annotation, expr.range()); diff --git a/crates/ruff/src/rules/pyupgrade/rules/use_pep604_isinstance.rs b/crates/ruff/src/rules/pyupgrade/rules/use_pep604_isinstance.rs index 04c585aa0c4fc..d0f8baaf9211d 100644 --- a/crates/ruff/src/rules/pyupgrade/rules/use_pep604_isinstance.rs +++ b/crates/ruff/src/rules/pyupgrade/rules/use_pep604_isinstance.rs @@ -74,7 +74,7 @@ pub(crate) fn use_pep604_isinstance( let Some(kind) = CallKind::from_name(id) else { return; }; - if !checker.ctx.is_builtin(id) { + if !checker.semantic_model().is_builtin(id) { return; }; if let Some(types) = args.get(1) { diff --git a/crates/ruff/src/rules/pyupgrade/rules/useless_metaclass_type.rs b/crates/ruff/src/rules/pyupgrade/rules/useless_metaclass_type.rs index 0bfdca15c67f3..9cf7e040574d8 100644 --- a/crates/ruff/src/rules/pyupgrade/rules/useless_metaclass_type.rs +++ b/crates/ruff/src/rules/pyupgrade/rules/useless_metaclass_type.rs @@ -56,8 +56,8 @@ pub(crate) fn useless_metaclass_type( }; if checker.patch(diagnostic.kind.rule()) { let deleted: Vec<&Stmt> = checker.deletions.iter().map(Into::into).collect(); - let defined_by = checker.ctx.stmt(); - let defined_in = checker.ctx.stmt_parent(); + let defined_by = checker.semantic_model().stmt(); + let defined_in = checker.semantic_model().stmt_parent(); match actions::delete_stmt( defined_by, defined_in, diff --git a/crates/ruff/src/rules/pyupgrade/rules/useless_object_inheritance.rs b/crates/ruff/src/rules/pyupgrade/rules/useless_object_inheritance.rs index 1397a4525ff43..858e2aa55d665 100644 --- a/crates/ruff/src/rules/pyupgrade/rules/useless_object_inheritance.rs +++ b/crates/ruff/src/rules/pyupgrade/rules/useless_object_inheritance.rs @@ -35,7 +35,9 @@ fn rule(name: &str, bases: &[Expr], scope: &Scope, bindings: &Bindings) -> Optio continue; } if !matches!( - scope.get(id.as_str()).map(|index| &bindings[*index]), + scope + .get(id.as_str()) + .map(|binding_id| &bindings[binding_id]), None | Some(Binding { kind: BindingKind::Builtin, .. 
@@ -62,7 +64,12 @@ pub(crate) fn useless_object_inheritance( bases: &[Expr], keywords: &[Keyword], ) { - if let Some(mut diagnostic) = rule(name, bases, checker.ctx.scope(), &checker.ctx.bindings) { + if let Some(mut diagnostic) = rule( + name, + bases, + checker.semantic_model().scope(), + &checker.semantic_model().bindings, + ) { if checker.patch(diagnostic.kind.rule()) { let expr_range = diagnostic.range(); #[allow(deprecated)] diff --git a/crates/ruff/src/rules/pyupgrade/snapshots/ruff__rules__pyupgrade__tests__UP032_2.py.snap b/crates/ruff/src/rules/pyupgrade/snapshots/ruff__rules__pyupgrade__tests__UP032_2.py.snap new file mode 100644 index 0000000000000..572576e1ee29c --- /dev/null +++ b/crates/ruff/src/rules/pyupgrade/snapshots/ruff__rules__pyupgrade__tests__UP032_2.py.snap @@ -0,0 +1,443 @@ +--- +source: crates/ruff/src/rules/pyupgrade/mod.rs +--- +UP032_2.py:2:1: UP032 [*] Use f-string instead of `format` call + | +2 | # Errors +3 | "{.real}".format(1) + | ^^^^^^^^^^^^^^^^^^^ UP032 +4 | "{0.real}".format(1) +5 | "{a.real}".format(a=1) + | + = help: Convert to f-string + +β„Ή Suggested fix +1 1 | # Errors +2 |-"{.real}".format(1) + 2 |+f"{(1).real}" +3 3 | "{0.real}".format(1) +4 4 | "{a.real}".format(a=1) +5 5 | + +UP032_2.py:3:1: UP032 [*] Use f-string instead of `format` call + | +3 | # Errors +4 | "{.real}".format(1) +5 | "{0.real}".format(1) + | ^^^^^^^^^^^^^^^^^^^^ UP032 +6 | "{a.real}".format(a=1) + | + = help: Convert to f-string + +β„Ή Suggested fix +1 1 | # Errors +2 2 | "{.real}".format(1) +3 |-"{0.real}".format(1) + 3 |+f"{(1).real}" +4 4 | "{a.real}".format(a=1) +5 5 | +6 6 | "{.real}".format(1.0) + +UP032_2.py:4:1: UP032 [*] Use f-string instead of `format` call + | +4 | "{.real}".format(1) +5 | "{0.real}".format(1) +6 | "{a.real}".format(a=1) + | ^^^^^^^^^^^^^^^^^^^^^^ UP032 +7 | +8 | "{.real}".format(1.0) + | + = help: Convert to f-string + +β„Ή Suggested fix +1 1 | # Errors +2 2 | "{.real}".format(1) +3 3 | "{0.real}".format(1) +4 |-"{a.real}".format(a=1) + 4 |+f"{(1).real}" +5 5 | +6 6 | "{.real}".format(1.0) +7 7 | "{0.real}".format(1.0) + +UP032_2.py:6:1: UP032 [*] Use f-string instead of `format` call + | + 6 | "{a.real}".format(a=1) + 7 | + 8 | "{.real}".format(1.0) + | ^^^^^^^^^^^^^^^^^^^^^ UP032 + 9 | "{0.real}".format(1.0) +10 | "{a.real}".format(a=1.0) + | + = help: Convert to f-string + +β„Ή Suggested fix +3 3 | "{0.real}".format(1) +4 4 | "{a.real}".format(a=1) +5 5 | +6 |-"{.real}".format(1.0) + 6 |+f"{1.0.real}" +7 7 | "{0.real}".format(1.0) +8 8 | "{a.real}".format(a=1.0) +9 9 | + +UP032_2.py:7:1: UP032 [*] Use f-string instead of `format` call + | +7 | "{.real}".format(1.0) +8 | "{0.real}".format(1.0) + | ^^^^^^^^^^^^^^^^^^^^^^ UP032 +9 | "{a.real}".format(a=1.0) + | + = help: Convert to f-string + +β„Ή Suggested fix +4 4 | "{a.real}".format(a=1) +5 5 | +6 6 | "{.real}".format(1.0) +7 |-"{0.real}".format(1.0) + 7 |+f"{1.0.real}" +8 8 | "{a.real}".format(a=1.0) +9 9 | +10 10 | "{.real}".format(1j) + +UP032_2.py:8:1: UP032 [*] Use f-string instead of `format` call + | + 8 | "{.real}".format(1.0) + 9 | "{0.real}".format(1.0) +10 | "{a.real}".format(a=1.0) + | ^^^^^^^^^^^^^^^^^^^^^^^^ UP032 +11 | +12 | "{.real}".format(1j) + | + = help: Convert to f-string + +β„Ή Suggested fix +5 5 | +6 6 | "{.real}".format(1.0) +7 7 | "{0.real}".format(1.0) +8 |-"{a.real}".format(a=1.0) + 8 |+f"{1.0.real}" +9 9 | +10 10 | "{.real}".format(1j) +11 11 | "{0.real}".format(1j) + +UP032_2.py:10:1: UP032 [*] Use f-string instead of `format` call + | +10 | 
"{a.real}".format(a=1.0) +11 | +12 | "{.real}".format(1j) + | ^^^^^^^^^^^^^^^^^^^^ UP032 +13 | "{0.real}".format(1j) +14 | "{a.real}".format(a=1j) + | + = help: Convert to f-string + +β„Ή Suggested fix +7 7 | "{0.real}".format(1.0) +8 8 | "{a.real}".format(a=1.0) +9 9 | +10 |-"{.real}".format(1j) + 10 |+f"{1j.real}" +11 11 | "{0.real}".format(1j) +12 12 | "{a.real}".format(a=1j) +13 13 | + +UP032_2.py:11:1: UP032 [*] Use f-string instead of `format` call + | +11 | "{.real}".format(1j) +12 | "{0.real}".format(1j) + | ^^^^^^^^^^^^^^^^^^^^^ UP032 +13 | "{a.real}".format(a=1j) + | + = help: Convert to f-string + +β„Ή Suggested fix +8 8 | "{a.real}".format(a=1.0) +9 9 | +10 10 | "{.real}".format(1j) +11 |-"{0.real}".format(1j) + 11 |+f"{1j.real}" +12 12 | "{a.real}".format(a=1j) +13 13 | +14 14 | "{.real}".format(0b01) + +UP032_2.py:12:1: UP032 [*] Use f-string instead of `format` call + | +12 | "{.real}".format(1j) +13 | "{0.real}".format(1j) +14 | "{a.real}".format(a=1j) + | ^^^^^^^^^^^^^^^^^^^^^^^ UP032 +15 | +16 | "{.real}".format(0b01) + | + = help: Convert to f-string + +β„Ή Suggested fix +9 9 | +10 10 | "{.real}".format(1j) +11 11 | "{0.real}".format(1j) +12 |-"{a.real}".format(a=1j) + 12 |+f"{1j.real}" +13 13 | +14 14 | "{.real}".format(0b01) +15 15 | "{0.real}".format(0b01) + +UP032_2.py:14:1: UP032 [*] Use f-string instead of `format` call + | +14 | "{a.real}".format(a=1j) +15 | +16 | "{.real}".format(0b01) + | ^^^^^^^^^^^^^^^^^^^^^^ UP032 +17 | "{0.real}".format(0b01) +18 | "{a.real}".format(a=0b01) + | + = help: Convert to f-string + +β„Ή Suggested fix +11 11 | "{0.real}".format(1j) +12 12 | "{a.real}".format(a=1j) +13 13 | +14 |-"{.real}".format(0b01) + 14 |+f"{0b01.real}" +15 15 | "{0.real}".format(0b01) +16 16 | "{a.real}".format(a=0b01) +17 17 | + +UP032_2.py:15:1: UP032 [*] Use f-string instead of `format` call + | +15 | "{.real}".format(0b01) +16 | "{0.real}".format(0b01) + | ^^^^^^^^^^^^^^^^^^^^^^^ UP032 +17 | "{a.real}".format(a=0b01) + | + = help: Convert to f-string + +β„Ή Suggested fix +12 12 | "{a.real}".format(a=1j) +13 13 | +14 14 | "{.real}".format(0b01) +15 |-"{0.real}".format(0b01) + 15 |+f"{0b01.real}" +16 16 | "{a.real}".format(a=0b01) +17 17 | +18 18 | "{}".format(1 + 2) + +UP032_2.py:16:1: UP032 [*] Use f-string instead of `format` call + | +16 | "{.real}".format(0b01) +17 | "{0.real}".format(0b01) +18 | "{a.real}".format(a=0b01) + | ^^^^^^^^^^^^^^^^^^^^^^^^^ UP032 +19 | +20 | "{}".format(1 + 2) + | + = help: Convert to f-string + +β„Ή Suggested fix +13 13 | +14 14 | "{.real}".format(0b01) +15 15 | "{0.real}".format(0b01) +16 |-"{a.real}".format(a=0b01) + 16 |+f"{0b01.real}" +17 17 | +18 18 | "{}".format(1 + 2) +19 19 | "{}".format([1, 2]) + +UP032_2.py:18:1: UP032 [*] Use f-string instead of `format` call + | +18 | "{a.real}".format(a=0b01) +19 | +20 | "{}".format(1 + 2) + | ^^^^^^^^^^^^^^^^^^ UP032 +21 | "{}".format([1, 2]) +22 | "{}".format({1, 2}) + | + = help: Convert to f-string + +β„Ή Suggested fix +15 15 | "{0.real}".format(0b01) +16 16 | "{a.real}".format(a=0b01) +17 17 | +18 |-"{}".format(1 + 2) + 18 |+f"{1 + 2}" +19 19 | "{}".format([1, 2]) +20 20 | "{}".format({1, 2}) +21 21 | "{}".format({1: 2, 3: 4}) + +UP032_2.py:19:1: UP032 [*] Use f-string instead of `format` call + | +19 | "{}".format(1 + 2) +20 | "{}".format([1, 2]) + | ^^^^^^^^^^^^^^^^^^^ UP032 +21 | "{}".format({1, 2}) +22 | "{}".format({1: 2, 3: 4}) + | + = help: Convert to f-string + +β„Ή Suggested fix +16 16 | "{a.real}".format(a=0b01) +17 17 | +18 18 | "{}".format(1 + 2) +19 
|-"{}".format([1, 2]) + 19 |+f"{[1, 2]}" +20 20 | "{}".format({1, 2}) +21 21 | "{}".format({1: 2, 3: 4}) +22 22 | "{}".format((i for i in range(2))) + +UP032_2.py:20:1: UP032 [*] Use f-string instead of `format` call + | +20 | "{}".format(1 + 2) +21 | "{}".format([1, 2]) +22 | "{}".format({1, 2}) + | ^^^^^^^^^^^^^^^^^^^ UP032 +23 | "{}".format({1: 2, 3: 4}) +24 | "{}".format((i for i in range(2))) + | + = help: Convert to f-string + +β„Ή Suggested fix +17 17 | +18 18 | "{}".format(1 + 2) +19 19 | "{}".format([1, 2]) +20 |-"{}".format({1, 2}) + 20 |+f"{({1, 2})}" +21 21 | "{}".format({1: 2, 3: 4}) +22 22 | "{}".format((i for i in range(2))) +23 23 | + +UP032_2.py:21:1: UP032 [*] Use f-string instead of `format` call + | +21 | "{}".format([1, 2]) +22 | "{}".format({1, 2}) +23 | "{}".format({1: 2, 3: 4}) + | ^^^^^^^^^^^^^^^^^^^^^^^^^ UP032 +24 | "{}".format((i for i in range(2))) + | + = help: Convert to f-string + +β„Ή Suggested fix +18 18 | "{}".format(1 + 2) +19 19 | "{}".format([1, 2]) +20 20 | "{}".format({1, 2}) +21 |-"{}".format({1: 2, 3: 4}) + 21 |+f"{({1: 2, 3: 4})}" +22 22 | "{}".format((i for i in range(2))) +23 23 | +24 24 | "{.real}".format(1 + 2) + +UP032_2.py:22:1: UP032 [*] Use f-string instead of `format` call + | +22 | "{}".format({1, 2}) +23 | "{}".format({1: 2, 3: 4}) +24 | "{}".format((i for i in range(2))) + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ UP032 +25 | +26 | "{.real}".format(1 + 2) + | + = help: Convert to f-string + +β„Ή Suggested fix +19 19 | "{}".format([1, 2]) +20 20 | "{}".format({1, 2}) +21 21 | "{}".format({1: 2, 3: 4}) +22 |-"{}".format((i for i in range(2))) + 22 |+f"{(i for i in range(2))}" +23 23 | +24 24 | "{.real}".format(1 + 2) +25 25 | "{.real}".format([1, 2]) + +UP032_2.py:24:1: UP032 [*] Use f-string instead of `format` call + | +24 | "{}".format((i for i in range(2))) +25 | +26 | "{.real}".format(1 + 2) + | ^^^^^^^^^^^^^^^^^^^^^^^ UP032 +27 | "{.real}".format([1, 2]) +28 | "{.real}".format({1, 2}) + | + = help: Convert to f-string + +β„Ή Suggested fix +21 21 | "{}".format({1: 2, 3: 4}) +22 22 | "{}".format((i for i in range(2))) +23 23 | +24 |-"{.real}".format(1 + 2) + 24 |+f"{(1 + 2).real}" +25 25 | "{.real}".format([1, 2]) +26 26 | "{.real}".format({1, 2}) +27 27 | "{.real}".format({1: 2, 3: 4}) + +UP032_2.py:25:1: UP032 [*] Use f-string instead of `format` call + | +25 | "{.real}".format(1 + 2) +26 | "{.real}".format([1, 2]) + | ^^^^^^^^^^^^^^^^^^^^^^^^ UP032 +27 | "{.real}".format({1, 2}) +28 | "{.real}".format({1: 2, 3: 4}) + | + = help: Convert to f-string + +β„Ή Suggested fix +22 22 | "{}".format((i for i in range(2))) +23 23 | +24 24 | "{.real}".format(1 + 2) +25 |-"{.real}".format([1, 2]) + 25 |+f"{[1, 2].real}" +26 26 | "{.real}".format({1, 2}) +27 27 | "{.real}".format({1: 2, 3: 4}) +28 28 | "{}".format((i for i in range(2))) + +UP032_2.py:26:1: UP032 [*] Use f-string instead of `format` call + | +26 | "{.real}".format(1 + 2) +27 | "{.real}".format([1, 2]) +28 | "{.real}".format({1, 2}) + | ^^^^^^^^^^^^^^^^^^^^^^^^ UP032 +29 | "{.real}".format({1: 2, 3: 4}) +30 | "{}".format((i for i in range(2))) + | + = help: Convert to f-string + +β„Ή Suggested fix +23 23 | +24 24 | "{.real}".format(1 + 2) +25 25 | "{.real}".format([1, 2]) +26 |-"{.real}".format({1, 2}) + 26 |+f"{({1, 2}).real}" +27 27 | "{.real}".format({1: 2, 3: 4}) +28 28 | "{}".format((i for i in range(2))) + +UP032_2.py:27:1: UP032 [*] Use f-string instead of `format` call + | +27 | "{.real}".format([1, 2]) +28 | "{.real}".format({1, 2}) +29 | "{.real}".format({1: 2, 3: 4}) + | 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ UP032 +30 | "{}".format((i for i in range(2))) + | + = help: Convert to f-string + +β„Ή Suggested fix +24 24 | "{.real}".format(1 + 2) +25 25 | "{.real}".format([1, 2]) +26 26 | "{.real}".format({1, 2}) +27 |-"{.real}".format({1: 2, 3: 4}) + 27 |+f"{({1: 2, 3: 4}).real}" +28 28 | "{}".format((i for i in range(2))) + +UP032_2.py:28:1: UP032 [*] Use f-string instead of `format` call + | +28 | "{.real}".format({1, 2}) +29 | "{.real}".format({1: 2, 3: 4}) +30 | "{}".format((i for i in range(2))) + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ UP032 + | + = help: Convert to f-string + +β„Ή Suggested fix +25 25 | "{.real}".format([1, 2]) +26 26 | "{.real}".format({1, 2}) +27 27 | "{.real}".format({1: 2, 3: 4}) +28 |-"{}".format((i for i in range(2))) + 28 |+f"{(i for i in range(2))}" + + diff --git a/crates/ruff/src/rules/ruff/mod.rs b/crates/ruff/src/rules/ruff/mod.rs index 727711225f187..3bc568f354f4a 100644 --- a/crates/ruff/src/rules/ruff/mod.rs +++ b/crates/ruff/src/rules/ruff/mod.rs @@ -7,7 +7,6 @@ mod tests { use std::path::Path; use anyhow::Result; - use rustc_hash::FxHashSet; use test_case::test_case; @@ -47,6 +46,16 @@ mod tests { Ok(()) } + #[test] + fn noqa() -> Result<()> { + let diagnostics = test_path( + Path::new("ruff/noqa.py"), + &settings::Settings::for_rules(vec![Rule::UnusedVariable, Rule::AmbiguousVariableName]), + )?; + assert_messages!(diagnostics); + Ok(()) + } + #[test] fn ruf100_0() -> Result<()> { let diagnostics = test_path( diff --git a/crates/ruff/src/rules/ruff/rules/ambiguous_unicode_character.rs b/crates/ruff/src/rules/ruff/rules/ambiguous_unicode_character.rs index c82fafc0be304..65d05f9095b4d 100644 --- a/crates/ruff/src/rules/ruff/rules/ambiguous_unicode_character.rs +++ b/crates/ruff/src/rules/ruff/rules/ambiguous_unicode_character.rs @@ -1,4 +1,6 @@ +use bitflags::bitflags; use ruff_text_size::{TextLen, TextRange, TextSize}; +use std::fmt; use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, DiagnosticKind, Edit, Fix}; use ruff_macros::{derive_message_formats, violation}; @@ -23,8 +25,9 @@ impl AlwaysAutofixableViolation for AmbiguousUnicodeCharacterString { representant, } = self; format!( - "String contains ambiguous unicode character `{confusable}` (did you mean \ - `{representant}`?)" + "String contains ambiguous {}. Did you mean {}?", + NamedUnicode(*confusable), + NamedUnicode(*representant) ) } @@ -33,7 +36,11 @@ impl AlwaysAutofixableViolation for AmbiguousUnicodeCharacterString { confusable, representant, } = self; - format!("Replace `{confusable}` with `{representant}`") + format!( + "Replace {} with {}", + NamedUnicode(*confusable), + NamedUnicode(*representant) + ) } } @@ -51,8 +58,9 @@ impl AlwaysAutofixableViolation for AmbiguousUnicodeCharacterDocstring { representant, } = self; format!( - "Docstring contains ambiguous unicode character `{confusable}` (did you mean \ - `{representant}`?)" + "Docstring contains ambiguous {}. 
Did you mean {}?", + NamedUnicode(*confusable), + NamedUnicode(*representant) ) } @@ -61,7 +69,11 @@ impl AlwaysAutofixableViolation for AmbiguousUnicodeCharacterDocstring { confusable, representant, } = self; - format!("Replace `{confusable}` with `{representant}`") + format!( + "Replace {} with {}", + NamedUnicode(*confusable), + NamedUnicode(*representant) + ) } } @@ -79,8 +91,9 @@ impl AlwaysAutofixableViolation for AmbiguousUnicodeCharacterComment { representant, } = self; format!( - "Comment contains ambiguous unicode character `{confusable}` (did you mean \ - `{representant}`?)" + "Comment contains ambiguous {}. Did you mean {}?", + NamedUnicode(*confusable), + NamedUnicode(*representant) ) } @@ -89,7 +102,11 @@ impl AlwaysAutofixableViolation for AmbiguousUnicodeCharacterComment { confusable, representant, } = self; - format!("Replace `{confusable}` with `{representant}`") + format!( + "Replace {} with {}", + NamedUnicode(*confusable), + NamedUnicode(*representant) + ) } } @@ -103,50 +120,164 @@ pub(crate) fn ambiguous_unicode_character( let text = locator.slice(range); + // Most of the time, we don't need to check for ambiguous unicode characters at all. + if text.is_ascii() { + return diagnostics; + } + + // Iterate over the "words" in the text. + let mut word_flags = WordFlags::empty(); + let mut word_candidates: Vec = vec![]; for (relative_offset, current_char) in text.char_indices() { - if !current_char.is_ascii() { - // Search for confusing characters. - if let Some(representant) = CONFUSABLES.get(&(current_char as u32)).copied() { - if !settings.allowed_confusables.contains(¤t_char) { - let char_range = TextRange::at( - TextSize::try_from(relative_offset).unwrap() + range.start(), - current_char.text_len(), - ); + // Word boundary. + if !current_char.is_alphanumeric() { + if !word_candidates.is_empty() { + if word_flags.is_candidate_word() { + for candidate in word_candidates.drain(..) { + if let Some(diagnostic) = candidate.into_diagnostic(context, settings) { + diagnostics.push(diagnostic); + } + } + } + word_candidates.clear(); + } + word_flags = WordFlags::empty(); - let mut diagnostic = Diagnostic::new::( - match context { - Context::String => AmbiguousUnicodeCharacterString { - confusable: current_char, - representant: representant as char, - } - .into(), - Context::Docstring => AmbiguousUnicodeCharacterDocstring { - confusable: current_char, - representant: representant as char, - } - .into(), - Context::Comment => AmbiguousUnicodeCharacterComment { - confusable: current_char, - representant: representant as char, - } - .into(), - }, - char_range, + // Check if the boundary character is itself an ambiguous unicode character, in which + // case, it's always included as a diagnostic. + if !current_char.is_ascii() { + if let Some(representant) = CONFUSABLES.get(&(current_char as u32)).copied() { + let candidate = Candidate::new( + TextSize::try_from(relative_offset).unwrap() + range.start(), + current_char, + representant as char, ); - if settings.rules.enabled(diagnostic.kind.rule()) { - if settings.rules.should_fix(diagnostic.kind.rule()) { - #[allow(deprecated)] - diagnostic.set_fix(Fix::unspecified(Edit::range_replacement( - (representant as char).to_string(), - char_range, - ))); - } + if let Some(diagnostic) = candidate.into_diagnostic(context, settings) { diagnostics.push(diagnostic); } } } + } else if current_char.is_ascii() { + // The current word contains at least one ASCII character. 
+ word_flags |= WordFlags::ASCII; + } else if let Some(representant) = CONFUSABLES.get(&(current_char as u32)).copied() { + // The current word contains an ambiguous unicode character. + word_candidates.push(Candidate::new( + TextSize::try_from(relative_offset).unwrap() + range.start(), + current_char, + representant as char, + )); + } else { + // The current word contains at least one unambiguous unicode character. + word_flags |= WordFlags::UNAMBIGUOUS_UNICODE; } } + // End of the text. + if !word_candidates.is_empty() { + if word_flags.is_candidate_word() { + for candidate in word_candidates.drain(..) { + if let Some(diagnostic) = candidate.into_diagnostic(context, settings) { + diagnostics.push(diagnostic); + } + } + } + word_candidates.clear(); + } + diagnostics } + +bitflags! { + #[derive(Default, Debug, Copy, Clone, PartialEq, Eq)] + pub struct WordFlags: u8 { + /// The word contains at least one ASCII character (like `B`). + const ASCII = 0b0000_0001; + /// The word contains at least one unambiguous unicode character (like `Ξ²`). + const UNAMBIGUOUS_UNICODE = 0b0000_0010; + } +} + +impl WordFlags { + /// Return `true` if the flags indicate that the word is a candidate for flagging + /// ambiguous unicode characters. + /// + /// We follow VS Code's logic for determining whether ambiguous unicode characters within a + /// given word should be flagged, i.e., we flag a word if it contains at least one ASCII + /// character, or is purely unicode but _only_ consists of ambiguous characters. + /// + /// See: [VS Code](https://github.com/microsoft/vscode/issues/143720#issuecomment-1048757234) + const fn is_candidate_word(self) -> bool { + self.contains(WordFlags::ASCII) || !self.contains(WordFlags::UNAMBIGUOUS_UNICODE) + } +} + +/// An ambiguous unicode character in the text. +struct Candidate { + /// The offset of the candidate in the text. + offset: TextSize, + /// The ambiguous unicode character. + confusable: char, + /// The character with which the ambiguous unicode character is confusable. 
+ representant: char, +} + +impl Candidate { + fn new(offset: TextSize, confusable: char, representant: char) -> Self { + Self { + offset, + confusable, + representant, + } + } + + fn into_diagnostic(self, context: Context, settings: &Settings) -> Option { + if !settings.allowed_confusables.contains(&self.confusable) { + let char_range = TextRange::at(self.offset, self.confusable.text_len()); + let mut diagnostic = Diagnostic::new::( + match context { + Context::String => AmbiguousUnicodeCharacterString { + confusable: self.confusable, + representant: self.representant, + } + .into(), + Context::Docstring => AmbiguousUnicodeCharacterDocstring { + confusable: self.confusable, + representant: self.representant, + } + .into(), + Context::Comment => AmbiguousUnicodeCharacterComment { + confusable: self.confusable, + representant: self.representant, + } + .into(), + }, + char_range, + ); + if settings.rules.enabled(diagnostic.kind.rule()) { + if settings.rules.should_fix(diagnostic.kind.rule()) { + #[allow(deprecated)] + diagnostic.set_fix(Fix::unspecified(Edit::range_replacement( + self.representant.to_string(), + char_range, + ))); + } + return Some(diagnostic); + } + } + None + } +} + +struct NamedUnicode(char); + +impl fmt::Display for NamedUnicode { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let NamedUnicode(c) = self; + if let Some(name) = unicode_names2::name(*c) { + write!(f, "`{c}` ({name})") + } else { + write!(f, "`{c}`") + } + } +} diff --git a/crates/ruff/src/rules/ruff/rules/collection_literal_concatenation.rs b/crates/ruff/src/rules/ruff/rules/collection_literal_concatenation.rs index 4f868b30838e6..c5b2196d53ef9 100644 --- a/crates/ruff/src/rules/ruff/rules/collection_literal_concatenation.rs +++ b/crates/ruff/src/rules/ruff/rules/collection_literal_concatenation.rs @@ -49,92 +49,106 @@ fn make_splat_elts( } #[derive(Debug, Copy, Clone)] -enum Kind { +enum Type { List, Tuple, } -/// RUF005 -/// This suggestion could be unsafe if the non-literal expression in the -/// expression has overridden the `__add__` (or `__radd__`) magic methods. -pub(crate) fn collection_literal_concatenation(checker: &mut Checker, expr: &Expr) { +/// Recursively merge all the tuples and lists in the expression. +fn concatenate_expressions(expr: &Expr) -> Option<(Expr, Type)> { let Expr::BinOp(ast::ExprBinOp { left, op: Operator::Add, right, range: _ }) = expr else { - return; + return None; }; - // Figure out which way the splat is, and what the kind of the collection is. - let (kind, splat_element, other_elements, splat_at_left, ctx) = - match (left.as_ref(), right.as_ref()) { - ( - Expr::List(ast::ExprList { - elts: l_elts, - ctx, - range: _, - }), - _, - ) => (Kind::List, right, l_elts, false, ctx), - ( - Expr::Tuple(ast::ExprTuple { - elts: l_elts, - ctx, - range: _, - }), - _, - ) => (Kind::Tuple, right, l_elts, false, ctx), - ( - _, - Expr::List(ast::ExprList { - elts: r_elts, - ctx, - range: _, - }), - ) => (Kind::List, left, r_elts, true, ctx), - ( - _, - Expr::Tuple(ast::ExprTuple { - elts: r_elts, - ctx, - range: _, - }), - ) => (Kind::Tuple, left, r_elts, true, ctx), - _ => return, - }; - - // We'll be a bit conservative here; only calls, names and attribute accesses - // will be considered as splat elements. - if !(splat_element.is_call_expr() - || splat_element.is_name_expr() - || splat_element.is_attribute_expr()) - { - return; - } + let new_left = match left.as_ref() { + Expr::BinOp(ast::ExprBinOp { .. 
}) => match concatenate_expressions(left) { + Some((new_left, _)) => new_left, + None => *left.clone(), + }, + _ => *left.clone(), + }; - let new_expr = match kind { - Kind::List => { - let node = ast::ExprList { - elts: make_splat_elts(splat_element, other_elements, splat_at_left), - ctx: *ctx, - range: TextRange::default(), - }; - node.into() + let new_right = match right.as_ref() { + Expr::BinOp(ast::ExprBinOp { .. }) => match concatenate_expressions(right) { + Some((new_right, _)) => new_right, + None => *right.clone(), + }, + _ => *right.clone(), + }; + + // Figure out which way the splat is, and the type of the collection. + let (type_, splat_element, other_elements, splat_at_left) = match (&new_left, &new_right) { + (Expr::List(ast::ExprList { elts: l_elts, .. }), _) => { + (Type::List, &new_right, l_elts, false) + } + (Expr::Tuple(ast::ExprTuple { elts: l_elts, .. }), _) => { + (Type::Tuple, &new_right, l_elts, false) + } + (_, Expr::List(ast::ExprList { elts: r_elts, .. })) => { + (Type::List, &new_left, r_elts, true) } - Kind::Tuple => { - let node = ast::ExprTuple { - elts: make_splat_elts(splat_element, other_elements, splat_at_left), - ctx: *ctx, - range: TextRange::default(), - }; - node.into() + (_, Expr::Tuple(ast::ExprTuple { elts: r_elts, .. })) => { + (Type::Tuple, &new_left, r_elts, true) } + _ => return None, }; - let contents = match kind { - // Wrap the new expression in parentheses if it was a tuple - Kind::Tuple => format!("({})", checker.generator().expr(&new_expr)), - Kind::List => checker.generator().expr(&new_expr), + let new_elts = match splat_element { + // We'll be a bit conservative here; only calls, names and attribute accesses + // will be considered as splat elements. + Expr::Call(_) | Expr::Attribute(_) | Expr::Name(_) => { + make_splat_elts(splat_element, other_elements, splat_at_left) + } + // If the splat element is itself a list/tuple, insert them in the other list/tuple. + Expr::List(ast::ExprList { elts, .. }) if matches!(type_, Type::List) => { + other_elements.iter().chain(elts.iter()).cloned().collect() + } + Expr::Tuple(ast::ExprTuple { elts, .. }) if matches!(type_, Type::Tuple) => { + other_elements.iter().chain(elts.iter()).cloned().collect() + } + _ => return None, }; - let fixable = !has_comments(expr, checker.locator); + let new_expr = match type_ { + Type::List => ast::ExprList { + elts: new_elts, + ctx: ExprContext::Load, + range: TextRange::default(), + } + .into(), + Type::Tuple => ast::ExprTuple { + elts: new_elts, + ctx: ExprContext::Load, + range: TextRange::default(), + } + .into(), + }; + + Some((new_expr, type_)) +} + +/// RUF005 +pub(crate) fn collection_literal_concatenation(checker: &mut Checker, expr: &Expr) { + // If the expression is already a child of an addition, we'll have analyzed it already. + if matches!( + checker.semantic_model().expr_parent(), + Some(Expr::BinOp(ast::ExprBinOp { + op: Operator::Add, + .. + })) + ) { + return; + } + + let Some((new_expr, type_)) = concatenate_expressions(expr) else { + return + }; + + let contents = match type_ { + // Wrap the new expression in parentheses if it was a tuple. 
+ Type::Tuple => format!("({})", checker.generator().expr(&new_expr)), + Type::List => checker.generator().expr(&new_expr), + }; let mut diagnostic = Diagnostic::new( CollectionLiteralConcatenation { expr: contents.clone(), @@ -142,9 +156,10 @@ pub(crate) fn collection_literal_concatenation(checker: &mut Checker, expr: &Exp expr.range(), ); if checker.patch(diagnostic.kind.rule()) { - if fixable { - #[allow(deprecated)] - diagnostic.set_fix(Fix::unspecified(Edit::range_replacement( + if !has_comments(expr, checker.locator) { + // This suggestion could be unsafe if the non-literal expression in the + // expression has overridden the `__add__` (or `__radd__`) magic methods. + diagnostic.set_fix(Fix::suggested(Edit::range_replacement( contents, expr.range(), ))); diff --git a/crates/ruff/src/rules/ruff/rules/explicit_f_string_type_conversion.rs b/crates/ruff/src/rules/ruff/rules/explicit_f_string_type_conversion.rs index 219176d49e26b..a08071d29d831 100644 --- a/crates/ruff/src/rules/ruff/rules/explicit_f_string_type_conversion.rs +++ b/crates/ruff/src/rules/ruff/rules/explicit_f_string_type_conversion.rs @@ -1,9 +1,16 @@ -use rustpython_parser::ast::{self, ConversionFlag, Expr, Ranged}; +use anyhow::{bail, Result}; +use libcst_native::{Codegen, CodegenState}; +use rustpython_parser::ast::{self, Expr, Ranged}; use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit, Fix}; use ruff_macros::{derive_message_formats, violation}; +use ruff_python_ast::source_code::{Locator, Stylist}; use crate::checkers::ast::Checker; +use crate::cst::matchers::{ + match_call_mut, match_expression, match_formatted_string, match_formatted_string_expression, + match_name, +}; use crate::registry::AsRule; /// ## What it does @@ -39,59 +46,101 @@ impl AlwaysAutofixableViolation for ExplicitFStringTypeConversion { } } +fn fix_explicit_f_string_type_conversion( + expr: &Expr, + index: usize, + locator: &Locator, + stylist: &Stylist, +) -> Result { + // Replace the call node with its argument and a conversion flag. + let range = expr.range(); + let content = locator.slice(range); + let mut expression = match_expression(content)?; + let formatted_string = match_formatted_string(&mut expression)?; + + // Replace the formatted call expression at `index` with a conversion flag. + let mut formatted_string_expression = + match_formatted_string_expression(&mut formatted_string.parts[index])?; + let call = match_call_mut(&mut formatted_string_expression.expression)?; + let name = match_name(&call.func)?; + match name.value { + "str" => { + formatted_string_expression.conversion = Some("s"); + } + "repr" => { + formatted_string_expression.conversion = Some("r"); + } + "ascii" => { + formatted_string_expression.conversion = Some("a"); + } + _ => bail!("Unexpected function call: `{:?}`", name.value), + } + formatted_string_expression.expression = call.args[0].value.clone(); + + let mut state = CodegenState { + default_newline: &stylist.line_ending(), + default_indent: stylist.indentation(), + ..CodegenState::default() + }; + expression.codegen(&mut state); + + Ok(Fix::automatic(Edit::range_replacement( + state.to_string(), + range, + ))) +} + /// RUF010 pub(crate) fn explicit_f_string_type_conversion( checker: &mut Checker, - formatted_value: &Expr, - conversion: ConversionFlag, + expr: &Expr, + values: &[Expr], ) { - // Skip if there's already a conversion flag. 
- if !conversion.is_none() { - return; - } + for (index, formatted_value) in values.iter().enumerate() { + let Expr::FormattedValue(ast::ExprFormattedValue { + conversion, + value, + .. + }) = &formatted_value else { + continue; + }; + // Skip if there's already a conversion flag. + if !conversion.is_none() { + return; + } - let Expr::Call(ast::ExprCall { - func, - args, - keywords, - range: _, - }) = formatted_value else { - return; - }; + let Expr::Call(ast::ExprCall { + func, + args, + keywords, + .. + }) = value.as_ref() else { + return; + }; - // Can't be a conversion otherwise. - if args.len() != 1 || !keywords.is_empty() { - return; - } + // Can't be a conversion otherwise. + if args.len() != 1 || !keywords.is_empty() { + return; + } - let Expr::Name(ast::ExprName { id, .. }) = func.as_ref() else { - return; - }; + let Expr::Name(ast::ExprName { id, .. }) = func.as_ref() else { + return; + }; - let conversion = match id.as_str() { - "ascii" => 'a', - "str" => 's', - "repr" => 'r', - _ => return, - }; + if !matches!(id.as_str(), "str" | "repr" | "ascii") { + return; + }; - if !checker.ctx.is_builtin(id) { - return; - } + if !checker.semantic_model().is_builtin(id) { + return; + } - let formatted_value_range = formatted_value.range(); - let mut diagnostic = Diagnostic::new(ExplicitFStringTypeConversion, formatted_value_range); - - if checker.patch(diagnostic.kind.rule()) { - let arg_range = args[0].range(); - let remove_call = Edit::deletion(formatted_value_range.start(), arg_range.start()); - let add_conversion = Edit::replacement( - format!("!{conversion}"), - arg_range.end(), - formatted_value_range.end(), - ); - diagnostic.set_fix(Fix::automatic_edits(remove_call, [add_conversion])); + let mut diagnostic = Diagnostic::new(ExplicitFStringTypeConversion, value.range()); + if checker.patch(diagnostic.kind.rule()) { + diagnostic.try_set_fix(|| { + fix_explicit_f_string_type_conversion(expr, index, checker.locator, checker.stylist) + }); + } + checker.diagnostics.push(diagnostic); } - - checker.diagnostics.push(diagnostic); } diff --git a/crates/ruff/src/rules/ruff/rules/mod.rs b/crates/ruff/src/rules/ruff/rules/mod.rs index 61a9ff69a32f4..43543fe7cef40 100644 --- a/crates/ruff/src/rules/ruff/rules/mod.rs +++ b/crates/ruff/src/rules/ruff/rules/mod.rs @@ -1,12 +1,3 @@ -mod ambiguous_unicode_character; -mod asyncio_dangling_task; -mod collection_literal_concatenation; -mod confusables; -mod explicit_f_string_type_conversion; -mod mutable_defaults_in_dataclass_fields; -mod pairwise_over_zipped; -mod unused_noqa; - pub(crate) use ambiguous_unicode_character::{ ambiguous_unicode_character, AmbiguousUnicodeCharacterComment, AmbiguousUnicodeCharacterDocstring, AmbiguousUnicodeCharacterString, @@ -15,6 +6,9 @@ pub(crate) use asyncio_dangling_task::{asyncio_dangling_task, AsyncioDanglingTas pub(crate) use collection_literal_concatenation::{ collection_literal_concatenation, CollectionLiteralConcatenation, }; +pub(crate) use explicit_f_string_type_conversion::{ + explicit_f_string_type_conversion, ExplicitFStringTypeConversion, +}; pub(crate) use mutable_defaults_in_dataclass_fields::{ function_call_in_dataclass_defaults, is_dataclass, mutable_dataclass_default, FunctionCallInDataclassDefaultArgument, MutableDataclassDefault, @@ -22,9 +16,14 @@ pub(crate) use mutable_defaults_in_dataclass_fields::{ pub(crate) use pairwise_over_zipped::{pairwise_over_zipped, PairwiseOverZipped}; pub(crate) use unused_noqa::{UnusedCodes, UnusedNOQA}; -pub(crate) use explicit_f_string_type_conversion::{ - 
explicit_f_string_type_conversion, ExplicitFStringTypeConversion, -}; +mod ambiguous_unicode_character; +mod asyncio_dangling_task; +mod collection_literal_concatenation; +mod confusables; +mod explicit_f_string_type_conversion; +mod mutable_defaults_in_dataclass_fields; +mod pairwise_over_zipped; +mod unused_noqa; #[derive(Clone, Copy)] pub(crate) enum Context { diff --git a/crates/ruff/src/rules/ruff/rules/mutable_defaults_in_dataclass_fields.rs b/crates/ruff/src/rules/ruff/rules/mutable_defaults_in_dataclass_fields.rs index f478b3e38e3e5..b1c72ecdf8503 100644 --- a/crates/ruff/src/rules/ruff/rules/mutable_defaults_in_dataclass_fields.rs +++ b/crates/ruff/src/rules/ruff/rules/mutable_defaults_in_dataclass_fields.rs @@ -1,12 +1,12 @@ -use ruff_python_ast::call_path::{from_qualified_name, CallPath}; use rustpython_parser::ast::{self, Expr, Ranged, Stmt}; use ruff_diagnostics::{Diagnostic, Violation}; use ruff_macros::{derive_message_formats, violation}; +use ruff_python_ast::call_path::{from_qualified_name, CallPath}; use ruff_python_ast::{call_path::compose_call_path, helpers::map_callable}; use ruff_python_semantic::{ analyze::typing::{is_immutable_annotation, is_immutable_func}, - context::Context, + model::SemanticModel, }; use crate::checkers::ast::Checker; @@ -162,8 +162,8 @@ fn is_mutable_expr(expr: &Expr) -> bool { const ALLOWED_DATACLASS_SPECIFIC_FUNCTIONS: &[&[&str]] = &[&["dataclasses", "field"]]; -fn is_allowed_dataclass_function(context: &Context, func: &Expr) -> bool { - context.resolve_call_path(func).map_or(false, |call_path| { +fn is_allowed_dataclass_function(model: &SemanticModel, func: &Expr) -> bool { + model.resolve_call_path(func).map_or(false, |call_path| { ALLOWED_DATACLASS_SPECIFIC_FUNCTIONS .iter() .any(|target| call_path.as_slice() == *target) @@ -171,11 +171,11 @@ fn is_allowed_dataclass_function(context: &Context, func: &Expr) -> bool { } /// Returns `true` if the given [`Expr`] is a `typing.ClassVar` annotation. -fn is_class_var_annotation(context: &Context, annotation: &Expr) -> bool { +fn is_class_var_annotation(model: &SemanticModel, annotation: &Expr) -> bool { let Expr::Subscript(ast::ExprSubscript { value, .. }) = &annotation else { return false; }; - context.match_typing_expr(value, "ClassVar") + model.match_typing_expr(value, "ClassVar") } /// RUF009 @@ -195,12 +195,12 @@ pub(crate) fn function_call_in_dataclass_defaults(checker: &mut Checker, body: & .. }) = statement { - if is_class_var_annotation(&checker.ctx, annotation) { + if is_class_var_annotation(checker.semantic_model(), annotation) { continue; } if let Expr::Call(ast::ExprCall { func, .. }) = expr.as_ref() { - if !is_immutable_func(&checker.ctx, func, &extend_immutable_calls) - && !is_allowed_dataclass_function(&checker.ctx, func) + if !is_immutable_func(checker.semantic_model(), func, &extend_immutable_calls) + && !is_allowed_dataclass_function(checker.semantic_model(), func) { checker.diagnostics.push(Diagnostic::new( FunctionCallInDataclassDefaultArgument { @@ -223,8 +223,8 @@ pub(crate) fn mutable_dataclass_default(checker: &mut Checker, body: &[Stmt]) { value: Some(value), .. 
}) => { - if !is_class_var_annotation(&checker.ctx, annotation) - && !is_immutable_annotation(&checker.ctx, annotation) + if !is_class_var_annotation(checker.semantic_model(), annotation) + && !is_immutable_annotation(checker.semantic_model(), annotation) && is_mutable_expr(value) { checker @@ -244,10 +244,9 @@ pub(crate) fn mutable_dataclass_default(checker: &mut Checker, body: &[Stmt]) { } } -pub(crate) fn is_dataclass(checker: &Checker, decorator_list: &[Expr]) -> bool { +pub(crate) fn is_dataclass(model: &SemanticModel, decorator_list: &[Expr]) -> bool { decorator_list.iter().any(|decorator| { - checker - .ctx + model .resolve_call_path(map_callable(decorator)) .map_or(false, |call_path| { call_path.as_slice() == ["dataclasses", "dataclass"] diff --git a/crates/ruff/src/rules/ruff/rules/pairwise_over_zipped.rs b/crates/ruff/src/rules/ruff/rules/pairwise_over_zipped.rs index e93a5d5a2926f..99c2286a759bd 100644 --- a/crates/ruff/src/rules/ruff/rules/pairwise_over_zipped.rs +++ b/crates/ruff/src/rules/ruff/rules/pairwise_over_zipped.rs @@ -99,7 +99,7 @@ pub(crate) fn pairwise_over_zipped(checker: &mut Checker, func: &Expr, args: &[E } // Require the function to be the builtin `zip`. - if !(id == "zip" && checker.ctx.is_builtin(id)) { + if !(id == "zip" && checker.semantic_model().is_builtin(id)) { return; } diff --git a/crates/ruff/src/rules/ruff/snapshots/ruff__rules__ruff__tests__RUF005_RUF005.py.snap b/crates/ruff/src/rules/ruff/snapshots/ruff__rules__ruff__tests__RUF005_RUF005.py.snap index bb508f24fa47e..a2885f683f89c 100644 --- a/crates/ruff/src/rules/ruff/snapshots/ruff__rules__ruff__tests__RUF005_RUF005.py.snap +++ b/crates/ruff/src/rules/ruff/snapshots/ruff__rules__ruff__tests__RUF005_RUF005.py.snap @@ -1,271 +1,357 @@ --- source: crates/ruff/src/rules/ruff/mod.rs --- -RUF005.py:10:7: RUF005 [*] Consider `[1, 2, 3, *foo]` instead of concatenation +RUF005.py:4:1: RUF005 Consider `[*foo]` instead of concatenation + | +4 | # Non-fixable Errors. +5 | ### +6 | / foo + [ # This will be preserved. +7 | | ] + | |_^ RUF005 +8 | [*foo] + [ # This will be preserved. +9 | ] + | + = help: Replace with `[*foo]` + +RUF005.py:6:1: RUF005 Consider `[*foo]` instead of concatenation + | + 6 | foo + [ # This will be preserved. + 7 | ] + 8 | / [*foo] + [ # This will be preserved. 
+ 9 | | ] + | |_^ RUF005 +10 | first = [ +11 | # The order | -10 | foo = [4, 5, 6] -11 | bar = [1, 2, 3] + foo + = help: Replace with `[*foo]` + +RUF005.py:16:10: RUF005 Consider `[*first, 4, 5, 6]` instead of concatenation + | +16 | # to preserve +17 | ] +18 | second = first + [ + | __________^ +19 | | # please +20 | | 4, +21 | | # don't +22 | | 5, +23 | | # touch +24 | | 6, +25 | | ] + | |_^ RUF005 + | + = help: Replace with `[*first, 4, 5, 6]` + +RUF005.py:39:7: RUF005 [*] Consider `[1, 2, 3, *foo]` instead of concatenation + | +39 | foo = [4, 5, 6] +40 | bar = [1, 2, 3] + foo | ^^^^^^^^^^^^^^^ RUF005 -12 | zoob = tuple(bar) -13 | quux = (7, 8, 9) + zoob +41 | zoob = tuple(bar) +42 | quux = (7, 8, 9) + zoob | = help: Replace with `[1, 2, 3, *foo]` β„Ή Suggested fix -7 7 | yay = Fun().yay -8 8 | -9 9 | foo = [4, 5, 6] -10 |-bar = [1, 2, 3] + foo - 10 |+bar = [1, 2, 3, *foo] -11 11 | zoob = tuple(bar) -12 12 | quux = (7, 8, 9) + zoob -13 13 | spam = quux + (10, 11, 12) - -RUF005.py:12:8: RUF005 [*] Consider `(7, 8, 9, *zoob)` instead of concatenation - | -12 | bar = [1, 2, 3] + foo -13 | zoob = tuple(bar) -14 | quux = (7, 8, 9) + zoob +36 36 | yay = Fun().yay +37 37 | +38 38 | foo = [4, 5, 6] +39 |-bar = [1, 2, 3] + foo + 39 |+bar = [1, 2, 3, *foo] +40 40 | zoob = tuple(bar) +41 41 | quux = (7, 8, 9) + zoob +42 42 | spam = quux + (10, 11, 12) + +RUF005.py:41:8: RUF005 [*] Consider `(7, 8, 9, *zoob)` instead of concatenation + | +41 | bar = [1, 2, 3] + foo +42 | zoob = tuple(bar) +43 | quux = (7, 8, 9) + zoob | ^^^^^^^^^^^^^^^^ RUF005 -15 | spam = quux + (10, 11, 12) -16 | spom = list(spam) +44 | spam = quux + (10, 11, 12) +45 | spom = list(spam) | = help: Replace with `(7, 8, 9, *zoob)` β„Ή Suggested fix -9 9 | foo = [4, 5, 6] -10 10 | bar = [1, 2, 3] + foo -11 11 | zoob = tuple(bar) -12 |-quux = (7, 8, 9) + zoob - 12 |+quux = (7, 8, 9, *zoob) -13 13 | spam = quux + (10, 11, 12) -14 14 | spom = list(spam) -15 15 | eggs = spom + [13, 14, 15] - -RUF005.py:13:8: RUF005 [*] Consider `(*quux, 10, 11, 12)` instead of concatenation - | -13 | zoob = tuple(bar) -14 | quux = (7, 8, 9) + zoob -15 | spam = quux + (10, 11, 12) +38 38 | foo = [4, 5, 6] +39 39 | bar = [1, 2, 3] + foo +40 40 | zoob = tuple(bar) +41 |-quux = (7, 8, 9) + zoob + 41 |+quux = (7, 8, 9, *zoob) +42 42 | spam = quux + (10, 11, 12) +43 43 | spom = list(spam) +44 44 | eggs = spom + [13, 14, 15] + +RUF005.py:42:8: RUF005 [*] Consider `(*quux, 10, 11, 12)` instead of concatenation + | +42 | zoob = tuple(bar) +43 | quux = (7, 8, 9) + zoob +44 | spam = quux + (10, 11, 12) | ^^^^^^^^^^^^^^^^^^^ RUF005 -16 | spom = list(spam) -17 | eggs = spom + [13, 14, 15] +45 | spom = list(spam) +46 | eggs = spom + [13, 14, 15] | = help: Replace with `(*quux, 10, 11, 12)` β„Ή Suggested fix -10 10 | bar = [1, 2, 3] + foo -11 11 | zoob = tuple(bar) -12 12 | quux = (7, 8, 9) + zoob -13 |-spam = quux + (10, 11, 12) - 13 |+spam = (*quux, 10, 11, 12) -14 14 | spom = list(spam) -15 15 | eggs = spom + [13, 14, 15] -16 16 | elatement = ("we all say", ) + yay() - -RUF005.py:15:8: RUF005 [*] Consider `[*spom, 13, 14, 15]` instead of concatenation - | -15 | spam = quux + (10, 11, 12) -16 | spom = list(spam) -17 | eggs = spom + [13, 14, 15] +39 39 | bar = [1, 2, 3] + foo +40 40 | zoob = tuple(bar) +41 41 | quux = (7, 8, 9) + zoob +42 |-spam = quux + (10, 11, 12) + 42 |+spam = (*quux, 10, 11, 12) +43 43 | spom = list(spam) +44 44 | eggs = spom + [13, 14, 15] +45 45 | elatement = ("we all say",) + yay() + +RUF005.py:44:8: RUF005 [*] Consider `[*spom, 13, 14, 15]` 
instead of concatenation + | +44 | spam = quux + (10, 11, 12) +45 | spom = list(spam) +46 | eggs = spom + [13, 14, 15] | ^^^^^^^^^^^^^^^^^^^ RUF005 -18 | elatement = ("we all say", ) + yay() -19 | excitement = ("we all think", ) + Fun().yay() +47 | elatement = ("we all say",) + yay() +48 | excitement = ("we all think",) + Fun().yay() | = help: Replace with `[*spom, 13, 14, 15]` β„Ή Suggested fix -12 12 | quux = (7, 8, 9) + zoob -13 13 | spam = quux + (10, 11, 12) -14 14 | spom = list(spam) -15 |-eggs = spom + [13, 14, 15] - 15 |+eggs = [*spom, 13, 14, 15] -16 16 | elatement = ("we all say", ) + yay() -17 17 | excitement = ("we all think", ) + Fun().yay() -18 18 | astonishment = ("we all feel", ) + Fun.words - -RUF005.py:16:13: RUF005 [*] Consider `("we all say", *yay())` instead of concatenation - | -16 | spom = list(spam) -17 | eggs = spom + [13, 14, 15] -18 | elatement = ("we all say", ) + yay() - | ^^^^^^^^^^^^^^^^^^^^^^^^ RUF005 -19 | excitement = ("we all think", ) + Fun().yay() -20 | astonishment = ("we all feel", ) + Fun.words +41 41 | quux = (7, 8, 9) + zoob +42 42 | spam = quux + (10, 11, 12) +43 43 | spom = list(spam) +44 |-eggs = spom + [13, 14, 15] + 44 |+eggs = [*spom, 13, 14, 15] +45 45 | elatement = ("we all say",) + yay() +46 46 | excitement = ("we all think",) + Fun().yay() +47 47 | astonishment = ("we all feel",) + Fun.words + +RUF005.py:45:13: RUF005 [*] Consider `("we all say", *yay())` instead of concatenation + | +45 | spom = list(spam) +46 | eggs = spom + [13, 14, 15] +47 | elatement = ("we all say",) + yay() + | ^^^^^^^^^^^^^^^^^^^^^^^ RUF005 +48 | excitement = ("we all think",) + Fun().yay() +49 | astonishment = ("we all feel",) + Fun.words | = help: Replace with `("we all say", *yay())` β„Ή Suggested fix -13 13 | spam = quux + (10, 11, 12) -14 14 | spom = list(spam) -15 15 | eggs = spom + [13, 14, 15] -16 |-elatement = ("we all say", ) + yay() - 16 |+elatement = ("we all say", *yay()) -17 17 | excitement = ("we all think", ) + Fun().yay() -18 18 | astonishment = ("we all feel", ) + Fun.words -19 19 | - -RUF005.py:17:14: RUF005 [*] Consider `("we all think", *Fun().yay())` instead of concatenation - | -17 | eggs = spom + [13, 14, 15] -18 | elatement = ("we all say", ) + yay() -19 | excitement = ("we all think", ) + Fun().yay() - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ RUF005 -20 | astonishment = ("we all feel", ) + Fun.words +42 42 | spam = quux + (10, 11, 12) +43 43 | spom = list(spam) +44 44 | eggs = spom + [13, 14, 15] +45 |-elatement = ("we all say",) + yay() + 45 |+elatement = ("we all say", *yay()) +46 46 | excitement = ("we all think",) + Fun().yay() +47 47 | astonishment = ("we all feel",) + Fun.words +48 48 | + +RUF005.py:46:14: RUF005 [*] Consider `("we all think", *Fun().yay())` instead of concatenation + | +46 | eggs = spom + [13, 14, 15] +47 | elatement = ("we all say",) + yay() +48 | excitement = ("we all think",) + Fun().yay() + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ RUF005 +49 | astonishment = ("we all feel",) + Fun.words | = help: Replace with `("we all think", *Fun().yay())` β„Ή Suggested fix -14 14 | spom = list(spam) -15 15 | eggs = spom + [13, 14, 15] -16 16 | elatement = ("we all say", ) + yay() -17 |-excitement = ("we all think", ) + Fun().yay() - 17 |+excitement = ("we all think", *Fun().yay()) -18 18 | astonishment = ("we all feel", ) + Fun.words -19 19 | -20 20 | chain = ['a', 'b', 'c'] + eggs + list(('yes', 'no', 'pants') + zoob) - -RUF005.py:18:16: RUF005 [*] Consider `("we all feel", *Fun.words)` instead of concatenation - | -18 | elatement = 
("we all say", ) + yay() -19 | excitement = ("we all think", ) + Fun().yay() -20 | astonishment = ("we all feel", ) + Fun.words - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ RUF005 -21 | -22 | chain = ['a', 'b', 'c'] + eggs + list(('yes', 'no', 'pants') + zoob) +43 43 | spom = list(spam) +44 44 | eggs = spom + [13, 14, 15] +45 45 | elatement = ("we all say",) + yay() +46 |-excitement = ("we all think",) + Fun().yay() + 46 |+excitement = ("we all think", *Fun().yay()) +47 47 | astonishment = ("we all feel",) + Fun.words +48 48 | +49 49 | chain = ["a", "b", "c"] + eggs + list(("yes", "no", "pants") + zoob) + +RUF005.py:47:16: RUF005 [*] Consider `("we all feel", *Fun.words)` instead of concatenation + | +47 | elatement = ("we all say",) + yay() +48 | excitement = ("we all think",) + Fun().yay() +49 | astonishment = ("we all feel",) + Fun.words + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ RUF005 +50 | +51 | chain = ["a", "b", "c"] + eggs + list(("yes", "no", "pants") + zoob) | = help: Replace with `("we all feel", *Fun.words)` β„Ή Suggested fix -15 15 | eggs = spom + [13, 14, 15] -16 16 | elatement = ("we all say", ) + yay() -17 17 | excitement = ("we all think", ) + Fun().yay() -18 |-astonishment = ("we all feel", ) + Fun.words - 18 |+astonishment = ("we all feel", *Fun.words) -19 19 | -20 20 | chain = ['a', 'b', 'c'] + eggs + list(('yes', 'no', 'pants') + zoob) -21 21 | - -RUF005.py:20:9: RUF005 [*] Consider `["a", "b", "c", *eggs]` instead of concatenation - | -20 | astonishment = ("we all feel", ) + Fun.words -21 | -22 | chain = ['a', 'b', 'c'] + eggs + list(('yes', 'no', 'pants') + zoob) - | ^^^^^^^^^^^^^^^^^^^^^^ RUF005 -23 | -24 | baz = () + zoob - | - = help: Replace with `["a", "b", "c", *eggs]` +44 44 | eggs = spom + [13, 14, 15] +45 45 | elatement = ("we all say",) + yay() +46 46 | excitement = ("we all think",) + Fun().yay() +47 |-astonishment = ("we all feel",) + Fun.words + 47 |+astonishment = ("we all feel", *Fun.words) +48 48 | +49 49 | chain = ["a", "b", "c"] + eggs + list(("yes", "no", "pants") + zoob) +50 50 | + +RUF005.py:49:9: RUF005 [*] Consider `["a", "b", "c", *eggs, *list(("yes", "no", "pants") + zoob)]` instead of concatenation + | +49 | astonishment = ("we all feel",) + Fun.words +50 | +51 | chain = ["a", "b", "c"] + eggs + list(("yes", "no", "pants") + zoob) + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ RUF005 +52 | +53 | baz = () + zoob + | + = help: Replace with `["a", "b", "c", *eggs, *list(("yes", "no", "pants") + zoob)]` β„Ή Suggested fix -17 17 | excitement = ("we all think", ) + Fun().yay() -18 18 | astonishment = ("we all feel", ) + Fun.words -19 19 | -20 |-chain = ['a', 'b', 'c'] + eggs + list(('yes', 'no', 'pants') + zoob) - 20 |+chain = ["a", "b", "c", *eggs] + list(('yes', 'no', 'pants') + zoob) -21 21 | -22 22 | baz = () + zoob -23 23 | - -RUF005.py:20:39: RUF005 [*] Consider `("yes", "no", "pants", *zoob)` instead of concatenation - | -20 | astonishment = ("we all feel", ) + Fun.words -21 | -22 | chain = ['a', 'b', 'c'] + eggs + list(('yes', 'no', 'pants') + zoob) +46 46 | excitement = ("we all think",) + Fun().yay() +47 47 | astonishment = ("we all feel",) + Fun.words +48 48 | +49 |-chain = ["a", "b", "c"] + eggs + list(("yes", "no", "pants") + zoob) + 49 |+chain = ["a", "b", "c", *eggs, *list(("yes", "no", "pants") + zoob)] +50 50 | +51 51 | baz = () + zoob +52 52 | + +RUF005.py:49:39: RUF005 [*] Consider `("yes", "no", "pants", *zoob)` instead of concatenation + | +49 | astonishment = ("we all feel",) + Fun.words +50 | +51 | chain = ["a", "b", "c"] + 
eggs + list(("yes", "no", "pants") + zoob) | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ RUF005 -23 | -24 | baz = () + zoob +52 | +53 | baz = () + zoob | = help: Replace with `("yes", "no", "pants", *zoob)` β„Ή Suggested fix -17 17 | excitement = ("we all think", ) + Fun().yay() -18 18 | astonishment = ("we all feel", ) + Fun.words -19 19 | -20 |-chain = ['a', 'b', 'c'] + eggs + list(('yes', 'no', 'pants') + zoob) - 20 |+chain = ['a', 'b', 'c'] + eggs + list(("yes", "no", "pants", *zoob)) -21 21 | -22 22 | baz = () + zoob -23 23 | - -RUF005.py:22:7: RUF005 [*] Consider `(*zoob,)` instead of concatenation - | -22 | chain = ['a', 'b', 'c'] + eggs + list(('yes', 'no', 'pants') + zoob) -23 | -24 | baz = () + zoob +46 46 | excitement = ("we all think",) + Fun().yay() +47 47 | astonishment = ("we all feel",) + Fun.words +48 48 | +49 |-chain = ["a", "b", "c"] + eggs + list(("yes", "no", "pants") + zoob) + 49 |+chain = ["a", "b", "c"] + eggs + list(("yes", "no", "pants", *zoob)) +50 50 | +51 51 | baz = () + zoob +52 52 | + +RUF005.py:51:7: RUF005 [*] Consider `(*zoob,)` instead of concatenation + | +51 | chain = ["a", "b", "c"] + eggs + list(("yes", "no", "pants") + zoob) +52 | +53 | baz = () + zoob | ^^^^^^^^^ RUF005 -25 | -26 | first = [ +54 | +55 | [] + foo + [ | = help: Replace with `(*zoob,)` β„Ή Suggested fix -19 19 | -20 20 | chain = ['a', 'b', 'c'] + eggs + list(('yes', 'no', 'pants') + zoob) -21 21 | -22 |-baz = () + zoob - 22 |+baz = (*zoob,) -23 23 | -24 24 | first = [ -25 25 | # The order - -RUF005.py:32:10: RUF005 Consider `[*first, 4, 5, 6]` instead of concatenation - | -32 | # to preserve -33 | ] -34 | second = first + [ - | __________^ -35 | | # please -36 | | 4, -37 | | # don't -38 | | 5, -39 | | # touch -40 | | 6, -41 | | ] +48 48 | +49 49 | chain = ["a", "b", "c"] + eggs + list(("yes", "no", "pants") + zoob) +50 50 | +51 |-baz = () + zoob + 51 |+baz = (*zoob,) +52 52 | +53 53 | [] + foo + [ +54 54 | ] + +RUF005.py:53:1: RUF005 [*] Consider `[*foo]` instead of concatenation + | +53 | baz = () + zoob +54 | +55 | / [] + foo + [ +56 | | ] | |_^ RUF005 -42 | -43 | [] + foo + [ +57 | +58 | pylint_call = [sys.executable, "-m", "pylint"] + args + [path] | - = help: Replace with `[*first, 4, 5, 6]` + = help: Replace with `[*foo]` -RUF005.py:41:1: RUF005 [*] Consider `[*foo]` instead of concatenation +β„Ή Suggested fix +50 50 | +51 51 | baz = () + zoob +52 52 | +53 |-[] + foo + [ +54 |-] + 53 |+[*foo] +55 54 | +56 55 | pylint_call = [sys.executable, "-m", "pylint"] + args + [path] +57 56 | pylint_call_tuple = (sys.executable, "-m", "pylint") + args + (path, path2) + +RUF005.py:56:15: RUF005 [*] Consider `[sys.executable, "-m", "pylint", *args, path]` instead of concatenation | -41 | ] -42 | -43 | [] + foo + [ - | ^^^^^^^^ RUF005 -44 | ] +56 | ] +57 | +58 | pylint_call = [sys.executable, "-m", "pylint"] + args + [path] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ RUF005 +59 | pylint_call_tuple = (sys.executable, "-m", "pylint") + args + (path, path2) +60 | b = a + [2, 3] + [4] | - = help: Replace with `[*foo]` + = help: Replace with `[sys.executable, "-m", "pylint", *args, path]` β„Ή Suggested fix -38 38 | 6, -39 39 | ] -40 40 | -41 |-[] + foo + [ - 41 |+[*foo] + [ -42 42 | ] -43 43 | -44 44 | [] + foo + [ # This will be preserved, but doesn't prevent the fix - -RUF005.py:44:1: RUF005 [*] Consider `[*foo]` instead of concatenation - | -44 | ] -45 | -46 | [] + foo + [ # This will be preserved, but doesn't prevent the fix - | ^^^^^^^^ RUF005 -47 | ] +53 53 | [] + foo + [ +54 54 | ] +55 55 | +56 
|-pylint_call = [sys.executable, "-m", "pylint"] + args + [path] + 56 |+pylint_call = [sys.executable, "-m", "pylint", *args, path] +57 57 | pylint_call_tuple = (sys.executable, "-m", "pylint") + args + (path, path2) +58 58 | b = a + [2, 3] + [4] +59 59 | + +RUF005.py:57:21: RUF005 [*] Consider `(sys.executable, "-m", "pylint", *args, path, path2)` instead of concatenation | - = help: Replace with `[*foo]` +57 | pylint_call = [sys.executable, "-m", "pylint"] + args + [path] +58 | pylint_call_tuple = (sys.executable, "-m", "pylint") + args + (path, path2) + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ RUF005 +59 | b = a + [2, 3] + [4] + | + = help: Replace with `(sys.executable, "-m", "pylint", *args, path, path2)` + +β„Ή Suggested fix +54 54 | ] +55 55 | +56 56 | pylint_call = [sys.executable, "-m", "pylint"] + args + [path] +57 |-pylint_call_tuple = (sys.executable, "-m", "pylint") + args + (path, path2) + 57 |+pylint_call_tuple = (sys.executable, "-m", "pylint", *args, path, path2) +58 58 | b = a + [2, 3] + [4] +59 59 | +60 60 | # Uses the non-preferred quote style, which should be retained. + +RUF005.py:58:5: RUF005 [*] Consider `[*a, 2, 3, 4]` instead of concatenation + | +58 | pylint_call = [sys.executable, "-m", "pylint"] + args + [path] +59 | pylint_call_tuple = (sys.executable, "-m", "pylint") + args + (path, path2) +60 | b = a + [2, 3] + [4] + | ^^^^^^^^^^^^^^^^ RUF005 +61 | +62 | # Uses the non-preferred quote style, which should be retained. + | + = help: Replace with `[*a, 2, 3, 4]` + +β„Ή Suggested fix +55 55 | +56 56 | pylint_call = [sys.executable, "-m", "pylint"] + args + [path] +57 57 | pylint_call_tuple = (sys.executable, "-m", "pylint") + args + (path, path2) +58 |-b = a + [2, 3] + [4] + 58 |+b = [*a, 2, 3, 4] +59 59 | +60 60 | # Uses the non-preferred quote style, which should be retained. +61 61 | f"{a() + ['b']}" + +RUF005.py:61:4: RUF005 [*] Consider `[*a(), 'b']` instead of concatenation + | +61 | # Uses the non-preferred quote style, which should be retained. +62 | f"{a() + ['b']}" + | ^^^^^^^^^^^ RUF005 +63 | +64 | ### + | + = help: Replace with `[*a(), 'b']` β„Ή Suggested fix -41 41 | [] + foo + [ -42 42 | ] -43 43 | -44 |-[] + foo + [ # This will be preserved, but doesn't prevent the fix - 44 |+[*foo] + [ # This will be preserved, but doesn't prevent the fix -45 45 | ] -46 46 | -47 47 | # Uses the non-preferred quote style, which should be retained. +58 58 | b = a + [2, 3] + [4] +59 59 | +60 60 | # Uses the non-preferred quote style, which should be retained. +61 |-f"{a() + ['b']}" + 61 |+f"{[*a(), 'b']}" +62 62 | +63 63 | ### +64 64 | # Non-errors. 
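For context on the RUF005 snapshot changes above: the rule suggests replacing `+` concatenation of list and tuple literals with iterable unpacking, and the updated fix now also covers chained concatenations such as the `pylint_call` cases. A minimal sketch of the before/after shape, using hypothetical user code that mirrors the fixtures in the snapshot:

```python
import sys

# Hypothetical inputs, mirroring the RUF005.py fixture.
args = ["--verbose"]
path = "src/main.py"

# Flagged by RUF005: concatenating list literals with another iterable.
pylint_call = [sys.executable, "-m", "pylint"] + args + [path]

# Suggested fix: splice the iterable into a single literal via unpacking.
pylint_call = [sys.executable, "-m", "pylint", *args, path]
```

Concatenations that carry comments (the "This will be preserved" cases) appear in the new snapshot as reported but left unfixed, presumably so the rewrite never drops those comments.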
diff --git a/crates/ruff/src/rules/ruff/snapshots/ruff__rules__ruff__tests__RUF010_RUF010.py.snap b/crates/ruff/src/rules/ruff/snapshots/ruff__rules__ruff__tests__RUF010_RUF010.py.snap index 26fc6ede22b47..df895039e7a65 100644 --- a/crates/ruff/src/rules/ruff/snapshots/ruff__rules__ruff__tests__RUF010_RUF010.py.snap +++ b/crates/ruff/src/rules/ruff/snapshots/ruff__rules__ruff__tests__RUF010_RUF010.py.snap @@ -65,7 +65,7 @@ RUF010.py:11:4: RUF010 [*] Use conversion in f-string 13 | f"{str(d['a'])}, {repr(d['b'])}, {ascii(d['c'])}" # RUF010 | ^^^^^^^^^^^ RUF010 14 | -15 | f"{foo(bla)}" # OK +15 | f"{(str(bla))}, {(repr(bla))}, {(ascii(bla))}" # RUF010 | = help: Replace f-string function call with conversion @@ -76,7 +76,7 @@ RUF010.py:11:4: RUF010 [*] Use conversion in f-string 11 |-f"{str(d['a'])}, {repr(d['b'])}, {ascii(d['c'])}" # RUF010 11 |+f"{d['a']!s}, {repr(d['b'])}, {ascii(d['c'])}" # RUF010 12 12 | -13 13 | f"{foo(bla)}" # OK +13 13 | f"{(str(bla))}, {(repr(bla))}, {(ascii(bla))}" # RUF010 14 14 | RUF010.py:11:19: RUF010 [*] Use conversion in f-string @@ -86,7 +86,7 @@ RUF010.py:11:19: RUF010 [*] Use conversion in f-string 13 | f"{str(d['a'])}, {repr(d['b'])}, {ascii(d['c'])}" # RUF010 | ^^^^^^^^^^^^ RUF010 14 | -15 | f"{foo(bla)}" # OK +15 | f"{(str(bla))}, {(repr(bla))}, {(ascii(bla))}" # RUF010 | = help: Replace f-string function call with conversion @@ -97,7 +97,7 @@ RUF010.py:11:19: RUF010 [*] Use conversion in f-string 11 |-f"{str(d['a'])}, {repr(d['b'])}, {ascii(d['c'])}" # RUF010 11 |+f"{str(d['a'])}, {d['b']!r}, {ascii(d['c'])}" # RUF010 12 12 | -13 13 | f"{foo(bla)}" # OK +13 13 | f"{(str(bla))}, {(repr(bla))}, {(ascii(bla))}" # RUF010 14 14 | RUF010.py:11:35: RUF010 [*] Use conversion in f-string @@ -107,7 +107,7 @@ RUF010.py:11:35: RUF010 [*] Use conversion in f-string 13 | f"{str(d['a'])}, {repr(d['b'])}, {ascii(d['c'])}" # RUF010 | ^^^^^^^^^^^^^ RUF010 14 | -15 | f"{foo(bla)}" # OK +15 | f"{(str(bla))}, {(repr(bla))}, {(ascii(bla))}" # RUF010 | = help: Replace f-string function call with conversion @@ -118,7 +118,70 @@ RUF010.py:11:35: RUF010 [*] Use conversion in f-string 11 |-f"{str(d['a'])}, {repr(d['b'])}, {ascii(d['c'])}" # RUF010 11 |+f"{str(d['a'])}, {repr(d['b'])}, {d['c']!a}" # RUF010 12 12 | -13 13 | f"{foo(bla)}" # OK +13 13 | f"{(str(bla))}, {(repr(bla))}, {(ascii(bla))}" # RUF010 14 14 | +RUF010.py:13:5: RUF010 [*] Use conversion in f-string + | +13 | f"{str(d['a'])}, {repr(d['b'])}, {ascii(d['c'])}" # RUF010 +14 | +15 | f"{(str(bla))}, {(repr(bla))}, {(ascii(bla))}" # RUF010 + | ^^^^^^^^ RUF010 +16 | +17 | f"{foo(bla)}" # OK + | + = help: Replace f-string function call with conversion + +β„Ή Fix +10 10 | +11 11 | f"{str(d['a'])}, {repr(d['b'])}, {ascii(d['c'])}" # RUF010 +12 12 | +13 |-f"{(str(bla))}, {(repr(bla))}, {(ascii(bla))}" # RUF010 + 13 |+f"{bla!s}, {(repr(bla))}, {(ascii(bla))}" # RUF010 +14 14 | +15 15 | f"{foo(bla)}" # OK +16 16 | + +RUF010.py:13:19: RUF010 [*] Use conversion in f-string + | +13 | f"{str(d['a'])}, {repr(d['b'])}, {ascii(d['c'])}" # RUF010 +14 | +15 | f"{(str(bla))}, {(repr(bla))}, {(ascii(bla))}" # RUF010 + | ^^^^^^^^^ RUF010 +16 | +17 | f"{foo(bla)}" # OK + | + = help: Replace f-string function call with conversion + +β„Ή Fix +10 10 | +11 11 | f"{str(d['a'])}, {repr(d['b'])}, {ascii(d['c'])}" # RUF010 +12 12 | +13 |-f"{(str(bla))}, {(repr(bla))}, {(ascii(bla))}" # RUF010 + 13 |+f"{(str(bla))}, {bla!r}, {(ascii(bla))}" # RUF010 +14 14 | +15 15 | f"{foo(bla)}" # OK +16 16 | + +RUF010.py:13:34: RUF010 [*] Use conversion in 
f-string + | +13 | f"{str(d['a'])}, {repr(d['b'])}, {ascii(d['c'])}" # RUF010 +14 | +15 | f"{(str(bla))}, {(repr(bla))}, {(ascii(bla))}" # RUF010 + | ^^^^^^^^^^ RUF010 +16 | +17 | f"{foo(bla)}" # OK + | + = help: Replace f-string function call with conversion + +β„Ή Fix +10 10 | +11 11 | f"{str(d['a'])}, {repr(d['b'])}, {ascii(d['c'])}" # RUF010 +12 12 | +13 |-f"{(str(bla))}, {(repr(bla))}, {(ascii(bla))}" # RUF010 + 13 |+f"{(str(bla))}, {(repr(bla))}, {bla!a}" # RUF010 +14 14 | +15 15 | f"{foo(bla)}" # OK +16 16 | + diff --git a/crates/ruff/src/rules/ruff/snapshots/ruff__rules__ruff__tests__confusables.snap b/crates/ruff/src/rules/ruff/snapshots/ruff__rules__ruff__tests__confusables.snap index 75d127a751bf3..e9e180c02383e 100644 --- a/crates/ruff/src/rules/ruff/snapshots/ruff__rules__ruff__tests__confusables.snap +++ b/crates/ruff/src/rules/ruff/snapshots/ruff__rules__ruff__tests__confusables.snap @@ -1,13 +1,13 @@ --- source: crates/ruff/src/rules/ruff/mod.rs --- -confusables.py:1:6: RUF001 [*] String contains ambiguous unicode character `𝐁` (did you mean `B`?) +confusables.py:1:6: RUF001 [*] String contains ambiguous `𝐁` (MATHEMATICAL BOLD CAPITAL B). Did you mean `B` (LATIN CAPITAL LETTER B)? | 1 | x = "𝐁ad string" | ^ RUF001 2 | y = "βˆ’" | - = help: Replace `𝐁` with `B` + = help: Replace `𝐁` (MATHEMATICAL BOLD CAPITAL B) with `B` (LATIN CAPITAL LETTER B) β„Ή Suggested fix 1 |-x = "𝐁ad string" @@ -16,7 +16,7 @@ confusables.py:1:6: RUF001 [*] String contains ambiguous unicode character `𝐁 3 3 | 4 4 | -confusables.py:6:56: RUF002 [*] Docstring contains ambiguous unicode character `οΌ‰` (did you mean `)`?) +confusables.py:6:56: RUF002 [*] Docstring contains ambiguous `οΌ‰` (FULLWIDTH RIGHT PARENTHESIS). Did you mean `)` (RIGHT PARENTHESIS)? | 6 | def f(): 7 | """Here's a docstring with an unusual parenthesis: οΌ‰""" @@ -24,7 +24,7 @@ confusables.py:6:56: RUF002 [*] Docstring contains ambiguous unicode character ` 8 | # And here's a comment with an unusual punctuation mark: ᜡ 9 | ... | - = help: Replace `οΌ‰` with `)` + = help: Replace `οΌ‰` (FULLWIDTH RIGHT PARENTHESIS) with `)` (RIGHT PARENTHESIS) β„Ή Suggested fix 3 3 | @@ -36,7 +36,7 @@ confusables.py:6:56: RUF002 [*] Docstring contains ambiguous unicode character ` 8 8 | ... 9 9 | -confusables.py:7:62: RUF003 [*] Comment contains ambiguous unicode character `ᜡ` (did you mean `/`?) +confusables.py:7:62: RUF003 [*] Comment contains ambiguous `ᜡ` (PHILIPPINE SINGLE PUNCTUATION). Did you mean `/` (SOLIDUS)? | 7 | def f(): 8 | """Here's a docstring with an unusual parenthesis: οΌ‰""" @@ -44,7 +44,7 @@ confusables.py:7:62: RUF003 [*] Comment contains ambiguous unicode character ` | ^ RUF003 10 | ... | - = help: Replace `ᜡ` with `/` + = help: Replace `ᜡ` (PHILIPPINE SINGLE PUNCTUATION) with `/` (SOLIDUS) β„Ή Suggested fix 4 4 | @@ -56,4 +56,75 @@ confusables.py:7:62: RUF003 [*] Comment contains ambiguous unicode character ` 9 9 | 10 10 | +confusables.py:17:6: RUF001 [*] String contains ambiguous `𝐁` (MATHEMATICAL BOLD CAPITAL B). Did you mean `B` (LATIN CAPITAL LETTER B)? + | +17 | x = "𝐁ad string" + | ^ RUF001 +18 | x = "βˆ’" + | + = help: Replace `𝐁` (MATHEMATICAL BOLD CAPITAL B) with `B` (LATIN CAPITAL LETTER B) + +β„Ή Suggested fix +14 14 | ... +15 15 | +16 16 | +17 |-x = "𝐁ad string" + 17 |+x = "Bad string" +18 18 | x = "βˆ’" +19 19 | +20 20 | # This should be ignored, since it contains an unambiguous unicode character, and no + +confusables.py:26:10: RUF001 [*] String contains ambiguous `Ξ±` (GREEK SMALL LETTER ALPHA). 
Did you mean `a` (LATIN SMALL LETTER A)? + | +26 | # The first word should be ignored, while the second should be included, since it +27 | # contains ASCII. +28 | x = "Ξ²Ξ± BΞ±d" + | ^ RUF001 +29 | +30 | # The two characters should be flagged here. The first character is a "word" + | + = help: Replace `Ξ±` (GREEK SMALL LETTER ALPHA) with `a` (LATIN SMALL LETTER A) + +β„Ή Suggested fix +23 23 | +24 24 | # The first word should be ignored, while the second should be included, since it +25 25 | # contains ASCII. +26 |-x = "Ξ²Ξ± BΞ±d" + 26 |+x = "Ξ²Ξ± Bad" +27 27 | +28 28 | # The two characters should be flagged here. The first character is a "word" +29 29 | # consisting of a single ambiguous character, while the second character is a "word + +confusables.py:31:6: RUF001 [*] String contains ambiguous `Π ` (CYRILLIC CAPITAL LETTER ER). Did you mean `P` (LATIN CAPITAL LETTER P)? + | +31 | # consisting of a single ambiguous character, while the second character is a "word +32 | # boundary" (whitespace) that it itself ambiguous. +33 | x = "Р усский" + | ^ RUF001 + | + = help: Replace `Π ` (CYRILLIC CAPITAL LETTER ER) with `P` (LATIN CAPITAL LETTER P) + +β„Ή Suggested fix +28 28 | # The two characters should be flagged here. The first character is a "word" +29 29 | # consisting of a single ambiguous character, while the second character is a "word +30 30 | # boundary" (whitespace) that it itself ambiguous. +31 |-x = "Р усский" + 31 |+x = "P усский" + +confusables.py:31:7: RUF001 [*] String contains ambiguous `β€€` (EN QUAD). Did you mean ` ` (SPACE)? + | +31 | # consisting of a single ambiguous character, while the second character is a "word +32 | # boundary" (whitespace) that it itself ambiguous. +33 | x = "Р усский" + | ^ RUF001 + | + = help: Replace `β€€` (EN QUAD) with ` ` (SPACE) + +β„Ή Suggested fix +28 28 | # The two characters should be flagged here. The first character is a "word" +29 29 | # consisting of a single ambiguous character, while the second character is a "word +30 30 | # boundary" (whitespace) that it itself ambiguous. +31 |-x = "Р усский" + 31 |+x = "Π  усский" + diff --git a/crates/ruff/src/rules/ruff/snapshots/ruff__rules__ruff__tests__noqa.snap b/crates/ruff/src/rules/ruff/snapshots/ruff__rules__ruff__tests__noqa.snap new file mode 100644 index 0000000000000..01c68b7ef6fec --- /dev/null +++ b/crates/ruff/src/rules/ruff/snapshots/ruff__rules__ruff__tests__noqa.snap @@ -0,0 +1,20 @@ +--- +source: crates/ruff/src/rules/ruff/mod.rs +--- +noqa.py:23:5: F841 [*] Local variable `I` is assigned to but never used + | +23 | def f(): +24 | # Only `E741` should be ignored by the `noqa`. +25 | I = 1 # noqa: E741.F841 + | ^ F841 + | + = help: Remove assignment to unused variable `I` + +β„Ή Suggested fix +20 20 | +21 21 | def f(): +22 22 | # Only `E741` should be ignored by the `noqa`. +23 |- I = 1 # noqa: E741.F841 + 23 |+ pass # noqa: E741.F841 + + diff --git a/crates/ruff/src/rules/tryceratops/helpers.rs b/crates/ruff/src/rules/tryceratops/helpers.rs index adc856e1ecdf0..b0a1cdb99249b 100644 --- a/crates/ruff/src/rules/tryceratops/helpers.rs +++ b/crates/ruff/src/rules/tryceratops/helpers.rs @@ -3,16 +3,16 @@ use rustpython_parser::ast::{self, Expr}; use ruff_python_ast::visitor; use ruff_python_ast::visitor::Visitor; use ruff_python_semantic::analyze::logging; -use ruff_python_semantic::context::Context; +use ruff_python_semantic::model::SemanticModel; /// Collect `logging`-like calls from an AST. 
-pub(crate) struct LoggerCandidateVisitor<'a> { - context: &'a Context<'a>, - pub(crate) calls: Vec<(&'a Expr, &'a Expr)>, +pub(crate) struct LoggerCandidateVisitor<'a, 'b> { + context: &'a SemanticModel<'b>, + pub(crate) calls: Vec<(&'b Expr, &'b Expr)>, } -impl<'a> LoggerCandidateVisitor<'a> { - pub(crate) fn new(context: &'a Context<'a>) -> Self { +impl<'a, 'b> LoggerCandidateVisitor<'a, 'b> { + pub(crate) fn new(context: &'a SemanticModel<'b>) -> Self { LoggerCandidateVisitor { context, calls: Vec::new(), @@ -20,13 +20,10 @@ impl<'a> LoggerCandidateVisitor<'a> { } } -impl<'a, 'b> Visitor<'b> for LoggerCandidateVisitor<'a> -where - 'b: 'a, -{ +impl<'a, 'b> Visitor<'b> for LoggerCandidateVisitor<'a, 'b> { fn visit_expr(&mut self, expr: &'b Expr) { if let Expr::Call(ast::ExprCall { func, .. }) = expr { - if logging::is_logger_candidate(self.context, func) { + if logging::is_logger_candidate(func, self.context) { self.calls.push((expr, func)); } } diff --git a/crates/ruff/src/rules/tryceratops/mod.rs b/crates/ruff/src/rules/tryceratops/mod.rs index 36387df2988a8..f4694b1d704b1 100644 --- a/crates/ruff/src/rules/tryceratops/mod.rs +++ b/crates/ruff/src/rules/tryceratops/mod.rs @@ -8,7 +8,6 @@ mod tests { use std::path::Path; use anyhow::Result; - use test_case::test_case; use crate::registry::Rule; diff --git a/crates/ruff/src/rules/tryceratops/rules/error_instead_of_exception.rs b/crates/ruff/src/rules/tryceratops/rules/error_instead_of_exception.rs index 646a35ffedc38..ed27a6f10d7be 100644 --- a/crates/ruff/src/rules/tryceratops/rules/error_instead_of_exception.rs +++ b/crates/ruff/src/rules/tryceratops/rules/error_instead_of_exception.rs @@ -57,7 +57,7 @@ pub(crate) fn error_instead_of_exception(checker: &mut Checker, handlers: &[Exce for handler in handlers { let Excepthandler::ExceptHandler(ast::ExcepthandlerExceptHandler { body, .. }) = handler; let calls = { - let mut visitor = LoggerCandidateVisitor::new(&checker.ctx); + let mut visitor = LoggerCandidateVisitor::new(checker.semantic_model()); visitor.visit_body(body); visitor.calls }; diff --git a/crates/ruff/src/rules/tryceratops/rules/raise_vanilla_class.rs b/crates/ruff/src/rules/tryceratops/rules/raise_vanilla_class.rs index 163bb032030dc..46024b2db3bca 100644 --- a/crates/ruff/src/rules/tryceratops/rules/raise_vanilla_class.rs +++ b/crates/ruff/src/rules/tryceratops/rules/raise_vanilla_class.rs @@ -63,7 +63,7 @@ impl Violation for RaiseVanillaClass { /// TRY002 pub(crate) fn raise_vanilla_class(checker: &mut Checker, expr: &Expr) { if checker - .ctx + .semantic_model() .resolve_call_path(if let Expr::Call(ast::ExprCall { func, .. 
}) = expr { func } else { diff --git a/crates/ruff/src/rules/tryceratops/rules/try_consider_else.rs b/crates/ruff/src/rules/tryceratops/rules/try_consider_else.rs index 77ab803bb51f6..91e7624f94aa6 100644 --- a/crates/ruff/src/rules/tryceratops/rules/try_consider_else.rs +++ b/crates/ruff/src/rules/tryceratops/rules/try_consider_else.rs @@ -66,7 +66,7 @@ pub(crate) fn try_consider_else( if let Some(stmt) = body.last() { if let Stmt::Return(ast::StmtReturn { value, range: _ }) = stmt { if let Some(value) = value { - if contains_effect(value, |id| checker.ctx.is_builtin(id)) { + if contains_effect(value, |id| checker.semantic_model().is_builtin(id)) { return; } } diff --git a/crates/ruff/src/rules/tryceratops/rules/type_check_without_type_error.rs b/crates/ruff/src/rules/tryceratops/rules/type_check_without_type_error.rs index 5cc964c3a4c6a..f87d7b010afd0 100644 --- a/crates/ruff/src/rules/tryceratops/rules/type_check_without_type_error.rs +++ b/crates/ruff/src/rules/tryceratops/rules/type_check_without_type_error.rs @@ -77,7 +77,7 @@ fn has_control_flow(stmt: &Stmt) -> bool { /// Returns `true` if an [`Expr`] is a call to check types. fn check_type_check_call(checker: &mut Checker, call: &Expr) -> bool { checker - .ctx + .semantic_model() .resolve_call_path(call) .map_or(false, |call_path| { call_path.as_slice() == ["", "isinstance"] @@ -101,7 +101,7 @@ fn check_type_check_test(checker: &mut Checker, test: &Expr) -> bool { /// Returns `true` if `exc` is a reference to a builtin exception. fn is_builtin_exception(checker: &mut Checker, exc: &Expr) -> bool { return checker - .ctx + .semantic_model() .resolve_call_path(exc) .map_or(false, |call_path| { [ diff --git a/crates/ruff/src/rules/tryceratops/rules/useless_try_except.rs b/crates/ruff/src/rules/tryceratops/rules/useless_try_except.rs index e2446ddc6d679..8869e4eccdc26 100644 --- a/crates/ruff/src/rules/tryceratops/rules/useless_try_except.rs +++ b/crates/ruff/src/rules/tryceratops/rules/useless_try_except.rs @@ -44,7 +44,7 @@ pub(crate) fn useless_try_except(checker: &mut Checker, handlers: &[Excepthandle .iter() .map(|handler| { let ExceptHandler(ExcepthandlerExceptHandler { name, body, .. }) = handler; - let Some(Stmt::Raise(ast::StmtRaise { exc, .. })) = &body.first() else { + let Some(Stmt::Raise(ast::StmtRaise { exc, cause: None, .. })) = &body.first() else { return None; }; if let Some(expr) = exc { diff --git a/crates/ruff/src/rules/tryceratops/rules/verbose_log_message.rs b/crates/ruff/src/rules/tryceratops/rules/verbose_log_message.rs index 660fd89f7b2ba..46d4d7465e924 100644 --- a/crates/ruff/src/rules/tryceratops/rules/verbose_log_message.rs +++ b/crates/ruff/src/rules/tryceratops/rules/verbose_log_message.rs @@ -74,7 +74,7 @@ pub(crate) fn verbose_log_message(checker: &mut Checker, handlers: &[Excepthandl // Find all calls to `logging.exception`. 
let calls = { - let mut visitor = LoggerCandidateVisitor::new(&checker.ctx); + let mut visitor = LoggerCandidateVisitor::new(checker.semantic_model()); visitor.visit_body(body); visitor.calls }; diff --git a/crates/ruff/src/settings/configuration.rs b/crates/ruff/src/settings/configuration.rs index 612122cefc5aa..86212fd45e17f 100644 --- a/crates/ruff/src/settings/configuration.rs +++ b/crates/ruff/src/settings/configuration.rs @@ -13,6 +13,7 @@ use shellexpand; use shellexpand::LookupError; use crate::fs; +use crate::line_width::{LineLength, TabSize}; use crate::rule_selector::RuleSelector; use crate::rules::{ flake8_annotations, flake8_bandit, flake8_bugbear, flake8_builtins, flake8_comprehensions, @@ -32,6 +33,7 @@ pub struct RuleSelection { pub extend_select: Vec, pub fixable: Option>, pub unfixable: Vec, + pub extend_fixable: Vec, } #[derive(Debug, Default)] @@ -47,6 +49,7 @@ pub struct Configuration { pub extend: Option, pub extend_exclude: Vec, pub extend_include: Vec, + pub extend_per_file_ignores: Vec, pub external: Option>, pub fix: Option, pub fix_only: Option, @@ -54,7 +57,8 @@ pub struct Configuration { pub format: Option, pub ignore_init_module_imports: Option, pub include: Option>, - pub line_length: Option, + pub line_length: Option, + pub tab_size: Option, pub namespace_packages: Option>, pub required_version: Option, pub respect_gitignore: Option, @@ -101,7 +105,13 @@ impl Configuration { .collect(), extend_select: options.extend_select.unwrap_or_default(), fixable: options.fixable, - unfixable: options.unfixable.unwrap_or_default(), + unfixable: options + .unfixable + .into_iter() + .flatten() + .chain(options.extend_unfixable.into_iter().flatten()) + .collect(), + extend_fixable: options.extend_fixable.unwrap_or_default(), }], allowed_confusables: options.allowed_confusables, builtins: options.builtins, @@ -159,6 +169,17 @@ impl Configuration { .collect() }) .unwrap_or_default(), + extend_per_file_ignores: options + .extend_per_file_ignores + .map(|per_file_ignores| { + per_file_ignores + .into_iter() + .map(|(pattern, prefixes)| { + PerFileIgnore::new(pattern, &prefixes, Some(project_root)) + }) + .collect() + }) + .unwrap_or_default(), external: options.external, fix: options.fix, fix_only: options.fix_only, @@ -175,6 +196,7 @@ impl Configuration { .collect() }), line_length: options.line_length, + tab_size: options.tab_size, namespace_packages: options .namespace_packages .map(|namespace_package| resolve_src(&namespace_package, project_root)) @@ -247,6 +269,11 @@ impl Configuration { .into_iter() .chain(self.extend_include.into_iter()) .collect(), + extend_per_file_ignores: config + .extend_per_file_ignores + .into_iter() + .chain(self.extend_per_file_ignores.into_iter()) + .collect(), external: self.external.or(config.external), fix: self.fix.or(config.fix), fix_only: self.fix_only.or(config.fix_only), @@ -257,6 +284,7 @@ impl Configuration { .ignore_init_module_imports .or(config.ignore_init_module_imports), line_length: self.line_length.or(config.line_length), + tab_size: self.tab_size.or(config.tab_size), namespace_packages: self.namespace_packages.or(config.namespace_packages), per_file_ignores: self.per_file_ignores.or(config.per_file_ignores), required_version: self.required_version.or(config.required_version), diff --git a/crates/ruff/src/settings/defaults.rs b/crates/ruff/src/settings/defaults.rs index 5d468784f407e..ae7dbc962d35b 100644 --- a/crates/ruff/src/settings/defaults.rs +++ b/crates/ruff/src/settings/defaults.rs @@ -6,6 +6,7 @@ use regex::Regex; 
use rustc_hash::FxHashSet; use crate::codes::{self, RuleCodePrefix}; +use crate::line_width::{LineLength, TabSize}; use crate::registry::Linter; use crate::rule_selector::{prefix_to_selector, RuleSelector}; use crate::rules::{ @@ -26,8 +27,6 @@ pub const PREFIXES: &[RuleSelector] = &[ pub const TARGET_VERSION: PythonVersion = PythonVersion::Py310; -pub const LINE_LENGTH: usize = 88; - pub const TASK_TAGS: &[&str] = &["TODO", "FIXME", "XXX"]; pub static DUMMY_VARIABLE_RGX: Lazy = @@ -76,7 +75,8 @@ impl Default for Settings { force_exclude: false, ignore_init_module_imports: false, include: FilePatternSet::try_from_vec(INCLUDE.clone()).unwrap(), - line_length: LINE_LENGTH, + line_length: LineLength::default(), + tab_size: TabSize::default(), namespace_packages: vec![], per_file_ignores: vec![], respect_gitignore: true, @@ -97,7 +97,7 @@ impl Default for Settings { flake8_quotes: flake8_quotes::settings::Settings::default(), flake8_gettext: flake8_gettext::settings::Settings::default(), flake8_self: flake8_self::settings::Settings::default(), - flake8_tidy_imports: flake8_tidy_imports::Settings::default(), + flake8_tidy_imports: flake8_tidy_imports::settings::Settings::default(), flake8_type_checking: flake8_type_checking::settings::Settings::default(), flake8_unused_arguments: flake8_unused_arguments::settings::Settings::default(), isort: isort::settings::Settings::default(), diff --git a/crates/ruff/src/settings/mod.rs b/crates/ruff/src/settings/mod.rs index 791daf47901e7..2f31b6d221521 100644 --- a/crates/ruff/src/settings/mod.rs +++ b/crates/ruff/src/settings/mod.rs @@ -26,6 +26,7 @@ use crate::settings::types::{FilePatternSet, PerFileIgnore, PythonVersion, Seria use crate::warn_user_once_by_id; use self::rule_table::RuleTable; +use super::line_width::{LineLength, TabSize}; pub mod configuration; pub mod defaults; @@ -98,7 +99,8 @@ pub struct Settings { pub dummy_variable_rgx: Regex, pub external: FxHashSet, pub ignore_init_module_imports: bool, - pub line_length: usize, + pub line_length: LineLength, + pub tab_size: TabSize, pub namespace_packages: Vec, pub src: Vec, pub task_tags: Vec, @@ -116,7 +118,7 @@ pub struct Settings { pub flake8_pytest_style: flake8_pytest_style::settings::Settings, pub flake8_quotes: flake8_quotes::settings::Settings, pub flake8_self: flake8_self::settings::Settings, - pub flake8_tidy_imports: flake8_tidy_imports::Settings, + pub flake8_tidy_imports: flake8_tidy_imports::settings::Settings, pub flake8_type_checking: flake8_type_checking::settings::Settings, pub flake8_unused_arguments: flake8_unused_arguments::settings::Settings, pub isort: isort::settings::Settings, @@ -160,10 +162,16 @@ impl Settings { config.include.unwrap_or_else(|| defaults::INCLUDE.clone()), )?, ignore_init_module_imports: config.ignore_init_module_imports.unwrap_or_default(), - line_length: config.line_length.unwrap_or(defaults::LINE_LENGTH), + line_length: config.line_length.unwrap_or_default(), + tab_size: config.tab_size.unwrap_or_default(), namespace_packages: config.namespace_packages.unwrap_or_default(), per_file_ignores: resolve_per_file_ignores( - config.per_file_ignores.unwrap_or_default(), + config + .per_file_ignores + .unwrap_or_default() + .into_iter() + .chain(config.extend_per_file_ignores) + .collect(), )?, respect_gitignore: config.respect_gitignore.unwrap_or(true), src: config @@ -258,16 +266,11 @@ impl From<&Configuration> for RuleTable { // across config files (which otherwise wouldn't be possible since ruff // only has `extended` but no `extended-by`). 
let mut carryover_ignores: Option<&[RuleSelector]> = None; + let mut carryover_unfixables: Option<&[RuleSelector]> = None; let mut redirects = FxHashMap::default(); for selection in &config.rule_selections { - // We do not have an extend-fixable option, so fixable and unfixable - // selectors can simply be applied directly to fixable_set. - if selection.fixable.is_some() { - fixable_set.clear(); - } - // If a selection only specifies extend-select we cannot directly // apply its rule selectors to the select_set because we firstly have // to resolve the effectively selected rules within the current rule selection @@ -276,10 +279,13 @@ impl From<&Configuration> for RuleTable { // We do this via the following HashMap where the bool indicates // whether to enable or disable the given rule. let mut select_map_updates: FxHashMap = FxHashMap::default(); + let mut fixable_map_updates: FxHashMap = FxHashMap::default(); let carriedover_ignores = carryover_ignores.take(); + let carriedover_unfixables = carryover_unfixables.take(); for spec in Specificity::iter() { + // Iterate over rule selectors in order of specificity. for selector in selection .select .iter() @@ -301,17 +307,26 @@ impl From<&Configuration> for RuleTable { select_map_updates.insert(rule, false); } } - if let Some(fixable) = &selection.fixable { - fixable_set - .extend(fixable.iter().filter(|s| s.specificity() == spec).flatten()); + // Apply the same logic to `fixable` and `unfixable`. + for selector in selection + .fixable + .iter() + .flatten() + .chain(selection.extend_fixable.iter()) + .filter(|s| s.specificity() == spec) + { + for rule in selector { + fixable_map_updates.insert(rule, true); + } } for selector in selection .unfixable .iter() + .chain(carriedover_unfixables.into_iter().flatten()) .filter(|s| s.specificity() == spec) { for rule in selector { - fixable_set.remove(rule); + fixable_map_updates.insert(rule, false); } } } @@ -341,6 +356,29 @@ impl From<&Configuration> for RuleTable { } } + // Apply the same logic to `fixable` and `unfixable`. + if let Some(fixable) = &selection.fixable { + fixable_set = fixable_map_updates + .into_iter() + .filter_map(|(rule, enabled)| enabled.then_some(rule)) + .collect(); + + if fixable.is_empty() + && selection.extend_fixable.is_empty() + && !selection.unfixable.is_empty() + { + carryover_unfixables = Some(&selection.unfixable); + } + } else { + for (rule, enabled) in fixable_map_updates { + if enabled { + fixable_set.insert(rule); + } else { + fixable_set.remove(rule); + } + } + } + // We insert redirects into the hashmap so that we // can warn the users about remapped rule codes. for selector in selection @@ -351,6 +389,7 @@ impl From<&Configuration> for RuleTable { .chain(selection.ignore.iter()) .chain(selection.extend_select.iter()) .chain(selection.unfixable.iter()) + .chain(selection.extend_fixable.iter()) { if let RuleSelector::Prefix { prefix, diff --git a/crates/ruff/src/settings/options.rs b/crates/ruff/src/settings/options.rs index 52a202693aa3e..5904adcc39f34 100644 --- a/crates/ruff/src/settings/options.rs +++ b/crates/ruff/src/settings/options.rs @@ -5,6 +5,7 @@ use serde::{Deserialize, Serialize}; use ruff_macros::ConfigurationOptions; +use crate::line_width::{LineLength, TabSize}; use crate::rule_selector::RuleSelector; use crate::rules::{ flake8_annotations, flake8_bandit, flake8_bugbear, flake8_builtins, flake8_comprehensions, @@ -175,6 +176,24 @@ pub struct Options { /// A list of rule codes or prefixes to enable, in addition to those /// specified by `select`. 
pub extend_select: Option>, + #[option( + default = r#"[]"#, + value_type = "list[RuleSelector]", + example = r#" + # Enable autofix for flake8-bugbear (`B`), on top of any rules specified by `fixable`. + extend-fixable = ["B"] + "# + )] + /// A list of rule codes or prefixes to consider autofixable, in addition to those + /// specified by `fixable`. + pub extend_fixable: Option>, + /// A list of rule codes or prefixes to consider non-auto-fixable, in addition to those + /// specified by `unfixable`. + /// + /// This option has been **deprecated** in favor of `unfixable` since its usage is now + /// interchangeable with `unfixable`. + #[cfg_attr(feature = "schemars", schemars(skip))] + pub extend_unfixable: Option>, #[option( default = "[]", value_type = "list[str]", @@ -235,7 +254,7 @@ pub struct Options { /// respect these exclusions unequivocally. /// /// This is useful for [`pre-commit`](https://pre-commit.com/), which explicitly passes all - /// changed files to the [`ruff-pre-commit`](https://github.com/charliermarsh/ruff-pre-commit) + /// changed files to the [`ruff-pre-commit`](https://github.com/astral-sh/ruff-pre-commit) /// plugin, regardless of whether they're marked as excluded by Ruff's own /// settings. pub force_exclude: Option, @@ -285,13 +304,22 @@ pub struct Options { default = "88", value_type = "int", example = r#" - # Allow lines to be as long as 120 characters. - line-length = 120 + # Allow lines to be as long as 120 characters. + line-length = 120 "# )] /// The line length to use when enforcing long-lines violations (like /// `E501`). - pub line_length: Option, + pub line_length: Option, + #[option( + default = "4", + value_type = "int", + example = r#" + tab_size = 8 + "# + )] + /// The tabulation size to calculate line length. + pub tab_size: Option, #[option( default = "None", value_type = "str", @@ -523,4 +551,16 @@ pub struct Options { /// A list of mappings from file pattern to rule codes or prefixes to /// exclude, when considering any matching files. pub per_file_ignores: Option>>, + #[option( + default = "{}", + value_type = "dict[str, list[RuleSelector]]", + example = r#" + # Also ignore `E401` in all `__init__.py` files. + [tool.ruff.extend-per-file-ignores] + "__init__.py" = ["E402"] + "# + )] + /// A list of mappings from file pattern to rule codes or prefixes to + /// exclude, in addition to any rules excluded by `per-file-ignores`. 
+ pub extend_per_file_ignores: Option>>, } diff --git a/crates/ruff/src/settings/pyproject.rs b/crates/ruff/src/settings/pyproject.rs index 929d5bb541235..6577dedd5cc3f 100644 --- a/crates/ruff/src/settings/pyproject.rs +++ b/crates/ruff/src/settings/pyproject.rs @@ -151,9 +151,9 @@ mod tests { use rustc_hash::FxHashMap; use crate::codes::{self, RuleCodePrefix}; + use crate::line_width::LineLength; use crate::rules::flake8_quotes::settings::Quote; - use crate::rules::flake8_tidy_imports::banned_api::ApiBan; - use crate::rules::flake8_tidy_imports::relative_imports::Strictness; + use crate::rules::flake8_tidy_imports::settings::{ApiBan, Strictness}; use crate::rules::{ flake8_bugbear, flake8_builtins, flake8_errmsg, flake8_import_conventions, flake8_pytest_style, flake8_quotes, flake8_tidy_imports, mccabe, pep8_naming, @@ -200,7 +200,7 @@ line-length = 79 pyproject.tool, Some(Tools { ruff: Some(Options { - line_length: Some(79), + line_length: Some(LineLength::from(79)), ..Options::default() }) }) @@ -301,7 +301,7 @@ other-attribute = 1 config, Options { allowed_confusables: Some(vec!['βˆ’', 'ρ', 'βˆ—']), - line_length: Some(88), + line_length: Some(LineLength::from(88)), extend_exclude: Some(vec![ "excluded_file.py".to_string(), "migrations".to_string(), diff --git a/crates/ruff/src/settings/types.rs b/crates/ruff/src/settings/types.rs index 7a0bd96ecdebb..49689d8948b16 100644 --- a/crates/ruff/src/settings/types.rs +++ b/crates/ruff/src/settings/types.rs @@ -1,12 +1,13 @@ -use anyhow::{bail, Result}; -use globset::{Glob, GlobSet, GlobSetBuilder}; -use pep440_rs::{Version as Pep440Version, VersionSpecifiers}; -use serde::{de, Deserialize, Deserializer, Serialize}; use std::hash::{Hash, Hasher}; use std::ops::Deref; use std::path::{Path, PathBuf}; use std::str::FromStr; use std::string::ToString; + +use anyhow::{bail, Result}; +use globset::{Glob, GlobSet, GlobSetBuilder}; +use pep440_rs::{Version as Pep440Version, VersionSpecifiers}; +use serde::{de, Deserialize, Deserializer, Serialize}; use strum::IntoEnumIterator; use strum_macros::EnumIter; diff --git a/crates/ruff/src/test.rs b/crates/ruff/src/test.rs index 92b3ba25c992a..989272cf78377 100644 --- a/crates/ruff/src/test.rs +++ b/crates/ruff/src/test.rs @@ -5,10 +5,10 @@ use std::path::Path; use anyhow::Result; use itertools::Itertools; -use ruff_diagnostics::{AutofixKind, Diagnostic}; use rustc_hash::FxHashMap; use rustpython_parser::lexer::LexResult; +use ruff_diagnostics::{AutofixKind, Diagnostic}; use ruff_python_ast::source_code::{Indexer, Locator, SourceFileBuilder, Stylist}; use crate::autofix::fix_file; @@ -193,7 +193,7 @@ pub(crate) fn print_messages(messages: &[Message]) -> String { TextEmitter::default() .with_show_fix_status(true) - .with_show_fix(true) + .with_show_fix_diff(true) .with_show_source(true) .emit( &mut output, diff --git a/crates/ruff_benchmark/benches/linter.rs b/crates/ruff_benchmark/benches/linter.rs index b07390edbb09c..98ea5c1bd3327 100644 --- a/crates/ruff_benchmark/benches/linter.rs +++ b/crates/ruff_benchmark/benches/linter.rs @@ -1,12 +1,14 @@ +use std::time::Duration; + use criterion::measurement::WallTime; use criterion::{ criterion_group, criterion_main, BenchmarkGroup, BenchmarkId, Criterion, Throughput, }; + use ruff::linter::lint_only; use ruff::settings::{flags, Settings}; use ruff::RuleSelector; use ruff_benchmark::{TestCase, TestCaseSpeed, TestFile, TestFileDownloadError}; -use std::time::Duration; #[cfg(target_os = "windows")] #[global_allocator] diff --git 
a/crates/ruff_benchmark/benches/parser.rs b/crates/ruff_benchmark/benches/parser.rs index afdcc598321f3..1dbd95af424a8 100644 --- a/crates/ruff_benchmark/benches/parser.rs +++ b/crates/ruff_benchmark/benches/parser.rs @@ -1,9 +1,11 @@ +use std::time::Duration; + use criterion::measurement::WallTime; use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; +use rustpython_parser::ast::Stmt; + use ruff_benchmark::{TestCase, TestCaseSpeed, TestFile, TestFileDownloadError}; use ruff_python_ast::statement_visitor::{walk_stmt, StatementVisitor}; -use rustpython_parser::ast::Stmt; -use std::time::Duration; #[cfg(target_os = "windows")] #[global_allocator] diff --git a/crates/ruff_benchmark/src/lib.rs b/crates/ruff_benchmark/src/lib.rs index 6ea1fcabe160c..70d2e7a34f34d 100644 --- a/crates/ruff_benchmark/src/lib.rs +++ b/crates/ruff_benchmark/src/lib.rs @@ -1,6 +1,7 @@ use std::fmt::{Display, Formatter}; use std::path::PathBuf; use std::process::Command; + use url::Url; /// Relative size of a test case. Benchmarks can use it to configure the time for how long a benchmark should run to get stable results. diff --git a/crates/ruff_cache/src/cache_key.rs b/crates/ruff_cache/src/cache_key.rs index 999aa8861e2fc..c05bf48ead8ce 100644 --- a/crates/ruff_cache/src/cache_key.rs +++ b/crates/ruff_cache/src/cache_key.rs @@ -1,5 +1,3 @@ -use itertools::Itertools; -use regex::Regex; use std::borrow::Cow; use std::collections::hash_map::DefaultHasher; use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; @@ -7,6 +5,9 @@ use std::hash::{Hash, Hasher}; use std::ops::{Deref, DerefMut}; use std::path::{Path, PathBuf}; +use itertools::Itertools; +use regex::Regex; + #[derive(Clone, Debug, Default)] pub struct CacheKeyHasher { inner: DefaultHasher, diff --git a/crates/ruff_cache/src/filetime.rs b/crates/ruff_cache/src/filetime.rs index 17e8eb551e742..29c2d074a54a5 100644 --- a/crates/ruff_cache/src/filetime.rs +++ b/crates/ruff_cache/src/filetime.rs @@ -1,7 +1,9 @@ -use crate::{CacheKey, CacheKeyHasher}; -use filetime::FileTime; use std::hash::Hash; +use filetime::FileTime; + +use crate::{CacheKey, CacheKeyHasher}; + impl CacheKey for FileTime { fn cache_key(&self, state: &mut CacheKeyHasher) { self.hash(&mut **state); diff --git a/crates/ruff_cache/src/globset.rs b/crates/ruff_cache/src/globset.rs index fbaf79ca05709..8011b7e0558d1 100644 --- a/crates/ruff_cache/src/globset.rs +++ b/crates/ruff_cache/src/globset.rs @@ -1,6 +1,7 @@ -use crate::{CacheKey, CacheKeyHasher}; use globset::{Glob, GlobMatcher}; +use crate::{CacheKey, CacheKeyHasher}; + impl CacheKey for GlobMatcher { fn cache_key(&self, state: &mut CacheKeyHasher) { self.glob().cache_key(state); diff --git a/crates/ruff_cache/src/lib.rs b/crates/ruff_cache/src/lib.rs index ddde67db1f66b..3d17f883365dc 100644 --- a/crates/ruff_cache/src/lib.rs +++ b/crates/ruff_cache/src/lib.rs @@ -1,10 +1,10 @@ -mod cache_key; -pub mod filetime; -pub mod globset; +use std::path::{Path, PathBuf}; pub use cache_key::{CacheKey, CacheKeyHasher}; -use std::path::{Path, PathBuf}; +mod cache_key; +pub mod filetime; +pub mod globset; pub const CACHE_DIR_NAME: &str = ".ruff_cache"; diff --git a/crates/ruff_cache/tests/cache_key.rs b/crates/ruff_cache/tests/cache_key.rs index 0041afd253947..43e8f092032fc 100644 --- a/crates/ruff_cache/tests/cache_key.rs +++ b/crates/ruff_cache/tests/cache_key.rs @@ -1,8 +1,9 @@ -use ruff_cache::{CacheKey, CacheKeyHasher}; -use ruff_macros::CacheKey; use std::collections::hash_map::DefaultHasher; use 
std::hash::{Hash, Hasher}; +use ruff_cache::{CacheKey, CacheKeyHasher}; +use ruff_macros::CacheKey; + #[derive(CacheKey, Hash)] struct UnitStruct; diff --git a/crates/ruff_cli/Cargo.toml b/crates/ruff_cli/Cargo.toml index 7a1eeb47ff4d3..ec45b03b610d0 100644 --- a/crates/ruff_cli/Cargo.toml +++ b/crates/ruff_cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ruff_cli" -version = "0.0.269" +version = "0.0.270" authors = ["Charlie Marsh "] edition = { workspace = true } rust-version = { workspace = true } @@ -66,7 +66,6 @@ ureq = { version = "2.6.2", features = [] } [features] jupyter_notebook = ["ruff/jupyter_notebook"] -ecosystem_ci = ["ruff/ecosystem_ci"] [target.'cfg(target_os = "windows")'.dependencies] mimalloc = "0.1.34" diff --git a/crates/ruff_cli/src/args.rs b/crates/ruff_cli/src/args.rs index 6b037e621788f..09662c827281a 100644 --- a/crates/ruff_cli/src/args.rs +++ b/crates/ruff_cli/src/args.rs @@ -3,6 +3,9 @@ use std::str::FromStr; use clap::{command, Parser}; use regex::Regex; +use rustc_hash::FxHashMap; + +use ruff::line_width::LineLength; use ruff::logging::LogLevel; use ruff::registry::Rule; use ruff::resolver::ConfigProcessor; @@ -11,7 +14,6 @@ use ruff::settings::types::{ FilePattern, PatternPrefixPair, PerFileIgnore, PythonVersion, SerializationFormat, }; use ruff::RuleSelector; -use rustc_hash::FxHashMap; #[derive(Debug, Parser)] #[command( @@ -57,6 +59,13 @@ pub enum Command { /// Generate shell completion. #[clap(alias = "--generate-shell-completion", hide = true)] GenerateShellCompletion { shell: clap_complete_command::Shell }, + /// Format the given files, or stdin when using `-`. + #[doc(hidden)] + #[clap(hide = true)] + Format { + /// List of files or directories to format or `-` for stdin + files: Vec, + }, } #[derive(Debug, clap::Args)] @@ -126,8 +135,8 @@ pub struct CheckArgs { hide_possible_values = true )] pub ignore: Option>, - /// Like --select, but adds additional rule codes on top of the selected - /// ones. + /// Like --select, but adds additional rule codes on top of those already + /// specified. #[arg( long, value_delimiter = ',', @@ -147,9 +156,13 @@ pub struct CheckArgs { hide = true )] pub extend_ignore: Option>, - /// List of mappings from file pattern to code to exclude + /// List of mappings from file pattern to code to exclude. #[arg(long, value_delimiter = ',', help_heading = "Rule selection")] pub per_file_ignores: Option>, + /// Like `--per-file-ignores`, but adds additional ignores on top of + /// those already specified. + #[arg(long, value_delimiter = ',', help_heading = "Rule selection")] + pub extend_per_file_ignores: Option>, /// List of paths, used to omit files and/or directories from analysis. #[arg( long, @@ -189,6 +202,27 @@ pub struct CheckArgs { hide_possible_values = true )] pub unfixable: Option>, + /// Like --fixable, but adds additional rule codes on top of those already + /// specified. + #[arg( + long, + value_delimiter = ',', + value_name = "RULE_CODE", + value_parser = parse_rule_selector, + help_heading = "Rule selection", + hide_possible_values = true + )] + pub extend_fixable: Option>, + /// Like --unfixable. (Deprecated: You can just use --unfixable instead.) + #[arg( + long, + value_delimiter = ',', + value_name = "RULE_CODE", + value_parser = parse_rule_selector, + help_heading = "Rule selection", + hide = true + )] + pub extend_unfixable: Option>, /// Respect file exclusions via `.gitignore` and other standard ignore /// files. 
#[arg( @@ -292,7 +326,6 @@ pub struct CheckArgs { )] pub show_settings: bool, /// Dev-only argument to show fixes - #[cfg(feature = "ecosystem_ci")] #[arg(long, hide = true)] pub ecosystem_ci: bool, } @@ -359,8 +392,9 @@ impl CheckArgs { add_noqa: self.add_noqa, config: self.config, diff: self.diff, - exit_zero: self.exit_zero, + ecosystem_ci: self.ecosystem_ci, exit_non_zero_on_fix: self.exit_non_zero_on_fix, + exit_zero: self.exit_zero, files: self.files, ignore_noqa: self.ignore_noqa, isolated: self.isolated, @@ -375,8 +409,10 @@ impl CheckArgs { dummy_variable_rgx: self.dummy_variable_rgx, exclude: self.exclude, extend_exclude: self.extend_exclude, + extend_fixable: self.extend_fixable, extend_ignore: self.extend_ignore, extend_select: self.extend_select, + extend_unfixable: self.extend_unfixable, fixable: self.fixable, ignore: self.ignore, line_length: self.line_length, @@ -422,8 +458,9 @@ pub struct Arguments { pub add_noqa: bool, pub config: Option, pub diff: bool, - pub exit_zero: bool, + pub ecosystem_ci: bool, pub exit_non_zero_on_fix: bool, + pub exit_zero: bool, pub files: Vec, pub ignore_noqa: bool, pub isolated: bool, @@ -442,8 +479,10 @@ pub struct Overrides { pub dummy_variable_rgx: Option, pub exclude: Option>, pub extend_exclude: Option>, + pub extend_fixable: Option>, pub extend_ignore: Option>, pub extend_select: Option>, + pub extend_unfixable: Option>, pub fixable: Option>, pub ignore: Option>, pub line_length: Option, @@ -493,7 +532,14 @@ impl ConfigProcessor for &Overrides { .collect(), extend_select: self.extend_select.clone().unwrap_or_default(), fixable: self.fixable.clone(), - unfixable: self.unfixable.clone().unwrap_or_default(), + unfixable: self + .unfixable + .iter() + .cloned() + .chain(self.extend_unfixable.iter().cloned()) + .flatten() + .collect(), + extend_fixable: self.extend_fixable.clone().unwrap_or_default(), }); if let Some(format) = &self.format { config.format = Some(*format); @@ -502,7 +548,7 @@ impl ConfigProcessor for &Overrides { config.force_exclude = Some(*force_exclude); } if let Some(line_length) = &self.line_length { - config.line_length = Some(*line_length); + config.line_length = Some(LineLength::from(*line_length)); } if let Some(per_file_ignores) = &self.per_file_ignores { config.per_file_ignores = Some(collect_per_file_ignores(per_file_ignores.clone())); diff --git a/crates/ruff_cli/src/cache.rs b/crates/ruff_cli/src/cache.rs index 757ef33805aa8..0147d9ccb563c 100644 --- a/crates/ruff_cli/src/cache.rs +++ b/crates/ruff_cli/src/cache.rs @@ -2,23 +2,24 @@ use std::cell::RefCell; use std::fs; use std::hash::Hasher; use std::io::Write; +#[cfg(unix)] +use std::os::unix::fs::PermissionsExt; use std::path::Path; use anyhow::Result; use filetime::FileTime; use log::error; use path_absolutize::Absolutize; +use ruff_text_size::{TextRange, TextSize}; +use serde::ser::{SerializeSeq, SerializeStruct}; +use serde::{Deserialize, Serialize, Serializer}; + use ruff::message::Message; use ruff::settings::{AllSettings, Settings}; use ruff_cache::{CacheKey, CacheKeyHasher}; use ruff_diagnostics::{DiagnosticKind, Fix}; use ruff_python_ast::imports::ImportMap; use ruff_python_ast::source_code::SourceFileBuilder; -use ruff_text_size::{TextRange, TextSize}; -use serde::ser::{SerializeSeq, SerializeStruct}; -use serde::{Deserialize, Serialize, Serializer}; -#[cfg(unix)] -use std::os::unix::fs::PermissionsExt; const CARGO_PKG_VERSION: &str = env!("CARGO_PKG_VERSION"); diff --git a/crates/ruff_cli/src/commands/config.rs 
b/crates/ruff_cli/src/commands/config.rs index a2db72a85fe44..22c414a3619fc 100644 --- a/crates/ruff_cli/src/commands/config.rs +++ b/crates/ruff_cli/src/commands/config.rs @@ -1,6 +1,7 @@ -use crate::ExitStatus; use ruff::settings::options::Options; +use crate::ExitStatus; + #[allow(clippy::print_stdout)] pub(crate) fn config(key: Option<&str>) -> ExitStatus { match key { diff --git a/crates/ruff_cli/src/commands/run.rs b/crates/ruff_cli/src/commands/run.rs index 07d25afcaa83d..d33a55ed2fe3d 100644 --- a/crates/ruff_cli/src/commands/run.rs +++ b/crates/ruff_cli/src/commands/run.rs @@ -143,7 +143,8 @@ pub(crate) fn run( acc }); - diagnostics.messages.sort_unstable(); + diagnostics.messages.sort(); + let duration = start.elapsed(); debug!("Checked {:?} files in: {:?}", paths.len(), duration); @@ -254,8 +255,6 @@ mod test { LogLevel::Default, FixMode::None, Flags::SHOW_VIOLATIONS, - #[cfg(feature = "ecosystem_ci")] - false, ); let mut writer: Vec = Vec::new(); // Mute the terminal color codes diff --git a/crates/ruff_cli/src/commands/run_stdin.rs b/crates/ruff_cli/src/commands/run_stdin.rs index 817de79c57121..f0ac60f32ac10 100644 --- a/crates/ruff_cli/src/commands/run_stdin.rs +++ b/crates/ruff_cli/src/commands/run_stdin.rs @@ -11,7 +11,7 @@ use crate::args::Overrides; use crate::diagnostics::{lint_stdin, Diagnostics}; /// Read a `String` from `stdin`. -fn read_from_stdin() -> Result { +pub(crate) fn read_from_stdin() -> Result { let mut buffer = String::new(); io::stdin().lock().read_to_string(&mut buffer)?; Ok(buffer) diff --git a/crates/ruff_cli/src/lib.rs b/crates/ruff_cli/src/lib.rs index 752f6efa25619..3a88139e5475b 100644 --- a/crates/ruff_cli/src/lib.rs +++ b/crates/ruff_cli/src/lib.rs @@ -1,10 +1,11 @@ -use std::io::{self, BufWriter}; -use std::path::PathBuf; +use std::io::{self, stdout, BufWriter, Write}; +use std::path::{Path, PathBuf}; use std::process::ExitCode; use std::sync::mpsc::channel; -use anyhow::Result; +use anyhow::{Context, Result}; use clap::CommandFactory; +use log::warn; use notify::{recommended_watcher, RecursiveMode, Watcher}; use ruff::logging::{set_up_logging, LogLevel}; @@ -13,6 +14,7 @@ use ruff::settings::{flags, CliSettings}; use ruff::{fs, warn_user_once}; use crate::args::{Args, CheckArgs, Command}; +use crate::commands::run_stdin::read_from_stdin; use crate::printer::{Flags as PrinterFlags, Printer}; pub mod args; @@ -105,7 +107,7 @@ quoting the executed command, along with the relevant file contents and `pyproje #[cfg(windows)] assert!(colored::control::set_virtual_terminal(true).is_ok()); - let log_level: LogLevel = (&log_level_args).into(); + let log_level = LogLevel::from(&log_level_args); set_up_logging(&log_level)?; match command { @@ -117,14 +119,42 @@ quoting the executed command, along with the relevant file contents and `pyproje shell.generate(&mut Args::command(), &mut io::stdout()); } Command::Check(args) => return check(args, log_level), + Command::Format { files } => return format(&files), } Ok(ExitStatus::Success) } +fn format(files: &[PathBuf]) -> Result { + warn_user_once!( + "`ruff format` is a work-in-progress, subject to change at any time, and intended for \ + internal use only." 
+ ); + + // dummy + let format_code = |code: &str| code.replace("# DEL", ""); + + match &files { + // Check if we should read from stdin + [path] if path == Path::new("-") => { + let unformatted = read_from_stdin()?; + let formatted = format_code(&unformatted); + stdout().lock().write_all(formatted.as_bytes())?; + } + _ => { + for file in files { + let unformatted = std::fs::read_to_string(file) + .with_context(|| format!("Could not read {}: ", file.display()))?; + let formatted = format_code(&unformatted); + std::fs::write(file, formatted) + .with_context(|| format!("Could not write to {}, exiting", file.display()))?; + } + } + } + Ok(ExitStatus::Success) +} + fn check(args: CheckArgs, log_level: LogLevel) -> Result { - #[cfg(feature = "ecosystem_ci")] - let ecosystem_ci = args.ecosystem_ci; let (cli, overrides) = args.partition(); // Construct the "default" settings. These are used when no `pyproject.toml` @@ -182,11 +212,18 @@ fn check(args: CheckArgs, log_level: LogLevel) -> Result { printer_flags |= PrinterFlags::SHOW_VIOLATIONS; } if show_fixes { - printer_flags |= PrinterFlags::SHOW_FIXES; + printer_flags |= PrinterFlags::SHOW_FIX_SUMMARY; } if show_source { printer_flags |= PrinterFlags::SHOW_SOURCE; } + if cli.ecosystem_ci { + warn_user_once!( + "The formatting of fixes emitted by this option is a work-in-progress, subject to \ + change at any time, and intended only for internal use." + ); + printer_flags |= PrinterFlags::SHOW_FIX_DIFF; + } #[cfg(debug_assertions)] if cache { @@ -211,14 +248,7 @@ fn check(args: CheckArgs, log_level: LogLevel) -> Result { return Ok(ExitStatus::Success); } - let printer = Printer::new( - format, - log_level, - autofix, - printer_flags, - #[cfg(feature = "ecosystem_ci")] - ecosystem_ci, - ); + let printer = Printer::new(format, log_level, autofix, printer_flags); if cli.watch { if format != SerializationFormat::Text { @@ -356,9 +386,10 @@ fn check(args: CheckArgs, log_level: LogLevel) -> Result { #[cfg(test)] mod test_file_change_detector { - use crate::{change_detected, ChangeKind}; use std::path::PathBuf; + use crate::{change_detected, ChangeKind}; + #[test] fn detect_correct_file_change() { assert_eq!( diff --git a/crates/ruff_cli/src/printer.rs b/crates/ruff_cli/src/printer.rs index d82febf2debed..81d4c95f4e758 100644 --- a/crates/ruff_cli/src/printer.rs +++ b/crates/ruff_cli/src/printer.rs @@ -28,9 +28,14 @@ use crate::diagnostics::Diagnostics; bitflags! { #[derive(Default, Debug, Copy, Clone)] pub(crate) struct Flags: u8 { + /// Whether to show violations when emitting diagnostics. const SHOW_VIOLATIONS = 0b0000_0001; - const SHOW_FIXES = 0b0000_0010; - const SHOW_SOURCE = 0b000_0100; + /// Whether to show the source code when emitting diagnostics. + const SHOW_SOURCE = 0b000_0010; + /// Whether to show a summary of the fixed violations when emitting diagnostics. + const SHOW_FIX_SUMMARY = 0b0000_0100; + /// Whether to show a diff of each fixed violation when emitting diagnostics. 
+ const SHOW_FIX_DIFF = 0b0000_1000; } } @@ -70,9 +75,6 @@ pub(crate) struct Printer { log_level: LogLevel, autofix_level: flags::FixMode, flags: Flags, - /// Dev-only argument to show fixes - #[cfg(feature = "ecosystem_ci")] - ecosystem_ci: bool, } impl Printer { @@ -81,15 +83,12 @@ impl Printer { log_level: LogLevel, autofix_level: flags::FixMode, flags: Flags, - #[cfg(feature = "ecosystem_ci")] ecosystem_ci: bool, ) -> Self { Self { format, log_level, autofix_level, flags, - #[cfg(feature = "ecosystem_ci")] - ecosystem_ci, } } @@ -167,10 +166,10 @@ impl Printer { self.format, SerializationFormat::Text | SerializationFormat::Grouped ) { - if self.flags.contains(Flags::SHOW_FIXES) { + if self.flags.contains(Flags::SHOW_FIX_SUMMARY) { if !diagnostics.fixed.is_empty() { writeln!(writer)?; - print_fixed(writer, &diagnostics.fixed)?; + print_fix_summary(writer, &diagnostics.fixed)?; writeln!(writer)?; } } @@ -189,21 +188,16 @@ impl Printer { JunitEmitter::default().emit(writer, &diagnostics.messages, &context)?; } SerializationFormat::Text => { - #[cfg(feature = "ecosystem_ci")] - let show_fixes = self.ecosystem_ci && self.flags.contains(Flags::SHOW_FIXES); - #[cfg(not(feature = "ecosystem_ci"))] - let show_fixes = false; - TextEmitter::default() .with_show_fix_status(show_fix_status(self.autofix_level)) - .with_show_fix(show_fixes) + .with_show_fix_diff(self.flags.contains(Flags::SHOW_FIX_DIFF)) .with_show_source(self.flags.contains(Flags::SHOW_SOURCE)) .emit(writer, &diagnostics.messages, &context)?; - if self.flags.contains(Flags::SHOW_FIXES) { + if self.flags.contains(Flags::SHOW_FIX_SUMMARY) { if !diagnostics.fixed.is_empty() { writeln!(writer)?; - print_fixed(writer, &diagnostics.fixed)?; + print_fix_summary(writer, &diagnostics.fixed)?; writeln!(writer)?; } } @@ -216,10 +210,10 @@ impl Printer { .with_show_fix_status(show_fix_status(self.autofix_level)) .emit(writer, &diagnostics.messages, &context)?; - if self.flags.contains(Flags::SHOW_FIXES) { + if self.flags.contains(Flags::SHOW_FIX_SUMMARY) { if !diagnostics.fixed.is_empty() { writeln!(writer)?; - print_fixed(writer, &diagnostics.fixed)?; + print_fix_summary(writer, &diagnostics.fixed)?; writeln!(writer)?; } } @@ -397,7 +391,7 @@ const fn show_fix_status(autofix_level: flags::FixMode) -> bool { !matches!(autofix_level, flags::FixMode::Apply) } -fn print_fixed(stdout: &mut T, fixed: &FxHashMap) -> Result<()> { +fn print_fix_summary(stdout: &mut T, fixed: &FxHashMap) -> Result<()> { let total = fixed .values() .map(|table| table.values().sum::()) diff --git a/crates/ruff_cli/tests/black_compatibility_test.rs b/crates/ruff_cli/tests/black_compatibility_test.rs index afe3d174c7d1a..39f5ba54973c1 100644 --- a/crates/ruff_cli/tests/black_compatibility_test.rs +++ b/crates/ruff_cli/tests/black_compatibility_test.rs @@ -11,9 +11,10 @@ use std::{fs, process, str}; use anyhow::{anyhow, Context, Result}; use assert_cmd::Command; use log::info; -use ruff::logging::{set_up_logging, LogLevel}; use walkdir::WalkDir; +use ruff::logging::{set_up_logging, LogLevel}; + /// Handles `blackd` process and allows submitting code to it for formatting. 
struct Blackd { address: SocketAddr, diff --git a/crates/ruff_dev/src/generate_cli_help.rs b/crates/ruff_dev/src/generate_cli_help.rs index 266cfe6f5e2d9..25e2417467666 100644 --- a/crates/ruff_dev/src/generate_cli_help.rs +++ b/crates/ruff_dev/src/generate_cli_help.rs @@ -7,6 +7,7 @@ use std::{fs, str}; use anyhow::{bail, Context, Result}; use clap::CommandFactory; use pretty_assertions::StrComparison; + use ruff_cli::args; use crate::generate_all::{Mode, REGENERATE_ALL_COMMAND}; @@ -119,10 +120,12 @@ fn check_help_text() -> String { #[cfg(test)] mod test { - use super::{main, Args}; - use crate::generate_all::Mode; use anyhow::Result; + use crate::generate_all::Mode; + + use super::{main, Args}; + #[test] fn test_generate_json_schema() -> Result<()> { main(&Args { mode: Mode::Check }) diff --git a/crates/ruff_dev/src/generate_json_schema.rs b/crates/ruff_dev/src/generate_json_schema.rs index 5d8139674b364..147638dba6ef4 100644 --- a/crates/ruff_dev/src/generate_json_schema.rs +++ b/crates/ruff_dev/src/generate_json_schema.rs @@ -3,12 +3,13 @@ use std::fs; use std::path::PathBuf; -use crate::generate_all::{Mode, REGENERATE_ALL_COMMAND}; use anyhow::{bail, Result}; use pretty_assertions::StrComparison; -use ruff::settings::options::Options; use schemars::schema_for; +use ruff::settings::options::Options; + +use crate::generate_all::{Mode, REGENERATE_ALL_COMMAND}; use crate::ROOT_DIR; #[derive(clap::Args)] @@ -48,10 +49,12 @@ pub(crate) fn main(args: &Args) -> Result<()> { #[cfg(test)] mod test { - use super::{main, Args}; - use crate::generate_all::Mode; use anyhow::Result; + use crate::generate_all::Mode; + + use super::{main, Args}; + #[test] fn test_generate_json_schema() -> Result<()> { main(&Args { mode: Mode::Check }) diff --git a/crates/ruff_dev/src/generate_options.rs b/crates/ruff_dev/src/generate_options.rs index 8e541eb192bc7..cbea31d99f355 100644 --- a/crates/ruff_dev/src/generate_options.rs +++ b/crates/ruff_dev/src/generate_options.rs @@ -1,5 +1,6 @@ //! Generate a Markdown-compatible listing of configuration options. use itertools::Itertools; + use ruff::settings::options::Options; use ruff::settings::options_base::{OptionEntry, OptionField}; diff --git a/crates/ruff_diagnostics/src/diagnostic.rs b/crates/ruff_diagnostics/src/diagnostic.rs index 32ddbbedbde57..419f048674c0d 100644 --- a/crates/ruff_diagnostics/src/diagnostic.rs +++ b/crates/ruff_diagnostics/src/diagnostic.rs @@ -1,7 +1,6 @@ use anyhow::Result; use log::error; use ruff_text_size::{TextRange, TextSize}; - #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; diff --git a/crates/ruff_diagnostics/src/edit.rs b/crates/ruff_diagnostics/src/edit.rs index 4eaf7ad3a7100..5da2dc590a001 100644 --- a/crates/ruff_diagnostics/src/edit.rs +++ b/crates/ruff_diagnostics/src/edit.rs @@ -1,8 +1,8 @@ -use ruff_text_size::{TextRange, TextSize}; +use std::cmp::Ordering; +use ruff_text_size::{TextRange, TextSize}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -use std::cmp::Ordering; /// A text edit to be applied to a source file. Inserts, deletes, or replaces /// content at a given location. 
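(Editor's note, not part of the diff.) The printer now distinguishes a fix summary from a fix diff: `SHOW_FIXES` becomes `SHOW_FIX_SUMMARY`, `SHOW_FIX_DIFF` is new, and `--ecosystem-ci` simply sets the latter instead of threading a separate `ecosystem_ci` flag through `Printer`. A minimal standalone sketch of how these flags compose, assuming only the `bitflags` crate that `printer.rs` already uses; the flag names and bit positions mirror `crates/ruff_cli/src/printer.rs`:

```rust
use bitflags::bitflags;

bitflags! {
    #[derive(Debug, Copy, Clone)]
    struct Flags: u8 {
        const SHOW_VIOLATIONS  = 0b0000_0001;
        const SHOW_SOURCE      = 0b0000_0010;
        const SHOW_FIX_SUMMARY = 0b0000_0100;
        const SHOW_FIX_DIFF    = 0b0000_1000;
    }
}

fn main() {
    // `check()` ORs flags in one by one, based on the CLI options...
    let mut flags = Flags::empty();
    flags |= Flags::SHOW_VIOLATIONS;
    flags |= Flags::SHOW_FIX_SUMMARY;

    // ...and the printer later queries them with `contains`.
    assert!(flags.contains(Flags::SHOW_FIX_SUMMARY));
    assert!(!flags.contains(Flags::SHOW_FIX_DIFF));
}
```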
diff --git a/crates/ruff_formatter/Cargo.toml b/crates/ruff_formatter/Cargo.toml index a9c82b91a8fa7..593b47e3592d2 100644 --- a/crates/ruff_formatter/Cargo.toml +++ b/crates/ruff_formatter/Cargo.toml @@ -14,6 +14,7 @@ schemars = { workspace = true, optional = true } serde = { workspace = true, optional = true } tracing = { version = "0.1.37", default-features = false, features = ["std"] } unicode-width = { version = "0.1.10" } +static_assertions = "1.1.0" [dev-dependencies] insta = { workspace = true } diff --git a/crates/ruff_python_formatter/src/shared_traits.rs b/crates/ruff_formatter/shared_traits.rs similarity index 99% rename from crates/ruff_python_formatter/src/shared_traits.rs rename to crates/ruff_formatter/shared_traits.rs index 421d1f6b1b11f..f427934f31a31 100644 --- a/crates/ruff_python_formatter/src/shared_traits.rs +++ b/crates/ruff_formatter/shared_traits.rs @@ -1,5 +1,3 @@ -#![allow(clippy::all)] - /// Used to get an object that knows how to format this object. pub trait AsFormat { type Format<'a>: ruff_formatter::Format diff --git a/crates/ruff_formatter/src/builders.rs b/crates/ruff_formatter/src/builders.rs index 3d5b0e13a1d44..02000019c42ad 100644 --- a/crates/ruff_formatter/src/builders.rs +++ b/crates/ruff_formatter/src/builders.rs @@ -1,14 +1,13 @@ use crate::format_element::tag::{Condition, Tag}; use crate::prelude::tag::{DedentMode, GroupMode, LabelId}; use crate::prelude::*; -use crate::{format_element, write, Argument, Arguments, GroupId, TextSize}; +use crate::{format_element, write, Argument, Arguments, FormatContext, GroupId, TextSize}; use crate::{Buffer, VecBuffer}; use ruff_text_size::TextRange; use std::cell::Cell; use std::marker::PhantomData; use std::num::NonZeroU8; -use std::rc::Rc; use Tag::*; /// A line break that only gets printed if the enclosing `Group` doesn't fit on a single line. @@ -276,8 +275,62 @@ impl std::fmt::Debug for StaticText { } } -/// Creates a text from a dynamic string and a range of the input source -pub fn dynamic_text(text: &str, position: TextSize) -> DynamicText { +/// Creates a source map entry from the passed source `position` to the position in the formatted output. 
+/// +/// ## Examples +/// +/// ``` +/// /// ``` +/// use ruff_formatter::format; +/// use ruff_formatter::prelude::*; +/// +/// # fn main() -> FormatResult<()> { +/// // the tab must be encoded as \\t to not literally print a tab character ("Hello{tab}World" vs "Hello\tWorld") +/// use ruff_text_size::TextSize; +/// use ruff_formatter::SourceMarker; +/// +/// +/// let elements = format!(SimpleFormatContext::default(), [ +/// source_position(TextSize::new(0)), +/// text("\"Hello "), +/// source_position(TextSize::new(8)), +/// text("'Ruff'"), +/// source_position(TextSize::new(14)), +/// text("\""), +/// source_position(TextSize::new(20)) +/// ])?; +/// +/// let printed = elements.print()?; +/// +/// assert_eq!(printed.as_code(), r#""Hello 'Ruff'""#); +/// assert_eq!(printed.sourcemap(), [ +/// SourceMarker { source: TextSize::new(0), dest: TextSize::new(0) }, +/// SourceMarker { source: TextSize::new(0), dest: TextSize::new(7) }, +/// SourceMarker { source: TextSize::new(8), dest: TextSize::new(7) }, +/// SourceMarker { source: TextSize::new(8), dest: TextSize::new(13) }, +/// SourceMarker { source: TextSize::new(14), dest: TextSize::new(13) }, +/// SourceMarker { source: TextSize::new(14), dest: TextSize::new(14) }, +/// SourceMarker { source: TextSize::new(20), dest: TextSize::new(14) }, +/// ]); +/// +/// # Ok(()) +/// # } +/// ``` +pub const fn source_position(position: TextSize) -> SourcePosition { + SourcePosition(position) +} + +#[derive(Eq, PartialEq, Copy, Clone, Debug)] +pub struct SourcePosition(TextSize); + +impl Format for SourcePosition { + fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { + f.write_element(FormatElement::SourcePosition(self.0)) + } +} + +/// Creates a text from a dynamic string with its optional start-position in the source document +pub fn dynamic_text(text: &str, position: Option) -> DynamicText { debug_assert_no_newlines(text); DynamicText { text, position } @@ -286,14 +339,17 @@ pub fn dynamic_text(text: &str, position: TextSize) -> DynamicText { #[derive(Eq, PartialEq)] pub struct DynamicText<'a> { text: &'a str, - position: TextSize, + position: Option, } impl Format for DynamicText<'_> { fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { + if let Some(source_position) = self.position { + f.write_element(FormatElement::SourcePosition(source_position))?; + } + f.write_element(FormatElement::DynamicText { text: self.text.to_string().into_boxed_str(), - source_position: self.position, }) } } @@ -304,31 +360,65 @@ impl std::fmt::Debug for DynamicText<'_> { } } -/// Creates a text from a dynamic string and a range of the input source -pub fn static_text_slice(text: Rc, range: TextRange) -> StaticTextSlice { - debug_assert_no_newlines(&text[range]); +/// Emits a text as it is written in the source document. Optimized to avoid allocations. +pub const fn source_text_slice( + range: TextRange, + newlines: ContainsNewlines, +) -> SourceTextSliceBuilder { + SourceTextSliceBuilder { + range, + new_lines: newlines, + } +} + +#[derive(Copy, Clone, Eq, PartialEq, Debug)] +pub enum ContainsNewlines { + /// The string contains newline characters + Yes, + /// The string contains no newline characters + No, - StaticTextSlice { text, range } + /// The string may contain newline characters, search the string to determine if there are any newlines. 
+ Detect, } -#[derive(Eq, PartialEq)] -pub struct StaticTextSlice { - text: Rc, +#[derive(Eq, PartialEq, Debug)] +pub struct SourceTextSliceBuilder { range: TextRange, + new_lines: ContainsNewlines, } -impl Format for StaticTextSlice { +impl Format for SourceTextSliceBuilder +where + Context: FormatContext, +{ fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { - f.write_element(FormatElement::StaticTextSlice { - text: self.text.clone(), - range: self.range, - }) - } -} + let source_code = f.context().source_code(); + let slice = source_code.slice(self.range); + debug_assert_no_newlines(slice.text(source_code)); + + let contains_newlines = match self.new_lines { + ContainsNewlines::Yes => { + debug_assert!( + slice.text(source_code).contains('\n'), + "Text contains no new line characters but the caller specified that it does." + ); + true + } + ContainsNewlines::No => { + debug_assert!( + !slice.text(source_code).contains('\n'), + "Text contains new line characters but the caller specified that it does not." + ); + false + } + ContainsNewlines::Detect => slice.text(source_code).contains('\n'), + }; -impl std::fmt::Debug for StaticTextSlice { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - std::write!(f, "StaticTextSlice({})", &self.text[self.range]) + f.write_element(FormatElement::SourceCodeSlice { + slice, + contains_newlines, + }) } } @@ -1781,7 +1871,7 @@ impl std::fmt::Debug for FormatWith { /// let mut join = f.join_with(&separator); /// /// for item in &self.items { -/// join.entry(&format_with(|f| write!(f, [dynamic_text(item, TextSize::default())]))); +/// join.entry(&format_with(|f| write!(f, [dynamic_text(item, None)]))); /// } /// join.finish() /// })), diff --git a/crates/ruff_formatter/src/format_element.rs b/crates/ruff_formatter/src/format_element.rs index fda6ee227201d..a7dd7dedae9ba 100644 --- a/crates/ruff_formatter/src/format_element.rs +++ b/crates/ruff_formatter/src/format_element.rs @@ -1,17 +1,16 @@ pub mod document; pub mod tag; -use crate::format_element::tag::{LabelId, Tag}; use std::borrow::Cow; - -#[cfg(target_pointer_width = "64")] -use crate::static_assert; -use crate::{TagKind, TextSize}; -use ruff_text_size::TextRange; use std::hash::{Hash, Hasher}; use std::ops::Deref; use std::rc::Rc; +use crate::format_element::tag::{LabelId, Tag}; +use crate::source_code::SourceCodeSlice; +use crate::TagKind; +use ruff_text_size::TextSize; + /// Language agnostic IR for formatting source code. /// /// Use the helper functions like [crate::builders::space], [crate::builders::soft_line_break] etc. defined in this file to create elements. @@ -26,20 +25,27 @@ pub enum FormatElement { /// Forces the parent group to print in expanded mode. ExpandParent, + /// Indicates the position of the elements coming after this element in the source document. + /// The printer will create a source map entry from this position in the source document to the + /// formatted position. + SourcePosition(TextSize), + /// Token constructed by the formatter from a static string StaticText { text: &'static str }, /// Token constructed from the input source as a dynamic - /// string with its start position in the input document. + /// string. DynamicText { /// There's no need for the text to be mutable, using `Box` safes 8 bytes over `String`. text: Box, - /// The start position of the dynamic token in the unformatted source code - source_position: TextSize, }, - /// Token constructed by slicing a defined range from a static string. 
- StaticTextSlice { text: Rc, range: TextRange }, + /// Text that gets emitted as it is in the source code. Optimized to avoid any allocations. + SourceCodeSlice { + slice: SourceCodeSlice, + /// Whether the string contains any new line characters + contains_newlines: bool, + }, /// Prevents that line suffixes move past this boundary. Forces the printer to print any pending /// line suffixes, potentially by inserting a hard line break. @@ -69,9 +75,14 @@ impl std::fmt::Debug for FormatElement { FormatElement::DynamicText { text, .. } => { fmt.debug_tuple("DynamicText").field(text).finish() } - FormatElement::StaticTextSlice { text, .. } => { - fmt.debug_tuple("Text").field(text).finish() - } + FormatElement::SourceCodeSlice { + slice, + contains_newlines, + } => fmt + .debug_tuple("Text") + .field(slice) + .field(contains_newlines) + .finish(), FormatElement::LineSuffixBoundary => write!(fmt, "LineSuffixBoundary"), FormatElement::BestFitting(best_fitting) => { fmt.debug_tuple("BestFitting").field(&best_fitting).finish() @@ -80,6 +91,9 @@ impl std::fmt::Debug for FormatElement { fmt.debug_list().entries(interned.deref()).finish() } FormatElement::Tag(tag) => fmt.debug_tuple("Tag").field(tag).finish(), + FormatElement::SourcePosition(position) => { + fmt.debug_tuple("SourcePosition").field(position).finish() + } } } } @@ -217,7 +231,7 @@ impl FormatElement { pub const fn is_text(&self) -> bool { matches!( self, - FormatElement::StaticTextSlice { .. } + FormatElement::SourceCodeSlice { .. } | FormatElement::DynamicText { .. } | FormatElement::StaticText { .. } ) @@ -236,14 +250,17 @@ impl FormatElements for FormatElement { FormatElement::Line(line_mode) => matches!(line_mode, LineMode::Hard | LineMode::Empty), FormatElement::StaticText { text } => text.contains('\n'), FormatElement::DynamicText { text, .. } => text.contains('\n'), - FormatElement::StaticTextSlice { text, range } => text[*range].contains('\n'), + FormatElement::SourceCodeSlice { + contains_newlines, .. + } => *contains_newlines, FormatElement::Interned(interned) => interned.will_break(), // Traverse into the most flat version because the content is guaranteed to expand when even // the most flat version contains some content that forces a break. FormatElement::BestFitting(best_fitting) => best_fitting.most_flat().will_break(), - FormatElement::LineSuffixBoundary | FormatElement::Space | FormatElement::Tag(_) => { - false - } + FormatElement::LineSuffixBoundary + | FormatElement::Space + | FormatElement::Tag(_) + | FormatElement::SourcePosition(_) => false, } } @@ -369,20 +386,25 @@ mod tests { } #[cfg(target_pointer_width = "64")] -static_assert!(std::mem::size_of::() == 8usize); +mod sizes { + // Increasing the size of FormatElement has serious consequences on runtime performance and memory footprint. + // Is there a more efficient way to encode the data to avoid increasing its size? Can the information + // be recomputed at a later point in time? + // You reduced the size of a format element? Excellent work! 
-#[cfg(target_pointer_width = "64")] -static_assert!(std::mem::size_of::() == 8usize); + use static_assertions::assert_eq_size; -#[cfg(not(debug_assertions))] -#[cfg(target_pointer_width = "64")] -static_assert!(std::mem::size_of::() == 16usize); + assert_eq_size!(ruff_text_size::TextRange, [u8; 8]); + assert_eq_size!(crate::prelude::tag::VerbatimKind, [u8; 8]); + assert_eq_size!(crate::prelude::Interned, [u8; 16]); + assert_eq_size!(crate::format_element::BestFitting, [u8; 16]); -// Increasing the size of FormatElement has serious consequences on runtime performance and memory footprint. -// Is there a more efficient way to encode the data to avoid increasing its size? Can the information -// be recomputed at a later point in time? -// You reduced the size of a format element? Excellent work! + #[cfg(not(debug_assertions))] + assert_eq_size!(crate::SourceCodeSlice, [u8; 8]); -#[cfg(not(debug_assertions))] -#[cfg(target_pointer_width = "64")] -static_assert!(std::mem::size_of::() == 32usize); + #[cfg(not(debug_assertions))] + assert_eq_size!(crate::format_element::Tag, [u8; 16]); + + #[cfg(not(debug_assertions))] + assert_eq_size!(crate::FormatElement, [u8; 24]); +} diff --git a/crates/ruff_formatter/src/format_element/document.rs b/crates/ruff_formatter/src/format_element/document.rs index be389b1a348b2..487c772a1d6d6 100644 --- a/crates/ruff_formatter/src/format_element/document.rs +++ b/crates/ruff_formatter/src/format_element/document.rs @@ -3,12 +3,12 @@ use crate::format_element::tag::DedentMode; use crate::prelude::tag::GroupMode; use crate::prelude::*; use crate::printer::LineEnding; +use crate::source_code::SourceCode; use crate::{format, write}; use crate::{ BufferExtensions, Format, FormatContext, FormatElement, FormatOptions, FormatResult, Formatter, IndentStyle, LineWidth, PrinterOptions, }; -use ruff_text_size::TextSize; use rustc_hash::FxHashMap; use std::collections::HashMap; use std::ops::Deref; @@ -81,7 +81,9 @@ impl Document { } FormatElement::StaticText { text } => text.contains('\n'), FormatElement::DynamicText { text, .. } => text.contains('\n'), - FormatElement::StaticTextSlice { text, range } => text[*range].contains('\n'), + FormatElement::SourceCodeSlice { + contains_newlines, .. 
+ } => *contains_newlines, FormatElement::ExpandParent | FormatElement::Line(LineMode::Hard | LineMode::Empty) => true, _ => false, @@ -100,6 +102,13 @@ impl Document { let mut interned: FxHashMap<&Interned, bool> = FxHashMap::default(); propagate_expands(self, &mut enclosing, &mut interned); } + + pub fn display<'a>(&'a self, source_code: SourceCode<'a>) -> DisplayDocument { + DisplayDocument { + elements: self.elements.as_slice(), + source_code, + } + } } impl From> for Document { @@ -116,9 +125,14 @@ impl Deref for Document { } } -impl std::fmt::Display for Document { +pub struct DisplayDocument<'a> { + elements: &'a [FormatElement], + source_code: SourceCode<'a>, +} + +impl std::fmt::Display for DisplayDocument<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let formatted = format!(IrFormatContext::default(), [self.elements.as_slice()]) + let formatted = format!(IrFormatContext::new(self.source_code), [self.elements]) .expect("Formatting not to throw any FormatErrors"); f.write_str( @@ -130,18 +144,33 @@ impl std::fmt::Display for Document { } } -#[derive(Clone, Default, Debug)] -struct IrFormatContext { +#[derive(Clone, Debug)] +struct IrFormatContext<'a> { /// The interned elements that have been printed to this point printed_interned_elements: HashMap, + + source_code: SourceCode<'a>, } -impl FormatContext for IrFormatContext { +impl<'a> IrFormatContext<'a> { + fn new(source_code: SourceCode<'a>) -> Self { + Self { + source_code, + printed_interned_elements: HashMap::new(), + } + } +} + +impl FormatContext for IrFormatContext<'_> { type Options = IrFormatOptions; fn options(&self) -> &Self::Options { &IrFormatOptions } + + fn source_code(&self) -> SourceCode { + self.source_code + } } #[derive(Debug, Clone, Default)] @@ -166,7 +195,7 @@ impl FormatOptions for IrFormatOptions { } } -impl Format for &[FormatElement] { +impl Format> for &[FormatElement] { fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { use Tag::*; @@ -190,7 +219,7 @@ impl Format for &[FormatElement] { element @ FormatElement::Space | element @ FormatElement::StaticText { .. } | element @ FormatElement::DynamicText { .. } - | element @ FormatElement::StaticTextSlice { .. } => { + | element @ FormatElement::SourceCodeSlice { .. 
} => { if !in_text { write!(f, [text("\"")])?; } @@ -231,6 +260,16 @@ impl Format for &[FormatElement] { write!(f, [text("expand_parent")])?; } + FormatElement::SourcePosition(position) => { + write!( + f, + [dynamic_text( + &std::format!("source_position({:?})", position), + None + )] + )?; + } + FormatElement::LineSuffixBoundary => { write!(f, [text("line_suffix_boundary")])?; } @@ -265,10 +304,7 @@ impl Format for &[FormatElement] { write!( f, [ - dynamic_text( - &std::format!(""), - TextSize::default() - ), + dynamic_text(&std::format!(""), None), space(), &interned.deref(), ] @@ -279,7 +315,7 @@ impl Format for &[FormatElement] { f, [dynamic_text( &std::format!(""), - TextSize::default() + None )] )?; } @@ -300,10 +336,7 @@ impl Format for &[FormatElement] { f, [ text(">"), ] )?; @@ -318,15 +351,9 @@ impl Format for &[FormatElement] { text(")"), soft_line_break_or_space(), text("ERROR>") ] )?; @@ -358,7 +385,7 @@ impl Format for &[FormatElement] { f, [ text("align("), - dynamic_text(&count.to_string(), TextSize::default()), + dynamic_text(&count.to_string(), None), text(","), space(), ] @@ -380,10 +407,7 @@ impl Format for &[FormatElement] { write!( f, [ - dynamic_text( - &std::format!("\"{group_id:?}\""), - TextSize::default() - ), + dynamic_text(&std::format!("\"{group_id:?}\""), None), text(","), space(), ] @@ -406,7 +430,7 @@ impl Format for &[FormatElement] { f, [ text("indent_if_group_breaks("), - dynamic_text(&std::format!("\"{id:?}\""), TextSize::default()), + dynamic_text(&std::format!("\"{id:?}\""), None), text(","), space(), ] @@ -427,10 +451,7 @@ impl Format for &[FormatElement] { write!( f, [ - dynamic_text( - &std::format!("\"{group_id:?}\""), - TextSize::default() - ), + dynamic_text(&std::format!("\"{group_id:?}\""), None), text(","), space(), ] @@ -443,10 +464,7 @@ impl Format for &[FormatElement] { f, [ text("label("), - dynamic_text( - &std::format!("\"{label_id:?}\""), - TextSize::default() - ), + dynamic_text(&std::format!("\"{label_id:?}\""), None), text(","), space(), ] @@ -490,10 +508,7 @@ impl Format for &[FormatElement] { ContentArrayEnd, text(")"), soft_line_break_or_space(), - dynamic_text( - &std::format!(">"), - TextSize::default() - ), + dynamic_text(&std::format!(">"), None), ] )?; } @@ -504,7 +519,7 @@ impl Format for &[FormatElement] { struct ContentArrayStart; -impl Format for ContentArrayStart { +impl Format> for ContentArrayStart { fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { use Tag::*; @@ -520,7 +535,7 @@ impl Format for ContentArrayStart { struct ContentArrayEnd; -impl Format for ContentArrayEnd { +impl Format> for ContentArrayEnd { fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { use Tag::*; f.write_elements([ @@ -630,8 +645,9 @@ impl FormatElements for [FormatElement] { #[cfg(test)] mod tests { use crate::prelude::*; - use crate::SimpleFormatContext; use crate::{format, format_args, write}; + use crate::{SimpleFormatContext, SourceCode}; + use ruff_text_size::{TextRange, TextSize}; #[test] fn display_elements() { @@ -656,7 +672,51 @@ mod tests { let document = formatted.into_document(); assert_eq!( - &std::format!("{document}"), + &std::format!("{}", document.display(SourceCode::default())), + r#"[ + group([ + "(", + indent([ + soft_line_break, + "Some longer content That should ultimately break" + ]), + soft_line_break + ]) +]"# + ); + } + + #[test] + fn display_elements_with_source_text_slice() { + let source_code = "Some longer content\nThat should ultimately break"; + let formatted = format!( + 
SimpleFormatContext::default().with_source_code(source_code), + [format_with(|f| { + write!( + f, + [group(&format_args![ + text("("), + soft_block_indent(&format_args![ + source_text_slice( + TextRange::at(TextSize::new(0), TextSize::new(19)), + ContainsNewlines::No + ), + space(), + source_text_slice( + TextRange::at(TextSize::new(20), TextSize::new(28)), + ContainsNewlines::No + ), + ]) + ])] + ) + })] + ) + .unwrap(); + + let document = formatted.into_document(); + + assert_eq!( + &std::format!("{}", document.display(SourceCode::new(source_code))), r#"[ group([ "(", @@ -692,7 +752,7 @@ mod tests { ]); assert_eq!( - &std::format!("{document}"), + &std::format!("{}", document.display(SourceCode::default())), r#"[ "[", group([ diff --git a/crates/ruff_formatter/src/format_extensions.rs b/crates/ruff_formatter/src/format_extensions.rs index ceea27f0cbaa5..82b99fda935de 100644 --- a/crates/ruff_formatter/src/format_extensions.rs +++ b/crates/ruff_formatter/src/format_extensions.rs @@ -35,7 +35,7 @@ pub trait MemoizeFormat { /// let value = self.value.get(); /// self.value.set(value + 1); /// - /// write!(f, [dynamic_text(&std::format!("Formatted {value} times."), TextSize::from(0))]) + /// write!(f, [dynamic_text(&std::format!("Formatted {value} times."), None)]) /// } /// } /// @@ -114,7 +114,7 @@ where /// write!(f, [ /// text("Count:"), /// space(), - /// dynamic_text(&std::format!("{current}"), TextSize::default()), + /// dynamic_text(&std::format!("{current}"), None), /// hard_line_break() /// ])?; /// diff --git a/crates/ruff_formatter/src/lib.rs b/crates/ruff_formatter/src/lib.rs index d350705c5b84b..5498d03342952 100644 --- a/crates/ruff_formatter/src/lib.rs +++ b/crates/ruff_formatter/src/lib.rs @@ -33,7 +33,7 @@ pub mod group_id; pub mod macros; pub mod prelude; pub mod printer; -mod utility_types; +mod source_code; use crate::formatter::Formatter; use crate::group_id::UniqueGroupIdBuilder; @@ -48,6 +48,7 @@ pub use buffer::{ VecBuffer, }; pub use builders::BestFitting; +pub use source_code::{SourceCode, SourceCodeSlice}; pub use crate::diagnostics::{ActualStart, FormatError, InvalidDocumentError, PrintError}; pub use format_element::{normalize_newlines, FormatElement, LINE_TERMINATORS}; @@ -203,6 +204,9 @@ pub trait FormatContext { /// Returns the formatting options fn options(&self) -> &Self::Options; + + /// Returns the source code from the document that gets formatted. + fn source_code(&self) -> SourceCode; } /// Options customizing how the source code should be formatted. 
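(Editor's note, not part of the diff.) For orientation, a condensed sketch adapted from the `display_elements_with_source_text_slice` test added above: the new `FormatContext::source_code` hook is what lets `source_text_slice` re-emit a range of the original document without allocating. The context carries the text, the IR only stores a `SourceCodeSlice`, and the printer resolves it at print time. This assumes the builders and `ContainsNewlines` are re-exported through `prelude`, as in the test:

```rust
use ruff_formatter::prelude::*;
use ruff_formatter::{format, SimpleFormatContext};
use ruff_text_size::{TextRange, TextSize};

fn main() -> FormatResult<()> {
    let source = "Some longer content\nThat should ultimately break";

    // The context now owns the source text, so slices can be resolved when printing.
    let formatted = format!(
        SimpleFormatContext::default().with_source_code(source),
        [source_text_slice(
            TextRange::at(TextSize::new(0), TextSize::new(19)),
            ContainsNewlines::No,
        )]
    )?;

    assert_eq!(formatted.print()?.as_code(), "Some longer content");
    Ok(())
}
```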
@@ -220,11 +224,20 @@ pub trait FormatOptions { #[derive(Debug, Default, Eq, PartialEq)] pub struct SimpleFormatContext { options: SimpleFormatOptions, + source_code: String, } impl SimpleFormatContext { pub fn new(options: SimpleFormatOptions) -> Self { - Self { options } + Self { + options, + source_code: String::new(), + } + } + + pub fn with_source_code(mut self, code: &str) -> Self { + self.source_code = String::from(code); + self } } @@ -234,9 +247,13 @@ impl FormatContext for SimpleFormatContext { fn options(&self) -> &Self::Options { &self.options } + + fn source_code(&self) -> SourceCode { + SourceCode::new(&self.source_code) + } } -#[derive(Debug, Default, Eq, PartialEq)] +#[derive(Debug, Default, Eq, PartialEq, Clone)] pub struct SimpleFormatOptions { pub indent_style: IndentStyle, pub line_width: LineWidth, @@ -303,15 +320,18 @@ where Context: FormatContext, { pub fn print(&self) -> PrintResult { + let source_code = self.context.source_code(); let print_options = self.context.options().as_print_options(); - let printed = Printer::new(print_options).print(&self.document)?; + let printed = Printer::new(source_code, print_options).print(&self.document)?; Ok(printed) } pub fn print_with_indent(&self, indent: u16) -> PrintResult { + let source_code = self.context.source_code(); let print_options = self.context.options().as_print_options(); - let printed = Printer::new(print_options).print_with_indent(&self.document, indent)?; + let printed = + Printer::new(source_code, print_options).print_with_indent(&self.document, indent)?; Ok(printed) } @@ -428,7 +448,7 @@ pub type FormatResult = Result; /// fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { /// write!(f, [ /// hard_line_break(), -/// dynamic_text(&self.0, TextSize::from(0)), +/// dynamic_text(&self.0, None), /// hard_line_break(), /// ]) /// } diff --git a/crates/ruff_formatter/src/printer/mod.rs b/crates/ruff_formatter/src/printer/mod.rs index fa0ade60bbaa8..5d5c171b48d60 100644 --- a/crates/ruff_formatter/src/printer/mod.rs +++ b/crates/ruff_formatter/src/printer/mod.rs @@ -23,6 +23,7 @@ use crate::printer::line_suffixes::{LineSuffixEntry, LineSuffixes}; use crate::printer::queue::{ AllPredicate, FitsEndPredicate, FitsQueue, PrintQueue, Queue, SingleEntryPredicate, }; +use crate::source_code::SourceCode; use drop_bomb::DebugDropBomb; use ruff_text_size::{TextLen, TextSize}; use std::num::NonZeroU8; @@ -32,12 +33,14 @@ use unicode_width::UnicodeWidthChar; #[derive(Debug, Default)] pub struct Printer<'a> { options: PrinterOptions, + source_code: SourceCode<'a>, state: PrinterState<'a>, } impl<'a> Printer<'a> { - pub fn new(options: PrinterOptions) -> Self { + pub fn new(source_code: SourceCode<'a>, options: PrinterOptions) -> Self { Self { + source_code, options, state: PrinterState::default(), } @@ -95,11 +98,11 @@ impl<'a> Printer<'a> { } FormatElement::StaticText { text } => self.print_text(text, None), - FormatElement::DynamicText { - text, - source_position, - } => self.print_text(text, Some(*source_position)), - FormatElement::StaticTextSlice { text, range } => self.print_text(&text[*range], None), + FormatElement::DynamicText { text } => self.print_text(text, None), + FormatElement::SourceCodeSlice { slice, .. 
} => { + let text = slice.text(self.source_code); + self.print_text(text, Some(slice.range())) + } FormatElement::Line(line_mode) => { if args.mode().is_flat() && matches!(line_mode, LineMode::Soft | LineMode::SoftOrSpace) @@ -129,6 +132,11 @@ impl<'a> Printer<'a> { // Handled in `Document::propagate_expands() } + FormatElement::SourcePosition(position) => { + self.state.source_position = *position; + self.push_marker(); + } + FormatElement::LineSuffixBoundary => { const HARD_BREAK: &FormatElement = &FormatElement::Line(LineMode::Hard); self.flush_line_suffixes(queue, stack, Some(HARD_BREAK)); @@ -273,7 +281,7 @@ impl<'a> Printer<'a> { result } - fn print_text(&mut self, text: &str, source_position: Option) { + fn print_text(&mut self, text: &str, source_range: Option) { if !self.state.pending_indent.is_empty() { let (indent_char, repeat_count) = match self.options.indent_style() { IndentStyle::Tab => ('\t', 1), @@ -311,28 +319,27 @@ impl<'a> Printer<'a> { // If the token has no source position (was created by the formatter) // both the start and end marker will use the last known position // in the input source (from state.source_position) - if let Some(source) = source_position { - self.state.source_position = source; + if let Some(range) = source_range { + self.state.source_position = range.start(); } - self.push_marker(SourceMarker { - source: self.state.source_position, - dest: self.state.buffer.text_len(), - }); + self.push_marker(); self.print_str(text); - if source_position.is_some() { - self.state.source_position += text.text_len(); + if let Some(range) = source_range { + self.state.source_position = range.end(); } - self.push_marker(SourceMarker { + self.push_marker(); + } + + fn push_marker(&mut self) { + let marker = SourceMarker { source: self.state.source_position, dest: self.state.buffer.text_len(), - }); - } + }; - fn push_marker(&mut self, marker: SourceMarker) { if let Some(last) = self.state.source_markers.last() { if last != &marker { self.state.source_markers.push(marker) @@ -993,8 +1000,9 @@ impl<'a, 'print> FitsMeasurer<'a, 'print> { FormatElement::StaticText { text } => return Ok(self.fits_text(text)), FormatElement::DynamicText { text, .. } => return Ok(self.fits_text(text)), - FormatElement::StaticTextSlice { text, range } => { - return Ok(self.fits_text(&text[*range])) + FormatElement::SourceCodeSlice { slice, .. 
} => { + let text = slice.text(self.printer.source_code); + return Ok(self.fits_text(text)); } FormatElement::LineSuffixBoundary => { if self.state.has_line_suffix { @@ -1008,6 +1016,8 @@ impl<'a, 'print> FitsMeasurer<'a, 'print> { } } + FormatElement::SourcePosition(_) => {} + FormatElement::BestFitting(best_fitting) => { let slice = match args.mode() { PrintMode::Flat => best_fitting.most_flat(), @@ -1253,6 +1263,7 @@ struct FitsState { mod tests { use crate::prelude::*; use crate::printer::{LineEnding, PrintWidth, Printer, PrinterOptions}; + use crate::source_code::SourceCode; use crate::{format_args, write, Document, FormatState, IndentStyle, Printed, VecBuffer}; fn format(root: &dyn Format) -> Printed { @@ -1271,7 +1282,7 @@ mod tests { ) -> Printed { let formatted = crate::format!(SimpleFormatContext::default(), [root]).unwrap(); - Printer::new(options) + Printer::new(SourceCode::default(), options) .print(formatted.document()) .expect("Document to be valid") } @@ -1507,9 +1518,12 @@ two lines`, let document = Document::from(buffer.into_vec()); - let printed = Printer::new(PrinterOptions::default().with_print_width(PrintWidth::new(10))) - .print(&document) - .unwrap(); + let printed = Printer::new( + SourceCode::default(), + PrinterOptions::default().with_print_width(PrintWidth::new(10)), + ) + .print(&document) + .unwrap(); assert_eq!( printed.as_code(), diff --git a/crates/ruff_formatter/src/source_code.rs b/crates/ruff_formatter/src/source_code.rs new file mode 100644 index 0000000000000..77bf35fab5b98 --- /dev/null +++ b/crates/ruff_formatter/src/source_code.rs @@ -0,0 +1,81 @@ +use ruff_text_size::TextRange; +use std::fmt::{Debug, Formatter}; + +/// The source code of a document that gets formatted +#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Default)] +pub struct SourceCode<'a> { + text: &'a str, +} + +impl<'a> SourceCode<'a> { + pub fn new(text: &'a str) -> Self { + Self { text } + } + + pub fn slice(self, range: TextRange) -> SourceCodeSlice { + assert!( + usize::from(range.end()) <= self.text.len(), + "Range end {:?} out of bounds {}.", + range.end(), + self.text.len() + ); + + assert!( + self.text.is_char_boundary(usize::from(range.start())), + "The range start position {:?} is not a char boundary.", + range.start() + ); + + assert!( + self.text.is_char_boundary(usize::from(range.end())), + "The range end position {:?} is not a char boundary.", + range.end() + ); + + SourceCodeSlice { + range, + #[cfg(debug_assertions)] + text: String::from(&self.text[range]).into_boxed_str(), + } + } +} + +impl Debug for SourceCode<'_> { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_tuple("SourceCode").field(&self.text).finish() + } +} + +/// A slice into the source text of a document. +/// +/// It only stores the range in production builds for a more compact representation, but it +/// keeps the original text in debug builds for better developer experience. +#[derive(Clone, Eq, PartialEq)] +pub struct SourceCodeSlice { + range: TextRange, + #[cfg(debug_assertions)] + text: Box, +} + +impl SourceCodeSlice { + /// Returns the slice's text. + pub fn text<'a>(&self, code: SourceCode<'a>) -> &'a str { + assert!(usize::from(self.range.end()) <= code.text.len(), "The range of this slice is out of bounds. 
Did you provide the correct source code for this slice?"); + &code.text[self.range] + } + + pub fn range(&self) -> TextRange { + self.range + } +} + +impl Debug for SourceCodeSlice { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + let mut tuple = f.debug_tuple("SourceCodeSlice"); + + #[cfg(debug_assertions)] + tuple.field(&self.text); + + tuple.field(&self.range).finish() + } +} diff --git a/crates/ruff_formatter/src/utility_types.rs b/crates/ruff_formatter/src/utility_types.rs deleted file mode 100644 index 9a8f0e122c775..0000000000000 --- a/crates/ruff_formatter/src/utility_types.rs +++ /dev/null @@ -1,7 +0,0 @@ -#[cfg(target_pointer_width = "64")] -#[macro_export] -macro_rules! static_assert { - ($expr:expr) => { - const _: i32 = 0 / $expr as i32; - }; -} diff --git a/crates/ruff_index/Cargo.toml b/crates/ruff_index/Cargo.toml new file mode 100644 index 0000000000000..77e1c3fb56d28 --- /dev/null +++ b/crates/ruff_index/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "ruff_index" +version = "0.0.0" +publish = false +edition = { workspace = true } +rust-version = { workspace = true } + +[lib] + +[dependencies] +ruff_macros = { path = "../ruff_macros" } + +[dev-dependencies] +static_assertions = "1.1.0" diff --git a/crates/ruff_index/src/idx.rs b/crates/ruff_index/src/idx.rs new file mode 100644 index 0000000000000..aa7b1f6ac4533 --- /dev/null +++ b/crates/ruff_index/src/idx.rs @@ -0,0 +1,60 @@ +use std::hash::Hash; + +/// Represents a newtype wrapper used to index into a Vec or a slice. +/// +/// You can use the [`newtype_index`](crate::newtype_index) macro to define your own index. +pub trait Idx: Copy + PartialEq + Eq + Hash + std::fmt::Debug + 'static { + fn new(value: usize) -> Self; + + fn index(self) -> usize; +} + +#[cfg(test)] +mod tests { + + use crate::newtype_index; + use static_assertions::{assert_eq_size, assert_impl_all}; + + // Allows the macro invocation below to work + use crate as ruff_index; + + #[newtype_index] + #[derive(PartialOrd, Ord)] + struct MyIndex; + + assert_impl_all!(MyIndex: Ord, PartialOrd); + assert_eq_size!(MyIndex, Option); + + #[test] + #[should_panic(expected = "assertion failed: value <= Self::MAX")] + fn from_u32_panics_for_u32_max() { + MyIndex::from_u32(u32::MAX); + } + + #[test] + #[should_panic(expected = "assertion failed: value <= Self::MAX")] + fn from_usize_panics_for_u32_max() { + MyIndex::from_usize(u32::MAX as usize); + } + + #[test] + fn max_value() { + let max_value = MyIndex::from_u32(u32::MAX - 1); + + assert_eq!(max_value.as_u32(), u32::MAX - 1); + } + + #[test] + fn max_value_usize() { + let max_value = MyIndex::from_usize((u32::MAX - 1) as usize); + + assert_eq!(max_value.as_u32(), u32::MAX - 1); + } + + #[test] + fn debug() { + let output = format!("{:?}", MyIndex::from(10u32)); + + assert_eq!(output, "MyIndex(10)"); + } +} diff --git a/crates/ruff_index/src/lib.rs b/crates/ruff_index/src/lib.rs new file mode 100644 index 0000000000000..6f7ac59c41791 --- /dev/null +++ b/crates/ruff_index/src/lib.rs @@ -0,0 +1,13 @@ +//! Provides new-type wrappers for collections that are indexed by a [`Idx`] rather +//! than `usize`. +//! +//! Inspired by [rustc_index](https://github.com/rust-lang/rust/blob/master/compiler/rustc_index/src/lib.rs). 
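(Editor's illustration, not part of the diff.) Putting the new `ruff_index` pieces together, a sketch of the intended usage based on the tests the crate ships with; the `StatementId` name here is just an example, not something defined in this PR. `#[newtype_index]` derives a compact, non-zero-niche index type, and `IndexVec` is indexed by it instead of a bare `usize`:

```rust
use ruff_index::{newtype_index, IndexVec};

#[newtype_index]
struct StatementId;

fn main() {
    let mut statements: IndexVec<StatementId, &str> = IndexVec::new();

    // `push` hands back the strongly typed index of the new element...
    let first: StatementId = statements.push("pass");
    statements.push("return None");

    // ...and indexing goes through `StatementId` rather than a plain `usize`.
    assert_eq!(statements[first], "pass");
    assert_eq!(first.as_u32(), 0);
}
```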
+ +mod idx; +mod slice; +mod vec; + +pub use idx::Idx; +pub use ruff_macros::newtype_index; +pub use slice::IndexSlice; +pub use vec::IndexVec; diff --git a/crates/ruff_index/src/slice.rs b/crates/ruff_index/src/slice.rs new file mode 100644 index 0000000000000..ddb534ea82358 --- /dev/null +++ b/crates/ruff_index/src/slice.rs @@ -0,0 +1,178 @@ +use crate::vec::IndexVec; +use crate::Idx; +use std::fmt::{Debug, Formatter}; +use std::marker::PhantomData; +use std::ops::{Index, IndexMut}; + +/// A view into contiguous `T`s, indexed by `I` rather than by `usize`. +#[derive(PartialEq, Eq, Hash)] +#[repr(transparent)] +pub struct IndexSlice { + index: PhantomData, + pub raw: [T], +} + +impl IndexSlice { + #[inline] + pub const fn empty() -> &'static Self { + Self::from_raw(&[]) + } + + #[inline] + pub const fn from_raw(raw: &[T]) -> &Self { + let ptr: *const [T] = raw; + + #[allow(unsafe_code)] + // SAFETY: `IndexSlice` is `repr(transparent)` over a normal slice + unsafe { + &*(ptr as *const Self) + } + } + + #[inline] + pub fn from_raw_mut(raw: &mut [T]) -> &mut Self { + let ptr: *mut [T] = raw; + + #[allow(unsafe_code)] + // SAFETY: `IndexSlice` is `repr(transparent)` over a normal slice + unsafe { + &mut *(ptr as *mut Self) + } + } + + #[inline] + pub const fn len(&self) -> usize { + self.raw.len() + } + + #[inline] + pub const fn is_empty(&self) -> bool { + self.raw.is_empty() + } + + #[inline] + pub fn iter(&self) -> std::slice::Iter<'_, T> { + self.raw.iter() + } + + /// Returns an iterator over the indices + #[inline] + pub fn indices( + &self, + ) -> impl DoubleEndedIterator + ExactSizeIterator + Clone + 'static { + (0..self.len()).map(|n| I::new(n)) + } + + #[inline] + pub fn iter_mut(&mut self) -> std::slice::IterMut<'_, T> { + self.raw.iter_mut() + } + + #[inline] + pub fn last_index(&self) -> Option { + self.len().checked_sub(1).map(I::new) + } + + #[inline] + pub fn swap(&mut self, a: I, b: I) { + self.raw.swap(a.index(), b.index()); + } + + #[inline] + pub fn get(&self, index: I) -> Option<&T> { + self.raw.get(index.index()) + } + + #[inline] + pub fn get_mut(&mut self, index: I) -> Option<&mut T> { + self.raw.get_mut(index.index()) + } + + #[inline] + pub fn binary_search(&self, value: &T) -> Result + where + T: Ord, + { + match self.raw.binary_search(value) { + Ok(i) => Ok(Idx::new(i)), + Err(i) => Err(Idx::new(i)), + } + } +} + +impl Debug for IndexSlice +where + I: Idx, + T: Debug, +{ + fn fmt(&self, fmt: &mut Formatter<'_>) -> std::fmt::Result { + std::fmt::Debug::fmt(&self.raw, fmt) + } +} + +impl Index for IndexSlice { + type Output = T; + + #[inline] + fn index(&self, index: I) -> &T { + &self.raw[index.index()] + } +} + +impl IndexMut for IndexSlice { + #[inline] + fn index_mut(&mut self, index: I) -> &mut T { + &mut self.raw[index.index()] + } +} + +impl<'a, I: Idx, T> IntoIterator for &'a IndexSlice { + type Item = &'a T; + type IntoIter = std::slice::Iter<'a, T>; + + #[inline] + fn into_iter(self) -> std::slice::Iter<'a, T> { + self.raw.iter() + } +} + +impl<'a, I: Idx, T> IntoIterator for &'a mut IndexSlice { + type Item = &'a mut T; + type IntoIter = std::slice::IterMut<'a, T>; + + #[inline] + fn into_iter(self) -> std::slice::IterMut<'a, T> { + self.raw.iter_mut() + } +} + +impl ToOwned for IndexSlice { + type Owned = IndexVec; + + fn to_owned(&self) -> IndexVec { + IndexVec::from_raw(self.raw.to_owned()) + } + + fn clone_into(&self, target: &mut IndexVec) { + self.raw.clone_into(&mut target.raw); + } +} + +impl Default for &IndexSlice { + #[inline] + fn default() -> 
Self { + IndexSlice::from_raw(Default::default()) + } +} + +impl Default for &mut IndexSlice { + #[inline] + fn default() -> Self { + IndexSlice::from_raw_mut(Default::default()) + } +} + +// Whether `IndexSlice` is `Send` depends only on the data, +// not the phantom data. +#[allow(unsafe_code)] +unsafe impl Send for IndexSlice where T: Send {} diff --git a/crates/ruff_index/src/vec.rs b/crates/ruff_index/src/vec.rs new file mode 100644 index 0000000000000..36fa6388aca89 --- /dev/null +++ b/crates/ruff_index/src/vec.rs @@ -0,0 +1,170 @@ +use crate::slice::IndexSlice; +use crate::Idx; +use std::borrow::{Borrow, BorrowMut}; +use std::fmt::{Debug, Formatter}; +use std::marker::PhantomData; +use std::ops::{Deref, DerefMut, RangeBounds}; + +/// An owned sequence of `T` indexed by `I` +#[derive(Clone, PartialEq, Eq, Hash)] +#[repr(transparent)] +pub struct IndexVec { + pub raw: Vec, + index: PhantomData, +} + +impl IndexVec { + #[inline] + pub fn new() -> Self { + Self { + raw: Vec::new(), + index: PhantomData, + } + } + + #[inline] + pub fn with_capacity(capacity: usize) -> Self { + Self { + raw: Vec::with_capacity(capacity), + index: PhantomData, + } + } + + #[inline] + pub fn from_raw(raw: Vec) -> Self { + Self { + raw, + index: PhantomData, + } + } + + #[inline] + pub fn drain>(&mut self, range: R) -> impl Iterator + '_ { + self.raw.drain(range) + } + + #[inline] + pub fn truncate(&mut self, a: usize) { + self.raw.truncate(a); + } + + #[inline] + pub fn as_slice(&self) -> &IndexSlice { + IndexSlice::from_raw(&self.raw) + } + + #[inline] + pub fn as_mut_slice(&mut self) -> &mut IndexSlice { + IndexSlice::from_raw_mut(&mut self.raw) + } + + #[inline] + pub fn push(&mut self, data: T) -> I { + let index = self.next_index(); + self.raw.push(data); + index + } + + #[inline] + pub fn next_index(&self) -> I { + I::new(self.raw.len()) + } +} + +impl Debug for IndexVec +where + T: Debug, +{ + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + std::fmt::Debug::fmt(&self.raw, f) + } +} + +impl Deref for IndexVec { + type Target = IndexSlice; + + fn deref(&self) -> &Self::Target { + self.as_slice() + } +} + +impl DerefMut for IndexVec { + fn deref_mut(&mut self) -> &mut Self::Target { + self.as_mut_slice() + } +} + +impl Borrow> for IndexVec { + fn borrow(&self) -> &IndexSlice { + self + } +} + +impl BorrowMut> for IndexVec { + fn borrow_mut(&mut self) -> &mut IndexSlice { + self + } +} + +impl Extend for IndexVec { + #[inline] + fn extend>(&mut self, iter: Iter) { + self.raw.extend(iter); + } +} + +impl FromIterator for IndexVec { + #[inline] + fn from_iter>(iter: Iter) -> Self { + Self::from_raw(Vec::from_iter(iter)) + } +} + +impl IntoIterator for IndexVec { + type Item = T; + type IntoIter = std::vec::IntoIter; + + #[inline] + fn into_iter(self) -> std::vec::IntoIter { + self.raw.into_iter() + } +} + +impl<'a, I: Idx, T> IntoIterator for &'a IndexVec { + type Item = &'a T; + type IntoIter = std::slice::Iter<'a, T>; + + #[inline] + fn into_iter(self) -> std::slice::Iter<'a, T> { + self.iter() + } +} + +impl<'a, I: Idx, T> IntoIterator for &'a mut IndexVec { + type Item = &'a mut T; + type IntoIter = std::slice::IterMut<'a, T>; + + #[inline] + fn into_iter(self) -> std::slice::IterMut<'a, T> { + self.iter_mut() + } +} + +impl Default for IndexVec { + #[inline] + fn default() -> Self { + IndexVec::new() + } +} + +impl From<[T; N]> for IndexVec { + #[inline] + fn from(array: [T; N]) -> Self { + IndexVec::from_raw(array.into()) + } +} + +// Whether `IndexVec` is `Send` depends only on the 
data, +// not the phantom data. +#[allow(unsafe_code)] +unsafe impl Send for IndexVec where T: Send {} diff --git a/crates/ruff_macros/Cargo.toml b/crates/ruff_macros/Cargo.toml index b2c9b7ca99e4d..4d95a74a2e53d 100644 --- a/crates/ruff_macros/Cargo.toml +++ b/crates/ruff_macros/Cargo.toml @@ -12,6 +12,6 @@ doctest = false [dependencies] proc-macro2 = { workspace = true } quote = { workspace = true } -syn = { workspace = true, features = ["derive", "parsing", "extra-traits"] } +syn = { workspace = true, features = ["derive", "parsing", "extra-traits", "full"] } textwrap = { workspace = true } itertools = { workspace = true } diff --git a/crates/ruff_macros/src/lib.rs b/crates/ruff_macros/src/lib.rs index 22ffb939b3143..f02a2d1b1652a 100644 --- a/crates/ruff_macros/src/lib.rs +++ b/crates/ruff_macros/src/lib.rs @@ -1,6 +1,7 @@ //! This crate implements internal macros for the `ruff` library. use crate::cache_key::derive_cache_key; +use crate::newtype_index::generate_newtype_index; use proc_macro::TokenStream; use syn::{parse_macro_input, DeriveInput, ItemFn, ItemStruct}; @@ -9,6 +10,7 @@ mod combine_options; mod config; mod derive_message_formats; mod map_codes; +mod newtype_index; mod register_rules; mod rule_code_prefix; mod rule_namespace; @@ -79,3 +81,33 @@ pub fn derive_message_formats(_attr: TokenStream, item: TokenStream) -> TokenStr let func = parse_macro_input!(item as ItemFn); derive_message_formats::derive_message_formats(&func).into() } + +/// Derives a newtype wrapper that can be used as an index. +/// The wrapper can represent indices up to `u32::MAX - 1`. +/// +/// The `u32::MAX - 1` is an optimization so that `Option` has the same size as `Index`. +/// +/// Can store at most `u32::MAX - 1` values +/// +/// ## Warning +/// +/// Additional `derive` attributes must come AFTER this attribute: +/// +/// Good: +/// +/// ```rust +/// #[newtype_index] +/// #[derive(Ord, PartialOrd)] +/// struct MyIndex; +/// ``` +#[proc_macro_attribute] +pub fn newtype_index(_metadata: TokenStream, input: TokenStream) -> TokenStream { + let item = parse_macro_input!(input as ItemStruct); + + let output = match generate_newtype_index(item) { + Ok(output) => output, + Err(err) => err.to_compile_error(), + }; + + TokenStream::from(output) +} diff --git a/crates/ruff_macros/src/newtype_index.rs b/crates/ruff_macros/src/newtype_index.rs new file mode 100644 index 0000000000000..f6524b48a9e3d --- /dev/null +++ b/crates/ruff_macros/src/newtype_index.rs @@ -0,0 +1,139 @@ +use quote::quote; +use syn::spanned::Spanned; +use syn::{Error, ItemStruct}; + +pub(super) fn generate_newtype_index(item: ItemStruct) -> syn::Result { + if !item.fields.is_empty() { + return Err(Error::new( + item.span(), + "A new type index cannot have any fields.", + )); + } + + if !item.generics.params.is_empty() { + return Err(Error::new( + item.span(), + "A new type index cannot be generic.", + )); + } + + let ItemStruct { + attrs, + vis, + struct_token, + ident, + generics: _, + fields: _, + semi_token, + } = item; + + let debug_name = ident.to_string(); + + let semi_token = semi_token.unwrap_or_default(); + let output = quote! { + #(#attrs)* + #[derive(Copy, Clone, Eq, PartialEq, Hash)] + #vis #struct_token #ident(std::num::NonZeroU32)#semi_token + + impl #ident { + const MAX: u32 = u32::MAX - 1; + + #vis const fn from_usize(value: usize) -> Self { + assert!(value <= Self::MAX as usize); + + // SAFETY: + // * The `value < u32::MAX` guarantees that the add doesn't overflow. 
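For orientation, the pieces above fit together as follows: `#[newtype_index]` expands to a `NonZeroU32`-backed index type implementing `ruff_index::Idx`, and `IndexVec`/`IndexSlice` are then indexed by that type instead of `usize`. A minimal usage sketch, assuming `ruff_index` is available as a dependency; the `NameId` type and the `main` driver here are made up for illustration:

```rust
use ruff_index::{newtype_index, IndexVec};

// Hypothetical index type; the attribute expands to a `NonZeroU32` newtype
// with `from_u32`/`from_usize`/`index` plus an `Idx` impl, as shown above.
#[newtype_index]
struct NameId;

fn main() {
    let mut names: IndexVec<NameId, String> = IndexVec::new();

    // `push` hands back the strongly typed index of the new element.
    let foo: NameId = names.push("foo".to_string());
    let bar = names.push("bar".to_string());

    // Indexing goes through `Index<NameId>` on `IndexSlice`, not `usize`.
    assert_eq!(names[foo], "foo");
    assert_eq!(bar.index(), 1);

    // Iterate the typed indices rather than raw positions.
    for id in names.indices() {
        println!("{id:?} -> {}", names[id]);
    }

    // The `u32::MAX - 1` cap buys the niche: `Option<NameId>` is no larger
    // than `NameId` itself, because the all-zero bit pattern is never used.
    assert_eq!(
        std::mem::size_of::<Option<NameId>>(),
        std::mem::size_of::<NameId>()
    );
}
```

Note that the generated `from_usize`/`from_u32` assert the `u32::MAX - 1` bound, so an out-of-range index panics rather than wrapping.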
+ // * The `+ 1` guarantees that the index is not zero + #[allow(unsafe_code)] + Self(unsafe { std::num::NonZeroU32::new_unchecked((value as u32) + 1) }) + } + + #vis const fn from_u32(value: u32) -> Self { + assert!(value <= Self::MAX); + + // SAFETY: + // * The `value < u32::MAX` guarantees that the add doesn't overflow. + // * The `+ 1` guarantees that the index is larger than zero. + #[allow(unsafe_code)] + Self(unsafe { std::num::NonZeroU32::new_unchecked(value + 1) }) + } + + /// Returns the index as a `u32` value + #[inline] + #vis const fn as_u32(self) -> u32 { + self.0.get() - 1 + } + + /// Returns the index as a `u32` value + #[inline] + #vis const fn as_usize(self) -> usize { + self.as_u32() as usize + } + + #[inline] + #vis const fn index(self) -> usize { + self.as_usize() + } + } + + impl std::ops::Add for #ident { + type Output = #ident; + + fn add(self, rhs: usize) -> Self::Output { + #ident::from_usize(self.index() + rhs) + } + } + + impl std::ops::Add for #ident { + type Output = #ident; + + fn add(self, rhs: Self) -> Self::Output { + #ident::from_usize(self.index() + rhs.index()) + } + } + + impl std::fmt::Debug for #ident { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_tuple(#debug_name).field(&self.index()).finish() + } + } + + impl ruff_index::Idx for #ident { + #[inline] + fn new(value: usize) -> Self { + #ident::from_usize(value) + } + + #[inline] + fn index(self) -> usize { + self.index() + } + } + + impl From for #ident { + fn from(value: usize) -> Self { + #ident::from_usize(value) + } + } + + impl From for #ident { + fn from(value: u32) -> Self { + #ident::from_u32(value) + } + } + + impl From<#ident> for usize { + fn from(value: #ident) -> Self { + value.as_usize() + } + } + + impl From<#ident> for u32 { + fn from(value: #ident) -> Self { + value.as_u32() + } + } + }; + + Ok(output) +} diff --git a/crates/ruff_python_ast/Cargo.toml b/crates/ruff_python_ast/Cargo.toml index c61b487ae2869..5b8da74706771 100644 --- a/crates/ruff_python_ast/Cargo.toml +++ b/crates/ruff_python_ast/Cargo.toml @@ -19,10 +19,10 @@ memchr = "2.5.0" num-bigint = { version = "0.4.3" } num-traits = { version = "0.2.15" } once_cell = { workspace = true } -regex = { workspace = true } rustc-hash = { workspace = true } rustpython-literal = { workspace = true } rustpython-parser = { workspace = true } +rustpython-ast = { workspace = true } serde = { workspace = true, optional = true } smallvec = { workspace = true } diff --git a/crates/ruff_python_ast/src/helpers.rs b/crates/ruff_python_ast/src/helpers.rs index d064c8f15fb7c..352023a337425 100644 --- a/crates/ruff_python_ast/src/helpers.rs +++ b/crates/ruff_python_ast/src/helpers.rs @@ -4,8 +4,6 @@ use std::path::Path; use itertools::Itertools; use log::error; use num_traits::Zero; -use once_cell::sync::Lazy; -use regex::Regex; use ruff_text_size::{TextRange, TextSize}; use rustc_hash::{FxHashMap, FxHashSet}; use rustpython_parser::ast::{ @@ -542,7 +540,9 @@ where body.iter().any(|stmt| any_over_stmt(stmt, func)) } -static DUNDER_REGEX: Lazy = Lazy::new(|| Regex::new(r"__[^\s]+__").unwrap()); +fn is_dunder(id: &str) -> bool { + id.starts_with("__") && id.ends_with("__") +} /// Return `true` if the [`Stmt`] is an assignment to a dunder (like `__all__`). pub fn is_assignment_to_a_dunder(stmt: &Stmt) -> bool { @@ -553,15 +553,19 @@ pub fn is_assignment_to_a_dunder(stmt: &Stmt) -> bool { if targets.len() != 1 { return false; } - match &targets[0] { - Expr::Name(ast::ExprName { id, .. 
}) => DUNDER_REGEX.is_match(id.as_str()), - _ => false, + if let Expr::Name(ast::ExprName { id, .. }) = &targets[0] { + is_dunder(id) + } else { + false + } + } + Stmt::AnnAssign(ast::StmtAnnAssign { target, .. }) => { + if let Expr::Name(ast::ExprName { id, .. }) = target.as_ref() { + is_dunder(id) + } else { + false } } - Stmt::AnnAssign(ast::StmtAnnAssign { target, .. }) => match target.as_ref() { - Expr::Name(ast::ExprName { id, .. }) => DUNDER_REGEX.is_match(id.as_str()), - _ => false, - }, _ => false, } } @@ -1086,21 +1090,40 @@ pub fn match_parens(start: TextSize, locator: &Locator) -> Option { /// Specifically, this method returns the range of a function or class name, /// rather than that of the entire function or class body. pub fn identifier_range(stmt: &Stmt, locator: &Locator) -> TextRange { - if matches!( - stmt, - Stmt::ClassDef(_) | Stmt::FunctionDef(_) | Stmt::AsyncFunctionDef(_) - ) { - let contents = &locator.contents()[stmt.range()]; - - for (tok, range) in lexer::lex_starts_at(contents, Mode::Module, stmt.start()).flatten() { - if matches!(tok, Tok::Name { .. }) { - return range; - } + match stmt { + Stmt::ClassDef(ast::StmtClassDef { + decorator_list, + range, + .. + }) + | Stmt::FunctionDef(ast::StmtFunctionDef { + decorator_list, + range, + .. + }) + | Stmt::AsyncFunctionDef(ast::StmtAsyncFunctionDef { + decorator_list, + range, + .. + }) => { + let header_range = decorator_list.last().map_or(*range, |last_decorator| { + TextRange::new(last_decorator.end(), range.end()) + }); + + let contents = locator.slice(header_range); + + let mut tokens = + lexer::lex_starts_at(contents, Mode::Module, header_range.start()).flatten(); + tokens + .find_map(|(t, range)| t.is_name().then_some(range)) + .unwrap_or_else(|| { + error!("Failed to find identifier for {:?}", stmt); + + header_range + }) } - error!("Failed to find identifier for {:?}", stmt); + _ => stmt.range(), } - - stmt.range() } /// Return the ranges of [`Tok::Name`] tokens within a specified node. diff --git a/crates/ruff_python_ast/src/imports.rs b/crates/ruff_python_ast/src/imports.rs index 3b92b8c06748d..b9d24d74dc834 100644 --- a/crates/ruff_python_ast/src/imports.rs +++ b/crates/ruff_python_ast/src/imports.rs @@ -1,6 +1,5 @@ use ruff_text_size::TextRange; use rustc_hash::FxHashMap; - #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; diff --git a/crates/ruff_python_ast/src/lib.rs b/crates/ruff_python_ast/src/lib.rs index 1e5dab1cf8c2e..632de88a9e810 100644 --- a/crates/ruff_python_ast/src/lib.rs +++ b/crates/ruff_python_ast/src/lib.rs @@ -6,6 +6,8 @@ pub mod hashable; pub mod helpers; pub mod imports; pub mod newlines; +pub mod node; +pub mod prelude; pub mod relocate; pub mod source_code; pub mod statement_visitor; diff --git a/crates/ruff_python_ast/src/newlines.rs b/crates/ruff_python_ast/src/newlines.rs index 5e3b97f85a435..bba0343d0aa09 100644 --- a/crates/ruff_python_ast/src/newlines.rs +++ b/crates/ruff_python_ast/src/newlines.rs @@ -1,8 +1,9 @@ -use memchr::{memchr2, memrchr2}; -use ruff_text_size::{TextLen, TextRange, TextSize}; use std::iter::FusedIterator; use std::ops::Deref; +use memchr::{memchr2, memrchr2}; +use ruff_text_size::{TextLen, TextRange, TextSize}; + /// Extension trait for [`str`] that provides a [`UniversalNewlineIterator`]. 
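///
/// A usage sketch (illustrative, not a verified doctest): iterating
/// `universal_newlines()` visits one logical line at a time regardless of
/// whether the text uses `\n`, `\r\n`, or `\r` line endings.
///
/// ```ignore
/// use ruff_python_ast::newlines::StrExt;
///
/// for line in "a\nb\r\nc".universal_newlines() {
///     // each item is a `Line` spanning one logical line of the input
///     let _ = line;
/// }
/// ```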
pub trait StrExt { fn universal_newlines(&self) -> UniversalNewlineIterator<'_>; @@ -337,10 +338,12 @@ impl Deref for LineEnding { #[cfg(test)] mod tests { - use super::UniversalNewlineIterator; - use crate::newlines::Line; use ruff_text_size::TextSize; + use crate::newlines::Line; + + use super::UniversalNewlineIterator; + #[test] fn universal_newlines_empty_str() { let lines: Vec<_> = UniversalNewlineIterator::from("").collect(); diff --git a/crates/ruff_python_ast/src/node.rs b/crates/ruff_python_ast/src/node.rs new file mode 100644 index 0000000000000..7996b2b7cd164 --- /dev/null +++ b/crates/ruff_python_ast/src/node.rs @@ -0,0 +1,3678 @@ +use crate::prelude::*; +use ruff_text_size::TextRange; + +pub trait AstNode: Ranged { + fn cast(kind: AnyNode) -> Option + where + Self: Sized; + fn cast_ref(kind: AnyNodeRef) -> Option<&Self>; +} + +#[derive(Clone, Debug, is_macro::Is, PartialEq)] +pub enum AnyNode { + ModModule(ModModule), + ModInteractive(ModInteractive), + ModExpression(ModExpression), + ModFunctionType(ModFunctionType), + StmtFunctionDef(StmtFunctionDef), + StmtAsyncFunctionDef(StmtAsyncFunctionDef), + StmtClassDef(StmtClassDef), + StmtReturn(StmtReturn), + StmtDelete(StmtDelete), + StmtAssign(StmtAssign), + StmtAugAssign(StmtAugAssign), + StmtAnnAssign(StmtAnnAssign), + StmtFor(StmtFor), + StmtAsyncFor(StmtAsyncFor), + StmtWhile(StmtWhile), + StmtIf(StmtIf), + StmtWith(StmtWith), + StmtAsyncWith(StmtAsyncWith), + StmtMatch(StmtMatch), + StmtRaise(StmtRaise), + StmtTry(StmtTry), + StmtTryStar(StmtTryStar), + StmtAssert(StmtAssert), + StmtImport(StmtImport), + StmtImportFrom(StmtImportFrom), + StmtGlobal(StmtGlobal), + StmtNonlocal(StmtNonlocal), + StmtExpr(StmtExpr), + StmtPass(StmtPass), + StmtBreak(StmtBreak), + StmtContinue(StmtContinue), + ExprBoolOp(ExprBoolOp), + ExprNamedExpr(ExprNamedExpr), + ExprBinOp(ExprBinOp), + ExprUnaryOp(ExprUnaryOp), + ExprLambda(ExprLambda), + ExprIfExp(ExprIfExp), + ExprDict(ExprDict), + ExprSet(ExprSet), + ExprListComp(ExprListComp), + ExprSetComp(ExprSetComp), + ExprDictComp(ExprDictComp), + ExprGeneratorExp(ExprGeneratorExp), + ExprAwait(ExprAwait), + ExprYield(ExprYield), + ExprYieldFrom(ExprYieldFrom), + ExprCompare(ExprCompare), + ExprCall(ExprCall), + ExprFormattedValue(ExprFormattedValue), + ExprJoinedStr(ExprJoinedStr), + ExprConstant(ExprConstant), + ExprAttribute(ExprAttribute), + ExprSubscript(ExprSubscript), + ExprStarred(ExprStarred), + ExprName(ExprName), + ExprList(ExprList), + ExprTuple(ExprTuple), + ExprSlice(ExprSlice), + ExcepthandlerExceptHandler(ExcepthandlerExceptHandler), + PatternMatchValue(PatternMatchValue), + PatternMatchSingleton(PatternMatchSingleton), + PatternMatchSequence(PatternMatchSequence), + PatternMatchMapping(PatternMatchMapping), + PatternMatchClass(PatternMatchClass), + PatternMatchStar(PatternMatchStar), + PatternMatchAs(PatternMatchAs), + PatternMatchOr(PatternMatchOr), + TypeIgnoreTypeIgnore(TypeIgnoreTypeIgnore), + Comprehension(Comprehension), + Arguments(Arguments), + Arg(Arg), + Keyword(Keyword), + Alias(Alias), + Withitem(Withitem), + MatchCase(MatchCase), +} + +impl AnyNode { + pub fn statement(self) -> Option { + match self { + AnyNode::StmtFunctionDef(node) => Some(Stmt::FunctionDef(node)), + AnyNode::StmtAsyncFunctionDef(node) => Some(Stmt::AsyncFunctionDef(node)), + AnyNode::StmtClassDef(node) => Some(Stmt::ClassDef(node)), + AnyNode::StmtReturn(node) => Some(Stmt::Return(node)), + AnyNode::StmtDelete(node) => Some(Stmt::Delete(node)), + AnyNode::StmtAssign(node) => 
Some(Stmt::Assign(node)), + AnyNode::StmtAugAssign(node) => Some(Stmt::AugAssign(node)), + AnyNode::StmtAnnAssign(node) => Some(Stmt::AnnAssign(node)), + AnyNode::StmtFor(node) => Some(Stmt::For(node)), + AnyNode::StmtAsyncFor(node) => Some(Stmt::AsyncFor(node)), + AnyNode::StmtWhile(node) => Some(Stmt::While(node)), + AnyNode::StmtIf(node) => Some(Stmt::If(node)), + AnyNode::StmtWith(node) => Some(Stmt::With(node)), + AnyNode::StmtAsyncWith(node) => Some(Stmt::AsyncWith(node)), + AnyNode::StmtMatch(node) => Some(Stmt::Match(node)), + AnyNode::StmtRaise(node) => Some(Stmt::Raise(node)), + AnyNode::StmtTry(node) => Some(Stmt::Try(node)), + AnyNode::StmtTryStar(node) => Some(Stmt::TryStar(node)), + AnyNode::StmtAssert(node) => Some(Stmt::Assert(node)), + AnyNode::StmtImport(node) => Some(Stmt::Import(node)), + AnyNode::StmtImportFrom(node) => Some(Stmt::ImportFrom(node)), + AnyNode::StmtGlobal(node) => Some(Stmt::Global(node)), + AnyNode::StmtNonlocal(node) => Some(Stmt::Nonlocal(node)), + AnyNode::StmtExpr(node) => Some(Stmt::Expr(node)), + AnyNode::StmtPass(node) => Some(Stmt::Pass(node)), + AnyNode::StmtBreak(node) => Some(Stmt::Break(node)), + AnyNode::StmtContinue(node) => Some(Stmt::Continue(node)), + + AnyNode::ModModule(_) + | AnyNode::ModInteractive(_) + | AnyNode::ModExpression(_) + | AnyNode::ModFunctionType(_) + | AnyNode::ExprBoolOp(_) + | AnyNode::ExprNamedExpr(_) + | AnyNode::ExprBinOp(_) + | AnyNode::ExprUnaryOp(_) + | AnyNode::ExprLambda(_) + | AnyNode::ExprIfExp(_) + | AnyNode::ExprDict(_) + | AnyNode::ExprSet(_) + | AnyNode::ExprListComp(_) + | AnyNode::ExprSetComp(_) + | AnyNode::ExprDictComp(_) + | AnyNode::ExprGeneratorExp(_) + | AnyNode::ExprAwait(_) + | AnyNode::ExprYield(_) + | AnyNode::ExprYieldFrom(_) + | AnyNode::ExprCompare(_) + | AnyNode::ExprCall(_) + | AnyNode::ExprFormattedValue(_) + | AnyNode::ExprJoinedStr(_) + | AnyNode::ExprConstant(_) + | AnyNode::ExprAttribute(_) + | AnyNode::ExprSubscript(_) + | AnyNode::ExprStarred(_) + | AnyNode::ExprName(_) + | AnyNode::ExprList(_) + | AnyNode::ExprTuple(_) + | AnyNode::ExprSlice(_) + | AnyNode::ExcepthandlerExceptHandler(_) + | AnyNode::PatternMatchValue(_) + | AnyNode::PatternMatchSingleton(_) + | AnyNode::PatternMatchSequence(_) + | AnyNode::PatternMatchMapping(_) + | AnyNode::PatternMatchClass(_) + | AnyNode::PatternMatchStar(_) + | AnyNode::PatternMatchAs(_) + | AnyNode::PatternMatchOr(_) + | AnyNode::TypeIgnoreTypeIgnore(_) + | AnyNode::Comprehension(_) + | AnyNode::Arguments(_) + | AnyNode::Arg(_) + | AnyNode::Keyword(_) + | AnyNode::Alias(_) + | AnyNode::Withitem(_) + | AnyNode::MatchCase(_) => None, + } + } + + pub fn expression(self) -> Option { + match self { + AnyNode::ExprBoolOp(node) => Some(Expr::BoolOp(node)), + AnyNode::ExprNamedExpr(node) => Some(Expr::NamedExpr(node)), + AnyNode::ExprBinOp(node) => Some(Expr::BinOp(node)), + AnyNode::ExprUnaryOp(node) => Some(Expr::UnaryOp(node)), + AnyNode::ExprLambda(node) => Some(Expr::Lambda(node)), + AnyNode::ExprIfExp(node) => Some(Expr::IfExp(node)), + AnyNode::ExprDict(node) => Some(Expr::Dict(node)), + AnyNode::ExprSet(node) => Some(Expr::Set(node)), + AnyNode::ExprListComp(node) => Some(Expr::ListComp(node)), + AnyNode::ExprSetComp(node) => Some(Expr::SetComp(node)), + AnyNode::ExprDictComp(node) => Some(Expr::DictComp(node)), + AnyNode::ExprGeneratorExp(node) => Some(Expr::GeneratorExp(node)), + AnyNode::ExprAwait(node) => Some(Expr::Await(node)), + AnyNode::ExprYield(node) => Some(Expr::Yield(node)), + AnyNode::ExprYieldFrom(node) => 
Some(Expr::YieldFrom(node)), + AnyNode::ExprCompare(node) => Some(Expr::Compare(node)), + AnyNode::ExprCall(node) => Some(Expr::Call(node)), + AnyNode::ExprFormattedValue(node) => Some(Expr::FormattedValue(node)), + AnyNode::ExprJoinedStr(node) => Some(Expr::JoinedStr(node)), + AnyNode::ExprConstant(node) => Some(Expr::Constant(node)), + AnyNode::ExprAttribute(node) => Some(Expr::Attribute(node)), + AnyNode::ExprSubscript(node) => Some(Expr::Subscript(node)), + AnyNode::ExprStarred(node) => Some(Expr::Starred(node)), + AnyNode::ExprName(node) => Some(Expr::Name(node)), + AnyNode::ExprList(node) => Some(Expr::List(node)), + AnyNode::ExprTuple(node) => Some(Expr::Tuple(node)), + AnyNode::ExprSlice(node) => Some(Expr::Slice(node)), + + AnyNode::ModModule(_) + | AnyNode::ModInteractive(_) + | AnyNode::ModExpression(_) + | AnyNode::ModFunctionType(_) + | AnyNode::StmtFunctionDef(_) + | AnyNode::StmtAsyncFunctionDef(_) + | AnyNode::StmtClassDef(_) + | AnyNode::StmtReturn(_) + | AnyNode::StmtDelete(_) + | AnyNode::StmtAssign(_) + | AnyNode::StmtAugAssign(_) + | AnyNode::StmtAnnAssign(_) + | AnyNode::StmtFor(_) + | AnyNode::StmtAsyncFor(_) + | AnyNode::StmtWhile(_) + | AnyNode::StmtIf(_) + | AnyNode::StmtWith(_) + | AnyNode::StmtAsyncWith(_) + | AnyNode::StmtMatch(_) + | AnyNode::StmtRaise(_) + | AnyNode::StmtTry(_) + | AnyNode::StmtTryStar(_) + | AnyNode::StmtAssert(_) + | AnyNode::StmtImport(_) + | AnyNode::StmtImportFrom(_) + | AnyNode::StmtGlobal(_) + | AnyNode::StmtNonlocal(_) + | AnyNode::StmtExpr(_) + | AnyNode::StmtPass(_) + | AnyNode::StmtBreak(_) + | AnyNode::StmtContinue(_) + | AnyNode::ExcepthandlerExceptHandler(_) + | AnyNode::PatternMatchValue(_) + | AnyNode::PatternMatchSingleton(_) + | AnyNode::PatternMatchSequence(_) + | AnyNode::PatternMatchMapping(_) + | AnyNode::PatternMatchClass(_) + | AnyNode::PatternMatchStar(_) + | AnyNode::PatternMatchAs(_) + | AnyNode::PatternMatchOr(_) + | AnyNode::TypeIgnoreTypeIgnore(_) + | AnyNode::Comprehension(_) + | AnyNode::Arguments(_) + | AnyNode::Arg(_) + | AnyNode::Keyword(_) + | AnyNode::Alias(_) + | AnyNode::Withitem(_) + | AnyNode::MatchCase(_) => None, + } + } + + pub fn module(self) -> Option { + match self { + AnyNode::ModModule(node) => Some(Mod::Module(node)), + AnyNode::ModInteractive(node) => Some(Mod::Interactive(node)), + AnyNode::ModExpression(node) => Some(Mod::Expression(node)), + AnyNode::ModFunctionType(node) => Some(Mod::FunctionType(node)), + + AnyNode::StmtFunctionDef(_) + | AnyNode::StmtAsyncFunctionDef(_) + | AnyNode::StmtClassDef(_) + | AnyNode::StmtReturn(_) + | AnyNode::StmtDelete(_) + | AnyNode::StmtAssign(_) + | AnyNode::StmtAugAssign(_) + | AnyNode::StmtAnnAssign(_) + | AnyNode::StmtFor(_) + | AnyNode::StmtAsyncFor(_) + | AnyNode::StmtWhile(_) + | AnyNode::StmtIf(_) + | AnyNode::StmtWith(_) + | AnyNode::StmtAsyncWith(_) + | AnyNode::StmtMatch(_) + | AnyNode::StmtRaise(_) + | AnyNode::StmtTry(_) + | AnyNode::StmtTryStar(_) + | AnyNode::StmtAssert(_) + | AnyNode::StmtImport(_) + | AnyNode::StmtImportFrom(_) + | AnyNode::StmtGlobal(_) + | AnyNode::StmtNonlocal(_) + | AnyNode::StmtExpr(_) + | AnyNode::StmtPass(_) + | AnyNode::StmtBreak(_) + | AnyNode::StmtContinue(_) + | AnyNode::ExprBoolOp(_) + | AnyNode::ExprNamedExpr(_) + | AnyNode::ExprBinOp(_) + | AnyNode::ExprUnaryOp(_) + | AnyNode::ExprLambda(_) + | AnyNode::ExprIfExp(_) + | AnyNode::ExprDict(_) + | AnyNode::ExprSet(_) + | AnyNode::ExprListComp(_) + | AnyNode::ExprSetComp(_) + | AnyNode::ExprDictComp(_) + | AnyNode::ExprGeneratorExp(_) + | AnyNode::ExprAwait(_) + 
| AnyNode::ExprYield(_) + | AnyNode::ExprYieldFrom(_) + | AnyNode::ExprCompare(_) + | AnyNode::ExprCall(_) + | AnyNode::ExprFormattedValue(_) + | AnyNode::ExprJoinedStr(_) + | AnyNode::ExprConstant(_) + | AnyNode::ExprAttribute(_) + | AnyNode::ExprSubscript(_) + | AnyNode::ExprStarred(_) + | AnyNode::ExprName(_) + | AnyNode::ExprList(_) + | AnyNode::ExprTuple(_) + | AnyNode::ExprSlice(_) + | AnyNode::ExcepthandlerExceptHandler(_) + | AnyNode::PatternMatchValue(_) + | AnyNode::PatternMatchSingleton(_) + | AnyNode::PatternMatchSequence(_) + | AnyNode::PatternMatchMapping(_) + | AnyNode::PatternMatchClass(_) + | AnyNode::PatternMatchStar(_) + | AnyNode::PatternMatchAs(_) + | AnyNode::PatternMatchOr(_) + | AnyNode::TypeIgnoreTypeIgnore(_) + | AnyNode::Comprehension(_) + | AnyNode::Arguments(_) + | AnyNode::Arg(_) + | AnyNode::Keyword(_) + | AnyNode::Alias(_) + | AnyNode::Withitem(_) + | AnyNode::MatchCase(_) => None, + } + } + + pub fn pattern(self) -> Option { + match self { + AnyNode::PatternMatchValue(node) => Some(Pattern::MatchValue(node)), + AnyNode::PatternMatchSingleton(node) => Some(Pattern::MatchSingleton(node)), + AnyNode::PatternMatchSequence(node) => Some(Pattern::MatchSequence(node)), + AnyNode::PatternMatchMapping(node) => Some(Pattern::MatchMapping(node)), + AnyNode::PatternMatchClass(node) => Some(Pattern::MatchClass(node)), + AnyNode::PatternMatchStar(node) => Some(Pattern::MatchStar(node)), + AnyNode::PatternMatchAs(node) => Some(Pattern::MatchAs(node)), + AnyNode::PatternMatchOr(node) => Some(Pattern::MatchOr(node)), + + AnyNode::ModModule(_) + | AnyNode::ModInteractive(_) + | AnyNode::ModExpression(_) + | AnyNode::ModFunctionType(_) + | AnyNode::StmtFunctionDef(_) + | AnyNode::StmtAsyncFunctionDef(_) + | AnyNode::StmtClassDef(_) + | AnyNode::StmtReturn(_) + | AnyNode::StmtDelete(_) + | AnyNode::StmtAssign(_) + | AnyNode::StmtAugAssign(_) + | AnyNode::StmtAnnAssign(_) + | AnyNode::StmtFor(_) + | AnyNode::StmtAsyncFor(_) + | AnyNode::StmtWhile(_) + | AnyNode::StmtIf(_) + | AnyNode::StmtWith(_) + | AnyNode::StmtAsyncWith(_) + | AnyNode::StmtMatch(_) + | AnyNode::StmtRaise(_) + | AnyNode::StmtTry(_) + | AnyNode::StmtTryStar(_) + | AnyNode::StmtAssert(_) + | AnyNode::StmtImport(_) + | AnyNode::StmtImportFrom(_) + | AnyNode::StmtGlobal(_) + | AnyNode::StmtNonlocal(_) + | AnyNode::StmtExpr(_) + | AnyNode::StmtPass(_) + | AnyNode::StmtBreak(_) + | AnyNode::StmtContinue(_) + | AnyNode::ExprBoolOp(_) + | AnyNode::ExprNamedExpr(_) + | AnyNode::ExprBinOp(_) + | AnyNode::ExprUnaryOp(_) + | AnyNode::ExprLambda(_) + | AnyNode::ExprIfExp(_) + | AnyNode::ExprDict(_) + | AnyNode::ExprSet(_) + | AnyNode::ExprListComp(_) + | AnyNode::ExprSetComp(_) + | AnyNode::ExprDictComp(_) + | AnyNode::ExprGeneratorExp(_) + | AnyNode::ExprAwait(_) + | AnyNode::ExprYield(_) + | AnyNode::ExprYieldFrom(_) + | AnyNode::ExprCompare(_) + | AnyNode::ExprCall(_) + | AnyNode::ExprFormattedValue(_) + | AnyNode::ExprJoinedStr(_) + | AnyNode::ExprConstant(_) + | AnyNode::ExprAttribute(_) + | AnyNode::ExprSubscript(_) + | AnyNode::ExprStarred(_) + | AnyNode::ExprName(_) + | AnyNode::ExprList(_) + | AnyNode::ExprTuple(_) + | AnyNode::ExprSlice(_) + | AnyNode::ExcepthandlerExceptHandler(_) + | AnyNode::TypeIgnoreTypeIgnore(_) + | AnyNode::Comprehension(_) + | AnyNode::Arguments(_) + | AnyNode::Arg(_) + | AnyNode::Keyword(_) + | AnyNode::Alias(_) + | AnyNode::Withitem(_) + | AnyNode::MatchCase(_) => None, + } + } + + pub fn except_handler(self) -> Option { + match self { + AnyNode::ExcepthandlerExceptHandler(node) => 
Some(Excepthandler::ExceptHandler(node)), + + AnyNode::ModModule(_) + | AnyNode::ModInteractive(_) + | AnyNode::ModExpression(_) + | AnyNode::ModFunctionType(_) + | AnyNode::StmtFunctionDef(_) + | AnyNode::StmtAsyncFunctionDef(_) + | AnyNode::StmtClassDef(_) + | AnyNode::StmtReturn(_) + | AnyNode::StmtDelete(_) + | AnyNode::StmtAssign(_) + | AnyNode::StmtAugAssign(_) + | AnyNode::StmtAnnAssign(_) + | AnyNode::StmtFor(_) + | AnyNode::StmtAsyncFor(_) + | AnyNode::StmtWhile(_) + | AnyNode::StmtIf(_) + | AnyNode::StmtWith(_) + | AnyNode::StmtAsyncWith(_) + | AnyNode::StmtMatch(_) + | AnyNode::StmtRaise(_) + | AnyNode::StmtTry(_) + | AnyNode::StmtTryStar(_) + | AnyNode::StmtAssert(_) + | AnyNode::StmtImport(_) + | AnyNode::StmtImportFrom(_) + | AnyNode::StmtGlobal(_) + | AnyNode::StmtNonlocal(_) + | AnyNode::StmtExpr(_) + | AnyNode::StmtPass(_) + | AnyNode::StmtBreak(_) + | AnyNode::StmtContinue(_) + | AnyNode::ExprBoolOp(_) + | AnyNode::ExprNamedExpr(_) + | AnyNode::ExprBinOp(_) + | AnyNode::ExprUnaryOp(_) + | AnyNode::ExprLambda(_) + | AnyNode::ExprIfExp(_) + | AnyNode::ExprDict(_) + | AnyNode::ExprSet(_) + | AnyNode::ExprListComp(_) + | AnyNode::ExprSetComp(_) + | AnyNode::ExprDictComp(_) + | AnyNode::ExprGeneratorExp(_) + | AnyNode::ExprAwait(_) + | AnyNode::ExprYield(_) + | AnyNode::ExprYieldFrom(_) + | AnyNode::ExprCompare(_) + | AnyNode::ExprCall(_) + | AnyNode::ExprFormattedValue(_) + | AnyNode::ExprJoinedStr(_) + | AnyNode::ExprConstant(_) + | AnyNode::ExprAttribute(_) + | AnyNode::ExprSubscript(_) + | AnyNode::ExprStarred(_) + | AnyNode::ExprName(_) + | AnyNode::ExprList(_) + | AnyNode::ExprTuple(_) + | AnyNode::ExprSlice(_) + | AnyNode::PatternMatchValue(_) + | AnyNode::PatternMatchSingleton(_) + | AnyNode::PatternMatchSequence(_) + | AnyNode::PatternMatchMapping(_) + | AnyNode::PatternMatchClass(_) + | AnyNode::PatternMatchStar(_) + | AnyNode::PatternMatchAs(_) + | AnyNode::PatternMatchOr(_) + | AnyNode::TypeIgnoreTypeIgnore(_) + | AnyNode::Comprehension(_) + | AnyNode::Arguments(_) + | AnyNode::Arg(_) + | AnyNode::Keyword(_) + | AnyNode::Alias(_) + | AnyNode::Withitem(_) + | AnyNode::MatchCase(_) => None, + } + } + + pub fn type_ignore(self) -> Option { + match self { + AnyNode::TypeIgnoreTypeIgnore(node) => Some(TypeIgnore::TypeIgnore(node)), + + AnyNode::ModModule(_) + | AnyNode::ModInteractive(_) + | AnyNode::ModExpression(_) + | AnyNode::ModFunctionType(_) + | AnyNode::StmtFunctionDef(_) + | AnyNode::StmtAsyncFunctionDef(_) + | AnyNode::StmtClassDef(_) + | AnyNode::StmtReturn(_) + | AnyNode::StmtDelete(_) + | AnyNode::StmtAssign(_) + | AnyNode::StmtAugAssign(_) + | AnyNode::StmtAnnAssign(_) + | AnyNode::StmtFor(_) + | AnyNode::StmtAsyncFor(_) + | AnyNode::StmtWhile(_) + | AnyNode::StmtIf(_) + | AnyNode::StmtWith(_) + | AnyNode::StmtAsyncWith(_) + | AnyNode::StmtMatch(_) + | AnyNode::StmtRaise(_) + | AnyNode::StmtTry(_) + | AnyNode::StmtTryStar(_) + | AnyNode::StmtAssert(_) + | AnyNode::StmtImport(_) + | AnyNode::StmtImportFrom(_) + | AnyNode::StmtGlobal(_) + | AnyNode::StmtNonlocal(_) + | AnyNode::StmtExpr(_) + | AnyNode::StmtPass(_) + | AnyNode::StmtBreak(_) + | AnyNode::StmtContinue(_) + | AnyNode::ExprBoolOp(_) + | AnyNode::ExprNamedExpr(_) + | AnyNode::ExprBinOp(_) + | AnyNode::ExprUnaryOp(_) + | AnyNode::ExprLambda(_) + | AnyNode::ExprIfExp(_) + | AnyNode::ExprDict(_) + | AnyNode::ExprSet(_) + | AnyNode::ExprListComp(_) + | AnyNode::ExprSetComp(_) + | AnyNode::ExprDictComp(_) + | AnyNode::ExprGeneratorExp(_) + | AnyNode::ExprAwait(_) + | AnyNode::ExprYield(_) + | 
AnyNode::ExprYieldFrom(_) + | AnyNode::ExprCompare(_) + | AnyNode::ExprCall(_) + | AnyNode::ExprFormattedValue(_) + | AnyNode::ExprJoinedStr(_) + | AnyNode::ExprConstant(_) + | AnyNode::ExprAttribute(_) + | AnyNode::ExprSubscript(_) + | AnyNode::ExprStarred(_) + | AnyNode::ExprName(_) + | AnyNode::ExprList(_) + | AnyNode::ExprTuple(_) + | AnyNode::ExprSlice(_) + | AnyNode::PatternMatchValue(_) + | AnyNode::PatternMatchSingleton(_) + | AnyNode::PatternMatchSequence(_) + | AnyNode::PatternMatchMapping(_) + | AnyNode::PatternMatchClass(_) + | AnyNode::PatternMatchStar(_) + | AnyNode::PatternMatchAs(_) + | AnyNode::PatternMatchOr(_) + | AnyNode::ExcepthandlerExceptHandler(_) + | AnyNode::Comprehension(_) + | AnyNode::Arguments(_) + | AnyNode::Arg(_) + | AnyNode::Keyword(_) + | AnyNode::Alias(_) + | AnyNode::Withitem(_) + | AnyNode::MatchCase(_) => None, + } + } + + pub const fn as_ref(&self) -> AnyNodeRef { + match self { + Self::ModModule(node) => AnyNodeRef::ModModule(node), + Self::ModInteractive(node) => AnyNodeRef::ModInteractive(node), + Self::ModExpression(node) => AnyNodeRef::ModExpression(node), + Self::ModFunctionType(node) => AnyNodeRef::ModFunctionType(node), + Self::StmtFunctionDef(node) => AnyNodeRef::StmtFunctionDef(node), + Self::StmtAsyncFunctionDef(node) => AnyNodeRef::StmtAsyncFunctionDef(node), + Self::StmtClassDef(node) => AnyNodeRef::StmtClassDef(node), + Self::StmtReturn(node) => AnyNodeRef::StmtReturn(node), + Self::StmtDelete(node) => AnyNodeRef::StmtDelete(node), + Self::StmtAssign(node) => AnyNodeRef::StmtAssign(node), + Self::StmtAugAssign(node) => AnyNodeRef::StmtAugAssign(node), + Self::StmtAnnAssign(node) => AnyNodeRef::StmtAnnAssign(node), + Self::StmtFor(node) => AnyNodeRef::StmtFor(node), + Self::StmtAsyncFor(node) => AnyNodeRef::StmtAsyncFor(node), + Self::StmtWhile(node) => AnyNodeRef::StmtWhile(node), + Self::StmtIf(node) => AnyNodeRef::StmtIf(node), + Self::StmtWith(node) => AnyNodeRef::StmtWith(node), + Self::StmtAsyncWith(node) => AnyNodeRef::StmtAsyncWith(node), + Self::StmtMatch(node) => AnyNodeRef::StmtMatch(node), + Self::StmtRaise(node) => AnyNodeRef::StmtRaise(node), + Self::StmtTry(node) => AnyNodeRef::StmtTry(node), + Self::StmtTryStar(node) => AnyNodeRef::StmtTryStar(node), + Self::StmtAssert(node) => AnyNodeRef::StmtAssert(node), + Self::StmtImport(node) => AnyNodeRef::StmtImport(node), + Self::StmtImportFrom(node) => AnyNodeRef::StmtImportFrom(node), + Self::StmtGlobal(node) => AnyNodeRef::StmtGlobal(node), + Self::StmtNonlocal(node) => AnyNodeRef::StmtNonlocal(node), + Self::StmtExpr(node) => AnyNodeRef::StmtExpr(node), + Self::StmtPass(node) => AnyNodeRef::StmtPass(node), + Self::StmtBreak(node) => AnyNodeRef::StmtBreak(node), + Self::StmtContinue(node) => AnyNodeRef::StmtContinue(node), + Self::ExprBoolOp(node) => AnyNodeRef::ExprBoolOp(node), + Self::ExprNamedExpr(node) => AnyNodeRef::ExprNamedExpr(node), + Self::ExprBinOp(node) => AnyNodeRef::ExprBinOp(node), + Self::ExprUnaryOp(node) => AnyNodeRef::ExprUnaryOp(node), + Self::ExprLambda(node) => AnyNodeRef::ExprLambda(node), + Self::ExprIfExp(node) => AnyNodeRef::ExprIfExp(node), + Self::ExprDict(node) => AnyNodeRef::ExprDict(node), + Self::ExprSet(node) => AnyNodeRef::ExprSet(node), + Self::ExprListComp(node) => AnyNodeRef::ExprListComp(node), + Self::ExprSetComp(node) => AnyNodeRef::ExprSetComp(node), + Self::ExprDictComp(node) => AnyNodeRef::ExprDictComp(node), + Self::ExprGeneratorExp(node) => AnyNodeRef::ExprGeneratorExp(node), + Self::ExprAwait(node) => AnyNodeRef::ExprAwait(node), 
+ Self::ExprYield(node) => AnyNodeRef::ExprYield(node), + Self::ExprYieldFrom(node) => AnyNodeRef::ExprYieldFrom(node), + Self::ExprCompare(node) => AnyNodeRef::ExprCompare(node), + Self::ExprCall(node) => AnyNodeRef::ExprCall(node), + Self::ExprFormattedValue(node) => AnyNodeRef::ExprFormattedValue(node), + Self::ExprJoinedStr(node) => AnyNodeRef::ExprJoinedStr(node), + Self::ExprConstant(node) => AnyNodeRef::ExprConstant(node), + Self::ExprAttribute(node) => AnyNodeRef::ExprAttribute(node), + Self::ExprSubscript(node) => AnyNodeRef::ExprSubscript(node), + Self::ExprStarred(node) => AnyNodeRef::ExprStarred(node), + Self::ExprName(node) => AnyNodeRef::ExprName(node), + Self::ExprList(node) => AnyNodeRef::ExprList(node), + Self::ExprTuple(node) => AnyNodeRef::ExprTuple(node), + Self::ExprSlice(node) => AnyNodeRef::ExprSlice(node), + Self::ExcepthandlerExceptHandler(node) => AnyNodeRef::ExcepthandlerExceptHandler(node), + Self::PatternMatchValue(node) => AnyNodeRef::PatternMatchValue(node), + Self::PatternMatchSingleton(node) => AnyNodeRef::PatternMatchSingleton(node), + Self::PatternMatchSequence(node) => AnyNodeRef::PatternMatchSequence(node), + Self::PatternMatchMapping(node) => AnyNodeRef::PatternMatchMapping(node), + Self::PatternMatchClass(node) => AnyNodeRef::PatternMatchClass(node), + Self::PatternMatchStar(node) => AnyNodeRef::PatternMatchStar(node), + Self::PatternMatchAs(node) => AnyNodeRef::PatternMatchAs(node), + Self::PatternMatchOr(node) => AnyNodeRef::PatternMatchOr(node), + Self::TypeIgnoreTypeIgnore(node) => AnyNodeRef::TypeIgnoreTypeIgnore(node), + Self::Comprehension(node) => AnyNodeRef::Comprehension(node), + Self::Arguments(node) => AnyNodeRef::Arguments(node), + Self::Arg(node) => AnyNodeRef::Arg(node), + Self::Keyword(node) => AnyNodeRef::Keyword(node), + Self::Alias(node) => AnyNodeRef::Alias(node), + Self::Withitem(node) => AnyNodeRef::Withitem(node), + Self::MatchCase(node) => AnyNodeRef::MatchCase(node), + } + } + + /// Returns the node's [`kind`](NodeKind) that has no data associated and is [`Copy`]. 
+ pub const fn kind(&self) -> NodeKind { + self.as_ref().kind() + } +} + +impl AstNode for ModModule { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::ModModule(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::ModModule(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for ModInteractive { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::ModInteractive(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::ModInteractive(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for ModExpression { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::ModExpression(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::ModExpression(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for ModFunctionType { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::ModFunctionType(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::ModFunctionType(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for StmtFunctionDef { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::StmtFunctionDef(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::StmtFunctionDef(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for StmtAsyncFunctionDef { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::StmtAsyncFunctionDef(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::StmtAsyncFunctionDef(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for StmtClassDef { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::StmtClassDef(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::StmtClassDef(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for StmtReturn { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::StmtReturn(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::StmtReturn(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for StmtDelete { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::StmtDelete(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::StmtDelete(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for StmtAssign { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::StmtAssign(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::StmtAssign(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for StmtAugAssign { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::StmtAugAssign(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) 
-> Option<&Self> { + if let AnyNodeRef::StmtAugAssign(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for StmtAnnAssign { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::StmtAnnAssign(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::StmtAnnAssign(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for StmtFor { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::StmtFor(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::StmtFor(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for StmtAsyncFor { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::StmtAsyncFor(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::StmtAsyncFor(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for StmtWhile { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::StmtWhile(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::StmtWhile(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for StmtIf { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::StmtIf(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::StmtIf(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for StmtWith { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::StmtWith(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::StmtWith(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for StmtAsyncWith { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::StmtAsyncWith(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::StmtAsyncWith(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for StmtMatch { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::StmtMatch(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::StmtMatch(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for StmtRaise { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::StmtRaise(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::StmtRaise(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for StmtTry { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::StmtTry(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::StmtTry(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for StmtTryStar { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::StmtTryStar(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::StmtTryStar(node) = kind { + 
Some(node) + } else { + None + } + } +} +impl AstNode for StmtAssert { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::StmtAssert(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::StmtAssert(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for StmtImport { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::StmtImport(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::StmtImport(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for StmtImportFrom { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::StmtImportFrom(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::StmtImportFrom(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for StmtGlobal { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::StmtGlobal(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::StmtGlobal(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for StmtNonlocal { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::StmtNonlocal(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::StmtNonlocal(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for StmtExpr { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::StmtExpr(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::StmtExpr(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for StmtPass { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::StmtPass(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::StmtPass(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for StmtBreak { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::StmtBreak(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::StmtBreak(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for StmtContinue { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::StmtContinue(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::StmtContinue(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for ExprBoolOp { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::ExprBoolOp(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::ExprBoolOp(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for ExprNamedExpr { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::ExprNamedExpr(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::ExprNamedExpr(node) = kind { + Some(node) + } else { + None + } + } 
+} +impl AstNode for ExprBinOp { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::ExprBinOp(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::ExprBinOp(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for ExprUnaryOp { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::ExprUnaryOp(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::ExprUnaryOp(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for ExprLambda { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::ExprLambda(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::ExprLambda(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for ExprIfExp { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::ExprIfExp(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::ExprIfExp(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for ExprDict { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::ExprDict(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::ExprDict(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for ExprSet { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::ExprSet(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::ExprSet(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for ExprListComp { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::ExprListComp(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::ExprListComp(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for ExprSetComp { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::ExprSetComp(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::ExprSetComp(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for ExprDictComp { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::ExprDictComp(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::ExprDictComp(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for ExprGeneratorExp { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::ExprGeneratorExp(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::ExprGeneratorExp(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for ExprAwait { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::ExprAwait(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::ExprAwait(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for ExprYield { + fn 
cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::ExprYield(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::ExprYield(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for ExprYieldFrom { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::ExprYieldFrom(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::ExprYieldFrom(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for ExprCompare { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::ExprCompare(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::ExprCompare(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for ExprCall { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::ExprCall(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::ExprCall(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for ExprFormattedValue { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::ExprFormattedValue(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::ExprFormattedValue(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for ExprJoinedStr { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::ExprJoinedStr(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::ExprJoinedStr(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for ExprConstant { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::ExprConstant(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::ExprConstant(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for ExprAttribute { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::ExprAttribute(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::ExprAttribute(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for ExprSubscript { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::ExprSubscript(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::ExprSubscript(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for ExprStarred { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::ExprStarred(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::ExprStarred(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for ExprName { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::ExprName(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::ExprName(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for ExprList { + fn 
cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::ExprList(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::ExprList(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for ExprTuple { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::ExprTuple(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::ExprTuple(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for ExprSlice { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::ExprSlice(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::ExprSlice(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for ExcepthandlerExceptHandler { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::ExcepthandlerExceptHandler(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::ExcepthandlerExceptHandler(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for PatternMatchValue { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::PatternMatchValue(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::PatternMatchValue(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for PatternMatchSingleton { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::PatternMatchSingleton(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::PatternMatchSingleton(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for PatternMatchSequence { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::PatternMatchSequence(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::PatternMatchSequence(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for PatternMatchMapping { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::PatternMatchMapping(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::PatternMatchMapping(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for PatternMatchClass { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::PatternMatchClass(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::PatternMatchClass(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for PatternMatchStar { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::PatternMatchStar(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::PatternMatchStar(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for PatternMatchAs { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::PatternMatchAs(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> 
Option<&Self> { + if let AnyNodeRef::PatternMatchAs(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for PatternMatchOr { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::PatternMatchOr(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::PatternMatchOr(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for TypeIgnoreTypeIgnore { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::TypeIgnoreTypeIgnore(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::TypeIgnoreTypeIgnore(node) = kind { + Some(node) + } else { + None + } + } +} + +impl AstNode for Comprehension { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::Comprehension(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::Comprehension(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for Arguments { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::Arguments(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::Arguments(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for Arg { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::Arg(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::Arg(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for Keyword { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::Keyword(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::Keyword(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for Alias { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::Alias(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::Alias(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for Withitem { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::Withitem(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::Withitem(node) = kind { + Some(node) + } else { + None + } + } +} +impl AstNode for MatchCase { + fn cast(kind: AnyNode) -> Option + where + Self: Sized, + { + if let AnyNode::MatchCase(node) = kind { + Some(node) + } else { + None + } + } + + fn cast_ref(kind: AnyNodeRef) -> Option<&Self> { + if let AnyNodeRef::MatchCase(node) = kind { + Some(node) + } else { + None + } + } +} + +impl From for AnyNode { + fn from(stmt: Stmt) -> Self { + match stmt { + Stmt::FunctionDef(node) => AnyNode::StmtFunctionDef(node), + Stmt::AsyncFunctionDef(node) => AnyNode::StmtAsyncFunctionDef(node), + Stmt::ClassDef(node) => AnyNode::StmtClassDef(node), + Stmt::Return(node) => AnyNode::StmtReturn(node), + Stmt::Delete(node) => AnyNode::StmtDelete(node), + Stmt::Assign(node) => AnyNode::StmtAssign(node), + Stmt::AugAssign(node) => AnyNode::StmtAugAssign(node), + Stmt::AnnAssign(node) => AnyNode::StmtAnnAssign(node), + Stmt::For(node) => AnyNode::StmtFor(node), + 
Stmt::AsyncFor(node) => AnyNode::StmtAsyncFor(node), + Stmt::While(node) => AnyNode::StmtWhile(node), + Stmt::If(node) => AnyNode::StmtIf(node), + Stmt::With(node) => AnyNode::StmtWith(node), + Stmt::AsyncWith(node) => AnyNode::StmtAsyncWith(node), + Stmt::Match(node) => AnyNode::StmtMatch(node), + Stmt::Raise(node) => AnyNode::StmtRaise(node), + Stmt::Try(node) => AnyNode::StmtTry(node), + Stmt::TryStar(node) => AnyNode::StmtTryStar(node), + Stmt::Assert(node) => AnyNode::StmtAssert(node), + Stmt::Import(node) => AnyNode::StmtImport(node), + Stmt::ImportFrom(node) => AnyNode::StmtImportFrom(node), + Stmt::Global(node) => AnyNode::StmtGlobal(node), + Stmt::Nonlocal(node) => AnyNode::StmtNonlocal(node), + Stmt::Expr(node) => AnyNode::StmtExpr(node), + Stmt::Pass(node) => AnyNode::StmtPass(node), + Stmt::Break(node) => AnyNode::StmtBreak(node), + Stmt::Continue(node) => AnyNode::StmtContinue(node), + } + } +} + +impl From for AnyNode { + fn from(expr: Expr) -> Self { + match expr { + Expr::BoolOp(node) => AnyNode::ExprBoolOp(node), + Expr::NamedExpr(node) => AnyNode::ExprNamedExpr(node), + Expr::BinOp(node) => AnyNode::ExprBinOp(node), + Expr::UnaryOp(node) => AnyNode::ExprUnaryOp(node), + Expr::Lambda(node) => AnyNode::ExprLambda(node), + Expr::IfExp(node) => AnyNode::ExprIfExp(node), + Expr::Dict(node) => AnyNode::ExprDict(node), + Expr::Set(node) => AnyNode::ExprSet(node), + Expr::ListComp(node) => AnyNode::ExprListComp(node), + Expr::SetComp(node) => AnyNode::ExprSetComp(node), + Expr::DictComp(node) => AnyNode::ExprDictComp(node), + Expr::GeneratorExp(node) => AnyNode::ExprGeneratorExp(node), + Expr::Await(node) => AnyNode::ExprAwait(node), + Expr::Yield(node) => AnyNode::ExprYield(node), + Expr::YieldFrom(node) => AnyNode::ExprYieldFrom(node), + Expr::Compare(node) => AnyNode::ExprCompare(node), + Expr::Call(node) => AnyNode::ExprCall(node), + Expr::FormattedValue(node) => AnyNode::ExprFormattedValue(node), + Expr::JoinedStr(node) => AnyNode::ExprJoinedStr(node), + Expr::Constant(node) => AnyNode::ExprConstant(node), + Expr::Attribute(node) => AnyNode::ExprAttribute(node), + Expr::Subscript(node) => AnyNode::ExprSubscript(node), + Expr::Starred(node) => AnyNode::ExprStarred(node), + Expr::Name(node) => AnyNode::ExprName(node), + Expr::List(node) => AnyNode::ExprList(node), + Expr::Tuple(node) => AnyNode::ExprTuple(node), + Expr::Slice(node) => AnyNode::ExprSlice(node), + } + } +} + +impl From for AnyNode { + fn from(module: Mod) -> Self { + match module { + Mod::Module(node) => AnyNode::ModModule(node), + Mod::Interactive(node) => AnyNode::ModInteractive(node), + Mod::Expression(node) => AnyNode::ModExpression(node), + Mod::FunctionType(node) => AnyNode::ModFunctionType(node), + } + } +} + +impl From for AnyNode { + fn from(pattern: Pattern) -> Self { + match pattern { + Pattern::MatchValue(node) => AnyNode::PatternMatchValue(node), + Pattern::MatchSingleton(node) => AnyNode::PatternMatchSingleton(node), + Pattern::MatchSequence(node) => AnyNode::PatternMatchSequence(node), + Pattern::MatchMapping(node) => AnyNode::PatternMatchMapping(node), + Pattern::MatchClass(node) => AnyNode::PatternMatchClass(node), + Pattern::MatchStar(node) => AnyNode::PatternMatchStar(node), + Pattern::MatchAs(node) => AnyNode::PatternMatchAs(node), + Pattern::MatchOr(node) => AnyNode::PatternMatchOr(node), + } + } +} + +impl From for AnyNode { + fn from(handler: Excepthandler) -> Self { + match handler { + Excepthandler::ExceptHandler(handler) => AnyNode::ExcepthandlerExceptHandler(handler), + } + } +} + 
+impl From for AnyNode { + fn from(ignore: TypeIgnore) -> Self { + match ignore { + TypeIgnore::TypeIgnore(ignore) => AnyNode::TypeIgnoreTypeIgnore(ignore), + } + } +} + +impl From for AnyNode { + fn from(node: ModModule) -> Self { + AnyNode::ModModule(node) + } +} + +impl From for AnyNode { + fn from(node: ModInteractive) -> Self { + AnyNode::ModInteractive(node) + } +} + +impl From for AnyNode { + fn from(node: ModExpression) -> Self { + AnyNode::ModExpression(node) + } +} + +impl From for AnyNode { + fn from(node: ModFunctionType) -> Self { + AnyNode::ModFunctionType(node) + } +} + +impl From for AnyNode { + fn from(node: StmtFunctionDef) -> Self { + AnyNode::StmtFunctionDef(node) + } +} + +impl From for AnyNode { + fn from(node: StmtAsyncFunctionDef) -> Self { + AnyNode::StmtAsyncFunctionDef(node) + } +} + +impl From for AnyNode { + fn from(node: StmtClassDef) -> Self { + AnyNode::StmtClassDef(node) + } +} + +impl From for AnyNode { + fn from(node: StmtReturn) -> Self { + AnyNode::StmtReturn(node) + } +} + +impl From for AnyNode { + fn from(node: StmtDelete) -> Self { + AnyNode::StmtDelete(node) + } +} + +impl From for AnyNode { + fn from(node: StmtAssign) -> Self { + AnyNode::StmtAssign(node) + } +} + +impl From for AnyNode { + fn from(node: StmtAugAssign) -> Self { + AnyNode::StmtAugAssign(node) + } +} + +impl From for AnyNode { + fn from(node: StmtAnnAssign) -> Self { + AnyNode::StmtAnnAssign(node) + } +} + +impl From for AnyNode { + fn from(node: StmtFor) -> Self { + AnyNode::StmtFor(node) + } +} + +impl From for AnyNode { + fn from(node: StmtAsyncFor) -> Self { + AnyNode::StmtAsyncFor(node) + } +} + +impl From for AnyNode { + fn from(node: StmtWhile) -> Self { + AnyNode::StmtWhile(node) + } +} + +impl From for AnyNode { + fn from(node: StmtIf) -> Self { + AnyNode::StmtIf(node) + } +} + +impl From for AnyNode { + fn from(node: StmtWith) -> Self { + AnyNode::StmtWith(node) + } +} + +impl From for AnyNode { + fn from(node: StmtAsyncWith) -> Self { + AnyNode::StmtAsyncWith(node) + } +} + +impl From for AnyNode { + fn from(node: StmtMatch) -> Self { + AnyNode::StmtMatch(node) + } +} + +impl From for AnyNode { + fn from(node: StmtRaise) -> Self { + AnyNode::StmtRaise(node) + } +} + +impl From for AnyNode { + fn from(node: StmtTry) -> Self { + AnyNode::StmtTry(node) + } +} + +impl From for AnyNode { + fn from(node: StmtTryStar) -> Self { + AnyNode::StmtTryStar(node) + } +} + +impl From for AnyNode { + fn from(node: StmtAssert) -> Self { + AnyNode::StmtAssert(node) + } +} + +impl From for AnyNode { + fn from(node: StmtImport) -> Self { + AnyNode::StmtImport(node) + } +} + +impl From for AnyNode { + fn from(node: StmtImportFrom) -> Self { + AnyNode::StmtImportFrom(node) + } +} + +impl From for AnyNode { + fn from(node: StmtGlobal) -> Self { + AnyNode::StmtGlobal(node) + } +} + +impl From for AnyNode { + fn from(node: StmtNonlocal) -> Self { + AnyNode::StmtNonlocal(node) + } +} + +impl From for AnyNode { + fn from(node: StmtExpr) -> Self { + AnyNode::StmtExpr(node) + } +} + +impl From for AnyNode { + fn from(node: StmtPass) -> Self { + AnyNode::StmtPass(node) + } +} + +impl From for AnyNode { + fn from(node: StmtBreak) -> Self { + AnyNode::StmtBreak(node) + } +} + +impl From for AnyNode { + fn from(node: StmtContinue) -> Self { + AnyNode::StmtContinue(node) + } +} + +impl From for AnyNode { + fn from(node: ExprBoolOp) -> Self { + AnyNode::ExprBoolOp(node) + } +} + +impl From for AnyNode { + fn from(node: ExprNamedExpr) -> Self { + AnyNode::ExprNamedExpr(node) + } +} + +impl From for AnyNode 
{ + fn from(node: ExprBinOp) -> Self { + AnyNode::ExprBinOp(node) + } +} + +impl From for AnyNode { + fn from(node: ExprUnaryOp) -> Self { + AnyNode::ExprUnaryOp(node) + } +} + +impl From for AnyNode { + fn from(node: ExprLambda) -> Self { + AnyNode::ExprLambda(node) + } +} + +impl From for AnyNode { + fn from(node: ExprIfExp) -> Self { + AnyNode::ExprIfExp(node) + } +} + +impl From for AnyNode { + fn from(node: ExprDict) -> Self { + AnyNode::ExprDict(node) + } +} + +impl From for AnyNode { + fn from(node: ExprSet) -> Self { + AnyNode::ExprSet(node) + } +} + +impl From for AnyNode { + fn from(node: ExprListComp) -> Self { + AnyNode::ExprListComp(node) + } +} + +impl From for AnyNode { + fn from(node: ExprSetComp) -> Self { + AnyNode::ExprSetComp(node) + } +} + +impl From for AnyNode { + fn from(node: ExprDictComp) -> Self { + AnyNode::ExprDictComp(node) + } +} + +impl From for AnyNode { + fn from(node: ExprGeneratorExp) -> Self { + AnyNode::ExprGeneratorExp(node) + } +} + +impl From for AnyNode { + fn from(node: ExprAwait) -> Self { + AnyNode::ExprAwait(node) + } +} + +impl From for AnyNode { + fn from(node: ExprYield) -> Self { + AnyNode::ExprYield(node) + } +} + +impl From for AnyNode { + fn from(node: ExprYieldFrom) -> Self { + AnyNode::ExprYieldFrom(node) + } +} + +impl From for AnyNode { + fn from(node: ExprCompare) -> Self { + AnyNode::ExprCompare(node) + } +} + +impl From for AnyNode { + fn from(node: ExprCall) -> Self { + AnyNode::ExprCall(node) + } +} + +impl From for AnyNode { + fn from(node: ExprFormattedValue) -> Self { + AnyNode::ExprFormattedValue(node) + } +} + +impl From for AnyNode { + fn from(node: ExprJoinedStr) -> Self { + AnyNode::ExprJoinedStr(node) + } +} + +impl From for AnyNode { + fn from(node: ExprConstant) -> Self { + AnyNode::ExprConstant(node) + } +} + +impl From for AnyNode { + fn from(node: ExprAttribute) -> Self { + AnyNode::ExprAttribute(node) + } +} + +impl From for AnyNode { + fn from(node: ExprSubscript) -> Self { + AnyNode::ExprSubscript(node) + } +} + +impl From for AnyNode { + fn from(node: ExprStarred) -> Self { + AnyNode::ExprStarred(node) + } +} + +impl From for AnyNode { + fn from(node: ExprName) -> Self { + AnyNode::ExprName(node) + } +} + +impl From for AnyNode { + fn from(node: ExprList) -> Self { + AnyNode::ExprList(node) + } +} + +impl From for AnyNode { + fn from(node: ExprTuple) -> Self { + AnyNode::ExprTuple(node) + } +} + +impl From for AnyNode { + fn from(node: ExprSlice) -> Self { + AnyNode::ExprSlice(node) + } +} + +impl From for AnyNode { + fn from(node: ExcepthandlerExceptHandler) -> Self { + AnyNode::ExcepthandlerExceptHandler(node) + } +} + +impl From for AnyNode { + fn from(node: PatternMatchValue) -> Self { + AnyNode::PatternMatchValue(node) + } +} + +impl From for AnyNode { + fn from(node: PatternMatchSingleton) -> Self { + AnyNode::PatternMatchSingleton(node) + } +} + +impl From for AnyNode { + fn from(node: PatternMatchSequence) -> Self { + AnyNode::PatternMatchSequence(node) + } +} + +impl From for AnyNode { + fn from(node: PatternMatchMapping) -> Self { + AnyNode::PatternMatchMapping(node) + } +} + +impl From for AnyNode { + fn from(node: PatternMatchClass) -> Self { + AnyNode::PatternMatchClass(node) + } +} + +impl From for AnyNode { + fn from(node: PatternMatchStar) -> Self { + AnyNode::PatternMatchStar(node) + } +} + +impl From for AnyNode { + fn from(node: PatternMatchAs) -> Self { + AnyNode::PatternMatchAs(node) + } +} + +impl From for AnyNode { + fn from(node: PatternMatchOr) -> Self { + AnyNode::PatternMatchOr(node) 
+ } +} + +impl From for AnyNode { + fn from(node: TypeIgnoreTypeIgnore) -> Self { + AnyNode::TypeIgnoreTypeIgnore(node) + } +} + +impl From for AnyNode { + fn from(node: Comprehension) -> Self { + AnyNode::Comprehension(node) + } +} +impl From for AnyNode { + fn from(node: Arguments) -> Self { + AnyNode::Arguments(node) + } +} +impl From for AnyNode { + fn from(node: Arg) -> Self { + AnyNode::Arg(node) + } +} +impl From for AnyNode { + fn from(node: Keyword) -> Self { + AnyNode::Keyword(node) + } +} +impl From for AnyNode { + fn from(node: Alias) -> Self { + AnyNode::Alias(node) + } +} +impl From for AnyNode { + fn from(node: Withitem) -> Self { + AnyNode::Withitem(node) + } +} +impl From for AnyNode { + fn from(node: MatchCase) -> Self { + AnyNode::MatchCase(node) + } +} + +impl Ranged for AnyNode { + fn range(&self) -> TextRange { + match self { + AnyNode::ModModule(node) => node.range(), + AnyNode::ModInteractive(node) => node.range(), + AnyNode::ModExpression(node) => node.range(), + AnyNode::ModFunctionType(node) => node.range(), + AnyNode::StmtFunctionDef(node) => node.range(), + AnyNode::StmtAsyncFunctionDef(node) => node.range(), + AnyNode::StmtClassDef(node) => node.range(), + AnyNode::StmtReturn(node) => node.range(), + AnyNode::StmtDelete(node) => node.range(), + AnyNode::StmtAssign(node) => node.range(), + AnyNode::StmtAugAssign(node) => node.range(), + AnyNode::StmtAnnAssign(node) => node.range(), + AnyNode::StmtFor(node) => node.range(), + AnyNode::StmtAsyncFor(node) => node.range(), + AnyNode::StmtWhile(node) => node.range(), + AnyNode::StmtIf(node) => node.range(), + AnyNode::StmtWith(node) => node.range(), + AnyNode::StmtAsyncWith(node) => node.range(), + AnyNode::StmtMatch(node) => node.range(), + AnyNode::StmtRaise(node) => node.range(), + AnyNode::StmtTry(node) => node.range(), + AnyNode::StmtTryStar(node) => node.range(), + AnyNode::StmtAssert(node) => node.range(), + AnyNode::StmtImport(node) => node.range(), + AnyNode::StmtImportFrom(node) => node.range(), + AnyNode::StmtGlobal(node) => node.range(), + AnyNode::StmtNonlocal(node) => node.range(), + AnyNode::StmtExpr(node) => node.range(), + AnyNode::StmtPass(node) => node.range(), + AnyNode::StmtBreak(node) => node.range(), + AnyNode::StmtContinue(node) => node.range(), + AnyNode::ExprBoolOp(node) => node.range(), + AnyNode::ExprNamedExpr(node) => node.range(), + AnyNode::ExprBinOp(node) => node.range(), + AnyNode::ExprUnaryOp(node) => node.range(), + AnyNode::ExprLambda(node) => node.range(), + AnyNode::ExprIfExp(node) => node.range(), + AnyNode::ExprDict(node) => node.range(), + AnyNode::ExprSet(node) => node.range(), + AnyNode::ExprListComp(node) => node.range(), + AnyNode::ExprSetComp(node) => node.range(), + AnyNode::ExprDictComp(node) => node.range(), + AnyNode::ExprGeneratorExp(node) => node.range(), + AnyNode::ExprAwait(node) => node.range(), + AnyNode::ExprYield(node) => node.range(), + AnyNode::ExprYieldFrom(node) => node.range(), + AnyNode::ExprCompare(node) => node.range(), + AnyNode::ExprCall(node) => node.range(), + AnyNode::ExprFormattedValue(node) => node.range(), + AnyNode::ExprJoinedStr(node) => node.range(), + AnyNode::ExprConstant(node) => node.range(), + AnyNode::ExprAttribute(node) => node.range(), + AnyNode::ExprSubscript(node) => node.range(), + AnyNode::ExprStarred(node) => node.range(), + AnyNode::ExprName(node) => node.range(), + AnyNode::ExprList(node) => node.range(), + AnyNode::ExprTuple(node) => node.range(), + AnyNode::ExprSlice(node) => node.range(), + 
AnyNode::ExcepthandlerExceptHandler(node) => node.range(), + AnyNode::PatternMatchValue(node) => node.range(), + AnyNode::PatternMatchSingleton(node) => node.range(), + AnyNode::PatternMatchSequence(node) => node.range(), + AnyNode::PatternMatchMapping(node) => node.range(), + AnyNode::PatternMatchClass(node) => node.range(), + AnyNode::PatternMatchStar(node) => node.range(), + AnyNode::PatternMatchAs(node) => node.range(), + AnyNode::PatternMatchOr(node) => node.range(), + AnyNode::TypeIgnoreTypeIgnore(node) => node.range(), + AnyNode::Comprehension(node) => node.range(), + AnyNode::Arguments(node) => node.range(), + AnyNode::Arg(node) => node.range(), + AnyNode::Keyword(node) => node.range(), + AnyNode::Alias(node) => node.range(), + AnyNode::Withitem(node) => node.range(), + AnyNode::MatchCase(node) => node.range(), + } + } +} + +#[derive(Copy, Clone, Debug, is_macro::Is, PartialEq)] +pub enum AnyNodeRef<'a> { + ModModule(&'a ModModule), + ModInteractive(&'a ModInteractive), + ModExpression(&'a ModExpression), + ModFunctionType(&'a ModFunctionType), + StmtFunctionDef(&'a StmtFunctionDef), + StmtAsyncFunctionDef(&'a StmtAsyncFunctionDef), + StmtClassDef(&'a StmtClassDef), + StmtReturn(&'a StmtReturn), + StmtDelete(&'a StmtDelete), + StmtAssign(&'a StmtAssign), + StmtAugAssign(&'a StmtAugAssign), + StmtAnnAssign(&'a StmtAnnAssign), + StmtFor(&'a StmtFor), + StmtAsyncFor(&'a StmtAsyncFor), + StmtWhile(&'a StmtWhile), + StmtIf(&'a StmtIf), + StmtWith(&'a StmtWith), + StmtAsyncWith(&'a StmtAsyncWith), + StmtMatch(&'a StmtMatch), + StmtRaise(&'a StmtRaise), + StmtTry(&'a StmtTry), + StmtTryStar(&'a StmtTryStar), + StmtAssert(&'a StmtAssert), + StmtImport(&'a StmtImport), + StmtImportFrom(&'a StmtImportFrom), + StmtGlobal(&'a StmtGlobal), + StmtNonlocal(&'a StmtNonlocal), + StmtExpr(&'a StmtExpr), + StmtPass(&'a StmtPass), + StmtBreak(&'a StmtBreak), + StmtContinue(&'a StmtContinue), + ExprBoolOp(&'a ExprBoolOp), + ExprNamedExpr(&'a ExprNamedExpr), + ExprBinOp(&'a ExprBinOp), + ExprUnaryOp(&'a ExprUnaryOp), + ExprLambda(&'a ExprLambda), + ExprIfExp(&'a ExprIfExp), + ExprDict(&'a ExprDict), + ExprSet(&'a ExprSet), + ExprListComp(&'a ExprListComp), + ExprSetComp(&'a ExprSetComp), + ExprDictComp(&'a ExprDictComp), + ExprGeneratorExp(&'a ExprGeneratorExp), + ExprAwait(&'a ExprAwait), + ExprYield(&'a ExprYield), + ExprYieldFrom(&'a ExprYieldFrom), + ExprCompare(&'a ExprCompare), + ExprCall(&'a ExprCall), + ExprFormattedValue(&'a ExprFormattedValue), + ExprJoinedStr(&'a ExprJoinedStr), + ExprConstant(&'a ExprConstant), + ExprAttribute(&'a ExprAttribute), + ExprSubscript(&'a ExprSubscript), + ExprStarred(&'a ExprStarred), + ExprName(&'a ExprName), + ExprList(&'a ExprList), + ExprTuple(&'a ExprTuple), + ExprSlice(&'a ExprSlice), + ExcepthandlerExceptHandler(&'a ExcepthandlerExceptHandler), + PatternMatchValue(&'a PatternMatchValue), + PatternMatchSingleton(&'a PatternMatchSingleton), + PatternMatchSequence(&'a PatternMatchSequence), + PatternMatchMapping(&'a PatternMatchMapping), + PatternMatchClass(&'a PatternMatchClass), + PatternMatchStar(&'a PatternMatchStar), + PatternMatchAs(&'a PatternMatchAs), + PatternMatchOr(&'a PatternMatchOr), + TypeIgnoreTypeIgnore(&'a TypeIgnoreTypeIgnore), + Comprehension(&'a Comprehension), + Arguments(&'a Arguments), + Arg(&'a Arg), + Keyword(&'a Keyword), + Alias(&'a Alias), + Withitem(&'a Withitem), + MatchCase(&'a MatchCase), +} + +impl AnyNodeRef<'_> { + /// Returns the node's [`kind`](NodeKind) that has no data associated and is [`Copy`]. 
+ pub const fn kind(self) -> NodeKind { + match self { + AnyNodeRef::ModModule(_) => NodeKind::ModModule, + AnyNodeRef::ModInteractive(_) => NodeKind::ModInteractive, + AnyNodeRef::ModExpression(_) => NodeKind::ModExpression, + AnyNodeRef::ModFunctionType(_) => NodeKind::ModFunctionType, + AnyNodeRef::StmtFunctionDef(_) => NodeKind::StmtFunctionDef, + AnyNodeRef::StmtAsyncFunctionDef(_) => NodeKind::StmtAsyncFunctionDef, + AnyNodeRef::StmtClassDef(_) => NodeKind::StmtClassDef, + AnyNodeRef::StmtReturn(_) => NodeKind::StmtReturn, + AnyNodeRef::StmtDelete(_) => NodeKind::StmtDelete, + AnyNodeRef::StmtAssign(_) => NodeKind::StmtAssign, + AnyNodeRef::StmtAugAssign(_) => NodeKind::StmtAugAssign, + AnyNodeRef::StmtAnnAssign(_) => NodeKind::StmtAnnAssign, + AnyNodeRef::StmtFor(_) => NodeKind::StmtFor, + AnyNodeRef::StmtAsyncFor(_) => NodeKind::StmtAsyncFor, + AnyNodeRef::StmtWhile(_) => NodeKind::StmtWhile, + AnyNodeRef::StmtIf(_) => NodeKind::StmtIf, + AnyNodeRef::StmtWith(_) => NodeKind::StmtWith, + AnyNodeRef::StmtAsyncWith(_) => NodeKind::StmtAsyncWith, + AnyNodeRef::StmtMatch(_) => NodeKind::StmtMatch, + AnyNodeRef::StmtRaise(_) => NodeKind::StmtRaise, + AnyNodeRef::StmtTry(_) => NodeKind::StmtTry, + AnyNodeRef::StmtTryStar(_) => NodeKind::StmtTryStar, + AnyNodeRef::StmtAssert(_) => NodeKind::StmtAssert, + AnyNodeRef::StmtImport(_) => NodeKind::StmtImport, + AnyNodeRef::StmtImportFrom(_) => NodeKind::StmtImportFrom, + AnyNodeRef::StmtGlobal(_) => NodeKind::StmtGlobal, + AnyNodeRef::StmtNonlocal(_) => NodeKind::StmtNonlocal, + AnyNodeRef::StmtExpr(_) => NodeKind::StmtExpr, + AnyNodeRef::StmtPass(_) => NodeKind::StmtPass, + AnyNodeRef::StmtBreak(_) => NodeKind::StmtBreak, + AnyNodeRef::StmtContinue(_) => NodeKind::StmtContinue, + AnyNodeRef::ExprBoolOp(_) => NodeKind::ExprBoolOp, + AnyNodeRef::ExprNamedExpr(_) => NodeKind::ExprNamedExpr, + AnyNodeRef::ExprBinOp(_) => NodeKind::ExprBinOp, + AnyNodeRef::ExprUnaryOp(_) => NodeKind::ExprUnaryOp, + AnyNodeRef::ExprLambda(_) => NodeKind::ExprLambda, + AnyNodeRef::ExprIfExp(_) => NodeKind::ExprIfExp, + AnyNodeRef::ExprDict(_) => NodeKind::ExprDict, + AnyNodeRef::ExprSet(_) => NodeKind::ExprSet, + AnyNodeRef::ExprListComp(_) => NodeKind::ExprListComp, + AnyNodeRef::ExprSetComp(_) => NodeKind::ExprSetComp, + AnyNodeRef::ExprDictComp(_) => NodeKind::ExprDictComp, + AnyNodeRef::ExprGeneratorExp(_) => NodeKind::ExprGeneratorExp, + AnyNodeRef::ExprAwait(_) => NodeKind::ExprAwait, + AnyNodeRef::ExprYield(_) => NodeKind::ExprYield, + AnyNodeRef::ExprYieldFrom(_) => NodeKind::ExprYieldFrom, + AnyNodeRef::ExprCompare(_) => NodeKind::ExprCompare, + AnyNodeRef::ExprCall(_) => NodeKind::ExprCall, + AnyNodeRef::ExprFormattedValue(_) => NodeKind::ExprFormattedValue, + AnyNodeRef::ExprJoinedStr(_) => NodeKind::ExprJoinedStr, + AnyNodeRef::ExprConstant(_) => NodeKind::ExprConstant, + AnyNodeRef::ExprAttribute(_) => NodeKind::ExprAttribute, + AnyNodeRef::ExprSubscript(_) => NodeKind::ExprSubscript, + AnyNodeRef::ExprStarred(_) => NodeKind::ExprStarred, + AnyNodeRef::ExprName(_) => NodeKind::ExprName, + AnyNodeRef::ExprList(_) => NodeKind::ExprList, + AnyNodeRef::ExprTuple(_) => NodeKind::ExprTuple, + AnyNodeRef::ExprSlice(_) => NodeKind::ExprSlice, + AnyNodeRef::ExcepthandlerExceptHandler(_) => NodeKind::ExcepthandlerExceptHandler, + AnyNodeRef::PatternMatchValue(_) => NodeKind::PatternMatchValue, + AnyNodeRef::PatternMatchSingleton(_) => NodeKind::PatternMatchSingleton, + AnyNodeRef::PatternMatchSequence(_) => NodeKind::PatternMatchSequence, + 
AnyNodeRef::PatternMatchMapping(_) => NodeKind::PatternMatchMapping, + AnyNodeRef::PatternMatchClass(_) => NodeKind::PatternMatchClass, + AnyNodeRef::PatternMatchStar(_) => NodeKind::PatternMatchStar, + AnyNodeRef::PatternMatchAs(_) => NodeKind::PatternMatchAs, + AnyNodeRef::PatternMatchOr(_) => NodeKind::PatternMatchOr, + AnyNodeRef::TypeIgnoreTypeIgnore(_) => NodeKind::TypeIgnoreTypeIgnore, + AnyNodeRef::Comprehension(_) => NodeKind::Comprehension, + AnyNodeRef::Arguments(_) => NodeKind::Arguments, + AnyNodeRef::Arg(_) => NodeKind::Arg, + AnyNodeRef::Keyword(_) => NodeKind::Keyword, + AnyNodeRef::Alias(_) => NodeKind::Alias, + AnyNodeRef::Withitem(_) => NodeKind::Withitem, + AnyNodeRef::MatchCase(_) => NodeKind::MatchCase, + } + } +} + +impl<'a> From<&'a ModModule> for AnyNodeRef<'a> { + fn from(node: &'a ModModule) -> Self { + AnyNodeRef::ModModule(node) + } +} + +impl<'a> From<&'a ModInteractive> for AnyNodeRef<'a> { + fn from(node: &'a ModInteractive) -> Self { + AnyNodeRef::ModInteractive(node) + } +} + +impl<'a> From<&'a ModExpression> for AnyNodeRef<'a> { + fn from(node: &'a ModExpression) -> Self { + AnyNodeRef::ModExpression(node) + } +} + +impl<'a> From<&'a ModFunctionType> for AnyNodeRef<'a> { + fn from(node: &'a ModFunctionType) -> Self { + AnyNodeRef::ModFunctionType(node) + } +} + +impl<'a> From<&'a StmtFunctionDef> for AnyNodeRef<'a> { + fn from(node: &'a StmtFunctionDef) -> Self { + AnyNodeRef::StmtFunctionDef(node) + } +} + +impl<'a> From<&'a StmtAsyncFunctionDef> for AnyNodeRef<'a> { + fn from(node: &'a StmtAsyncFunctionDef) -> Self { + AnyNodeRef::StmtAsyncFunctionDef(node) + } +} + +impl<'a> From<&'a StmtClassDef> for AnyNodeRef<'a> { + fn from(node: &'a StmtClassDef) -> Self { + AnyNodeRef::StmtClassDef(node) + } +} + +impl<'a> From<&'a StmtReturn> for AnyNodeRef<'a> { + fn from(node: &'a StmtReturn) -> Self { + AnyNodeRef::StmtReturn(node) + } +} + +impl<'a> From<&'a StmtDelete> for AnyNodeRef<'a> { + fn from(node: &'a StmtDelete) -> Self { + AnyNodeRef::StmtDelete(node) + } +} + +impl<'a> From<&'a StmtAssign> for AnyNodeRef<'a> { + fn from(node: &'a StmtAssign) -> Self { + AnyNodeRef::StmtAssign(node) + } +} + +impl<'a> From<&'a StmtAugAssign> for AnyNodeRef<'a> { + fn from(node: &'a StmtAugAssign) -> Self { + AnyNodeRef::StmtAugAssign(node) + } +} + +impl<'a> From<&'a StmtAnnAssign> for AnyNodeRef<'a> { + fn from(node: &'a StmtAnnAssign) -> Self { + AnyNodeRef::StmtAnnAssign(node) + } +} + +impl<'a> From<&'a StmtFor> for AnyNodeRef<'a> { + fn from(node: &'a StmtFor) -> Self { + AnyNodeRef::StmtFor(node) + } +} + +impl<'a> From<&'a StmtAsyncFor> for AnyNodeRef<'a> { + fn from(node: &'a StmtAsyncFor) -> Self { + AnyNodeRef::StmtAsyncFor(node) + } +} + +impl<'a> From<&'a StmtWhile> for AnyNodeRef<'a> { + fn from(node: &'a StmtWhile) -> Self { + AnyNodeRef::StmtWhile(node) + } +} + +impl<'a> From<&'a StmtIf> for AnyNodeRef<'a> { + fn from(node: &'a StmtIf) -> Self { + AnyNodeRef::StmtIf(node) + } +} + +impl<'a> From<&'a StmtWith> for AnyNodeRef<'a> { + fn from(node: &'a StmtWith) -> Self { + AnyNodeRef::StmtWith(node) + } +} + +impl<'a> From<&'a StmtAsyncWith> for AnyNodeRef<'a> { + fn from(node: &'a StmtAsyncWith) -> Self { + AnyNodeRef::StmtAsyncWith(node) + } +} + +impl<'a> From<&'a StmtMatch> for AnyNodeRef<'a> { + fn from(node: &'a StmtMatch) -> Self { + AnyNodeRef::StmtMatch(node) + } +} + +impl<'a> From<&'a StmtRaise> for AnyNodeRef<'a> { + fn from(node: &'a StmtRaise) -> Self { + AnyNodeRef::StmtRaise(node) + } +} + +impl<'a> From<&'a StmtTry> for 
AnyNodeRef<'a> { + fn from(node: &'a StmtTry) -> Self { + AnyNodeRef::StmtTry(node) + } +} + +impl<'a> From<&'a StmtTryStar> for AnyNodeRef<'a> { + fn from(node: &'a StmtTryStar) -> Self { + AnyNodeRef::StmtTryStar(node) + } +} + +impl<'a> From<&'a StmtAssert> for AnyNodeRef<'a> { + fn from(node: &'a StmtAssert) -> Self { + AnyNodeRef::StmtAssert(node) + } +} + +impl<'a> From<&'a StmtImport> for AnyNodeRef<'a> { + fn from(node: &'a StmtImport) -> Self { + AnyNodeRef::StmtImport(node) + } +} + +impl<'a> From<&'a StmtImportFrom> for AnyNodeRef<'a> { + fn from(node: &'a StmtImportFrom) -> Self { + AnyNodeRef::StmtImportFrom(node) + } +} + +impl<'a> From<&'a StmtGlobal> for AnyNodeRef<'a> { + fn from(node: &'a StmtGlobal) -> Self { + AnyNodeRef::StmtGlobal(node) + } +} + +impl<'a> From<&'a StmtNonlocal> for AnyNodeRef<'a> { + fn from(node: &'a StmtNonlocal) -> Self { + AnyNodeRef::StmtNonlocal(node) + } +} + +impl<'a> From<&'a StmtExpr> for AnyNodeRef<'a> { + fn from(node: &'a StmtExpr) -> Self { + AnyNodeRef::StmtExpr(node) + } +} + +impl<'a> From<&'a StmtPass> for AnyNodeRef<'a> { + fn from(node: &'a StmtPass) -> Self { + AnyNodeRef::StmtPass(node) + } +} + +impl<'a> From<&'a StmtBreak> for AnyNodeRef<'a> { + fn from(node: &'a StmtBreak) -> Self { + AnyNodeRef::StmtBreak(node) + } +} + +impl<'a> From<&'a StmtContinue> for AnyNodeRef<'a> { + fn from(node: &'a StmtContinue) -> Self { + AnyNodeRef::StmtContinue(node) + } +} + +impl<'a> From<&'a ExprBoolOp> for AnyNodeRef<'a> { + fn from(node: &'a ExprBoolOp) -> Self { + AnyNodeRef::ExprBoolOp(node) + } +} + +impl<'a> From<&'a ExprNamedExpr> for AnyNodeRef<'a> { + fn from(node: &'a ExprNamedExpr) -> Self { + AnyNodeRef::ExprNamedExpr(node) + } +} + +impl<'a> From<&'a ExprBinOp> for AnyNodeRef<'a> { + fn from(node: &'a ExprBinOp) -> Self { + AnyNodeRef::ExprBinOp(node) + } +} + +impl<'a> From<&'a ExprUnaryOp> for AnyNodeRef<'a> { + fn from(node: &'a ExprUnaryOp) -> Self { + AnyNodeRef::ExprUnaryOp(node) + } +} + +impl<'a> From<&'a ExprLambda> for AnyNodeRef<'a> { + fn from(node: &'a ExprLambda) -> Self { + AnyNodeRef::ExprLambda(node) + } +} + +impl<'a> From<&'a ExprIfExp> for AnyNodeRef<'a> { + fn from(node: &'a ExprIfExp) -> Self { + AnyNodeRef::ExprIfExp(node) + } +} + +impl<'a> From<&'a ExprDict> for AnyNodeRef<'a> { + fn from(node: &'a ExprDict) -> Self { + AnyNodeRef::ExprDict(node) + } +} + +impl<'a> From<&'a ExprSet> for AnyNodeRef<'a> { + fn from(node: &'a ExprSet) -> Self { + AnyNodeRef::ExprSet(node) + } +} + +impl<'a> From<&'a ExprListComp> for AnyNodeRef<'a> { + fn from(node: &'a ExprListComp) -> Self { + AnyNodeRef::ExprListComp(node) + } +} + +impl<'a> From<&'a ExprSetComp> for AnyNodeRef<'a> { + fn from(node: &'a ExprSetComp) -> Self { + AnyNodeRef::ExprSetComp(node) + } +} + +impl<'a> From<&'a ExprDictComp> for AnyNodeRef<'a> { + fn from(node: &'a ExprDictComp) -> Self { + AnyNodeRef::ExprDictComp(node) + } +} + +impl<'a> From<&'a ExprGeneratorExp> for AnyNodeRef<'a> { + fn from(node: &'a ExprGeneratorExp) -> Self { + AnyNodeRef::ExprGeneratorExp(node) + } +} + +impl<'a> From<&'a ExprAwait> for AnyNodeRef<'a> { + fn from(node: &'a ExprAwait) -> Self { + AnyNodeRef::ExprAwait(node) + } +} + +impl<'a> From<&'a ExprYield> for AnyNodeRef<'a> { + fn from(node: &'a ExprYield) -> Self { + AnyNodeRef::ExprYield(node) + } +} + +impl<'a> From<&'a ExprYieldFrom> for AnyNodeRef<'a> { + fn from(node: &'a ExprYieldFrom) -> Self { + AnyNodeRef::ExprYieldFrom(node) + } +} + +impl<'a> From<&'a ExprCompare> for AnyNodeRef<'a> { + fn from(node: &'a 
ExprCompare) -> Self { + AnyNodeRef::ExprCompare(node) + } +} + +impl<'a> From<&'a ExprCall> for AnyNodeRef<'a> { + fn from(node: &'a ExprCall) -> Self { + AnyNodeRef::ExprCall(node) + } +} + +impl<'a> From<&'a ExprFormattedValue> for AnyNodeRef<'a> { + fn from(node: &'a ExprFormattedValue) -> Self { + AnyNodeRef::ExprFormattedValue(node) + } +} + +impl<'a> From<&'a ExprJoinedStr> for AnyNodeRef<'a> { + fn from(node: &'a ExprJoinedStr) -> Self { + AnyNodeRef::ExprJoinedStr(node) + } +} + +impl<'a> From<&'a ExprConstant> for AnyNodeRef<'a> { + fn from(node: &'a ExprConstant) -> Self { + AnyNodeRef::ExprConstant(node) + } +} + +impl<'a> From<&'a ExprAttribute> for AnyNodeRef<'a> { + fn from(node: &'a ExprAttribute) -> Self { + AnyNodeRef::ExprAttribute(node) + } +} + +impl<'a> From<&'a ExprSubscript> for AnyNodeRef<'a> { + fn from(node: &'a ExprSubscript) -> Self { + AnyNodeRef::ExprSubscript(node) + } +} + +impl<'a> From<&'a ExprStarred> for AnyNodeRef<'a> { + fn from(node: &'a ExprStarred) -> Self { + AnyNodeRef::ExprStarred(node) + } +} + +impl<'a> From<&'a ExprName> for AnyNodeRef<'a> { + fn from(node: &'a ExprName) -> Self { + AnyNodeRef::ExprName(node) + } +} + +impl<'a> From<&'a ExprList> for AnyNodeRef<'a> { + fn from(node: &'a ExprList) -> Self { + AnyNodeRef::ExprList(node) + } +} + +impl<'a> From<&'a ExprTuple> for AnyNodeRef<'a> { + fn from(node: &'a ExprTuple) -> Self { + AnyNodeRef::ExprTuple(node) + } +} + +impl<'a> From<&'a ExprSlice> for AnyNodeRef<'a> { + fn from(node: &'a ExprSlice) -> Self { + AnyNodeRef::ExprSlice(node) + } +} + +impl<'a> From<&'a ExcepthandlerExceptHandler> for AnyNodeRef<'a> { + fn from(node: &'a ExcepthandlerExceptHandler) -> Self { + AnyNodeRef::ExcepthandlerExceptHandler(node) + } +} + +impl<'a> From<&'a PatternMatchValue> for AnyNodeRef<'a> { + fn from(node: &'a PatternMatchValue) -> Self { + AnyNodeRef::PatternMatchValue(node) + } +} + +impl<'a> From<&'a PatternMatchSingleton> for AnyNodeRef<'a> { + fn from(node: &'a PatternMatchSingleton) -> Self { + AnyNodeRef::PatternMatchSingleton(node) + } +} + +impl<'a> From<&'a PatternMatchSequence> for AnyNodeRef<'a> { + fn from(node: &'a PatternMatchSequence) -> Self { + AnyNodeRef::PatternMatchSequence(node) + } +} + +impl<'a> From<&'a PatternMatchMapping> for AnyNodeRef<'a> { + fn from(node: &'a PatternMatchMapping) -> Self { + AnyNodeRef::PatternMatchMapping(node) + } +} + +impl<'a> From<&'a PatternMatchClass> for AnyNodeRef<'a> { + fn from(node: &'a PatternMatchClass) -> Self { + AnyNodeRef::PatternMatchClass(node) + } +} + +impl<'a> From<&'a PatternMatchStar> for AnyNodeRef<'a> { + fn from(node: &'a PatternMatchStar) -> Self { + AnyNodeRef::PatternMatchStar(node) + } +} + +impl<'a> From<&'a PatternMatchAs> for AnyNodeRef<'a> { + fn from(node: &'a PatternMatchAs) -> Self { + AnyNodeRef::PatternMatchAs(node) + } +} + +impl<'a> From<&'a PatternMatchOr> for AnyNodeRef<'a> { + fn from(node: &'a PatternMatchOr) -> Self { + AnyNodeRef::PatternMatchOr(node) + } +} + +impl<'a> From<&'a TypeIgnoreTypeIgnore> for AnyNodeRef<'a> { + fn from(node: &'a TypeIgnoreTypeIgnore) -> Self { + AnyNodeRef::TypeIgnoreTypeIgnore(node) + } +} + +impl<'a> From<&'a Stmt> for AnyNodeRef<'a> { + fn from(stmt: &'a Stmt) -> Self { + match stmt { + Stmt::FunctionDef(node) => AnyNodeRef::StmtFunctionDef(node), + Stmt::AsyncFunctionDef(node) => AnyNodeRef::StmtAsyncFunctionDef(node), + Stmt::ClassDef(node) => AnyNodeRef::StmtClassDef(node), + Stmt::Return(node) => AnyNodeRef::StmtReturn(node), + Stmt::Delete(node) => 
AnyNodeRef::StmtDelete(node), + Stmt::Assign(node) => AnyNodeRef::StmtAssign(node), + Stmt::AugAssign(node) => AnyNodeRef::StmtAugAssign(node), + Stmt::AnnAssign(node) => AnyNodeRef::StmtAnnAssign(node), + Stmt::For(node) => AnyNodeRef::StmtFor(node), + Stmt::AsyncFor(node) => AnyNodeRef::StmtAsyncFor(node), + Stmt::While(node) => AnyNodeRef::StmtWhile(node), + Stmt::If(node) => AnyNodeRef::StmtIf(node), + Stmt::With(node) => AnyNodeRef::StmtWith(node), + Stmt::AsyncWith(node) => AnyNodeRef::StmtAsyncWith(node), + Stmt::Match(node) => AnyNodeRef::StmtMatch(node), + Stmt::Raise(node) => AnyNodeRef::StmtRaise(node), + Stmt::Try(node) => AnyNodeRef::StmtTry(node), + Stmt::TryStar(node) => AnyNodeRef::StmtTryStar(node), + Stmt::Assert(node) => AnyNodeRef::StmtAssert(node), + Stmt::Import(node) => AnyNodeRef::StmtImport(node), + Stmt::ImportFrom(node) => AnyNodeRef::StmtImportFrom(node), + Stmt::Global(node) => AnyNodeRef::StmtGlobal(node), + Stmt::Nonlocal(node) => AnyNodeRef::StmtNonlocal(node), + Stmt::Expr(node) => AnyNodeRef::StmtExpr(node), + Stmt::Pass(node) => AnyNodeRef::StmtPass(node), + Stmt::Break(node) => AnyNodeRef::StmtBreak(node), + Stmt::Continue(node) => AnyNodeRef::StmtContinue(node), + } + } +} + +impl<'a> From<&'a Expr> for AnyNodeRef<'a> { + fn from(expr: &'a Expr) -> Self { + match expr { + Expr::BoolOp(node) => AnyNodeRef::ExprBoolOp(node), + Expr::NamedExpr(node) => AnyNodeRef::ExprNamedExpr(node), + Expr::BinOp(node) => AnyNodeRef::ExprBinOp(node), + Expr::UnaryOp(node) => AnyNodeRef::ExprUnaryOp(node), + Expr::Lambda(node) => AnyNodeRef::ExprLambda(node), + Expr::IfExp(node) => AnyNodeRef::ExprIfExp(node), + Expr::Dict(node) => AnyNodeRef::ExprDict(node), + Expr::Set(node) => AnyNodeRef::ExprSet(node), + Expr::ListComp(node) => AnyNodeRef::ExprListComp(node), + Expr::SetComp(node) => AnyNodeRef::ExprSetComp(node), + Expr::DictComp(node) => AnyNodeRef::ExprDictComp(node), + Expr::GeneratorExp(node) => AnyNodeRef::ExprGeneratorExp(node), + Expr::Await(node) => AnyNodeRef::ExprAwait(node), + Expr::Yield(node) => AnyNodeRef::ExprYield(node), + Expr::YieldFrom(node) => AnyNodeRef::ExprYieldFrom(node), + Expr::Compare(node) => AnyNodeRef::ExprCompare(node), + Expr::Call(node) => AnyNodeRef::ExprCall(node), + Expr::FormattedValue(node) => AnyNodeRef::ExprFormattedValue(node), + Expr::JoinedStr(node) => AnyNodeRef::ExprJoinedStr(node), + Expr::Constant(node) => AnyNodeRef::ExprConstant(node), + Expr::Attribute(node) => AnyNodeRef::ExprAttribute(node), + Expr::Subscript(node) => AnyNodeRef::ExprSubscript(node), + Expr::Starred(node) => AnyNodeRef::ExprStarred(node), + Expr::Name(node) => AnyNodeRef::ExprName(node), + Expr::List(node) => AnyNodeRef::ExprList(node), + Expr::Tuple(node) => AnyNodeRef::ExprTuple(node), + Expr::Slice(node) => AnyNodeRef::ExprSlice(node), + } + } +} + +impl<'a> From<&'a Mod> for AnyNodeRef<'a> { + fn from(module: &'a Mod) -> Self { + match module { + Mod::Module(node) => AnyNodeRef::ModModule(node), + Mod::Interactive(node) => AnyNodeRef::ModInteractive(node), + Mod::Expression(node) => AnyNodeRef::ModExpression(node), + Mod::FunctionType(node) => AnyNodeRef::ModFunctionType(node), + } + } +} + +impl<'a> From<&'a Pattern> for AnyNodeRef<'a> { + fn from(pattern: &'a Pattern) -> Self { + match pattern { + Pattern::MatchValue(node) => AnyNodeRef::PatternMatchValue(node), + Pattern::MatchSingleton(node) => AnyNodeRef::PatternMatchSingleton(node), + Pattern::MatchSequence(node) => AnyNodeRef::PatternMatchSequence(node), + Pattern::MatchMapping(node) => 
AnyNodeRef::PatternMatchMapping(node), + Pattern::MatchClass(node) => AnyNodeRef::PatternMatchClass(node), + Pattern::MatchStar(node) => AnyNodeRef::PatternMatchStar(node), + Pattern::MatchAs(node) => AnyNodeRef::PatternMatchAs(node), + Pattern::MatchOr(node) => AnyNodeRef::PatternMatchOr(node), + } + } +} + +impl<'a> From<&'a Excepthandler> for AnyNodeRef<'a> { + fn from(handler: &'a Excepthandler) -> Self { + match handler { + Excepthandler::ExceptHandler(handler) => { + AnyNodeRef::ExcepthandlerExceptHandler(handler) + } + } + } +} + +impl<'a> From<&'a TypeIgnore> for AnyNodeRef<'a> { + fn from(ignore: &'a TypeIgnore) -> Self { + match ignore { + TypeIgnore::TypeIgnore(ignore) => AnyNodeRef::TypeIgnoreTypeIgnore(ignore), + } + } +} + +impl<'a> From<&'a Comprehension> for AnyNodeRef<'a> { + fn from(node: &'a Comprehension) -> Self { + AnyNodeRef::Comprehension(node) + } +} +impl<'a> From<&'a Arguments> for AnyNodeRef<'a> { + fn from(node: &'a Arguments) -> Self { + AnyNodeRef::Arguments(node) + } +} +impl<'a> From<&'a Arg> for AnyNodeRef<'a> { + fn from(node: &'a Arg) -> Self { + AnyNodeRef::Arg(node) + } +} +impl<'a> From<&'a Keyword> for AnyNodeRef<'a> { + fn from(node: &'a Keyword) -> Self { + AnyNodeRef::Keyword(node) + } +} +impl<'a> From<&'a Alias> for AnyNodeRef<'a> { + fn from(node: &'a Alias) -> Self { + AnyNodeRef::Alias(node) + } +} +impl<'a> From<&'a Withitem> for AnyNodeRef<'a> { + fn from(node: &'a Withitem) -> Self { + AnyNodeRef::Withitem(node) + } +} +impl<'a> From<&'a MatchCase> for AnyNodeRef<'a> { + fn from(node: &'a MatchCase) -> Self { + AnyNodeRef::MatchCase(node) + } +} + +impl Ranged for AnyNodeRef<'_> { + fn range(&self) -> TextRange { + match self { + AnyNodeRef::ModModule(node) => node.range(), + AnyNodeRef::ModInteractive(node) => node.range(), + AnyNodeRef::ModExpression(node) => node.range(), + AnyNodeRef::ModFunctionType(node) => node.range(), + AnyNodeRef::StmtFunctionDef(node) => node.range(), + AnyNodeRef::StmtAsyncFunctionDef(node) => node.range(), + AnyNodeRef::StmtClassDef(node) => node.range(), + AnyNodeRef::StmtReturn(node) => node.range(), + AnyNodeRef::StmtDelete(node) => node.range(), + AnyNodeRef::StmtAssign(node) => node.range(), + AnyNodeRef::StmtAugAssign(node) => node.range(), + AnyNodeRef::StmtAnnAssign(node) => node.range(), + AnyNodeRef::StmtFor(node) => node.range(), + AnyNodeRef::StmtAsyncFor(node) => node.range(), + AnyNodeRef::StmtWhile(node) => node.range(), + AnyNodeRef::StmtIf(node) => node.range(), + AnyNodeRef::StmtWith(node) => node.range(), + AnyNodeRef::StmtAsyncWith(node) => node.range(), + AnyNodeRef::StmtMatch(node) => node.range(), + AnyNodeRef::StmtRaise(node) => node.range(), + AnyNodeRef::StmtTry(node) => node.range(), + AnyNodeRef::StmtTryStar(node) => node.range(), + AnyNodeRef::StmtAssert(node) => node.range(), + AnyNodeRef::StmtImport(node) => node.range(), + AnyNodeRef::StmtImportFrom(node) => node.range(), + AnyNodeRef::StmtGlobal(node) => node.range(), + AnyNodeRef::StmtNonlocal(node) => node.range(), + AnyNodeRef::StmtExpr(node) => node.range(), + AnyNodeRef::StmtPass(node) => node.range(), + AnyNodeRef::StmtBreak(node) => node.range(), + AnyNodeRef::StmtContinue(node) => node.range(), + AnyNodeRef::ExprBoolOp(node) => node.range(), + AnyNodeRef::ExprNamedExpr(node) => node.range(), + AnyNodeRef::ExprBinOp(node) => node.range(), + AnyNodeRef::ExprUnaryOp(node) => node.range(), + AnyNodeRef::ExprLambda(node) => node.range(), + AnyNodeRef::ExprIfExp(node) => node.range(), + AnyNodeRef::ExprDict(node) => 
node.range(), + AnyNodeRef::ExprSet(node) => node.range(), + AnyNodeRef::ExprListComp(node) => node.range(), + AnyNodeRef::ExprSetComp(node) => node.range(), + AnyNodeRef::ExprDictComp(node) => node.range(), + AnyNodeRef::ExprGeneratorExp(node) => node.range(), + AnyNodeRef::ExprAwait(node) => node.range(), + AnyNodeRef::ExprYield(node) => node.range(), + AnyNodeRef::ExprYieldFrom(node) => node.range(), + AnyNodeRef::ExprCompare(node) => node.range(), + AnyNodeRef::ExprCall(node) => node.range(), + AnyNodeRef::ExprFormattedValue(node) => node.range(), + AnyNodeRef::ExprJoinedStr(node) => node.range(), + AnyNodeRef::ExprConstant(node) => node.range(), + AnyNodeRef::ExprAttribute(node) => node.range(), + AnyNodeRef::ExprSubscript(node) => node.range(), + AnyNodeRef::ExprStarred(node) => node.range(), + AnyNodeRef::ExprName(node) => node.range(), + AnyNodeRef::ExprList(node) => node.range(), + AnyNodeRef::ExprTuple(node) => node.range(), + AnyNodeRef::ExprSlice(node) => node.range(), + AnyNodeRef::ExcepthandlerExceptHandler(node) => node.range(), + AnyNodeRef::PatternMatchValue(node) => node.range(), + AnyNodeRef::PatternMatchSingleton(node) => node.range(), + AnyNodeRef::PatternMatchSequence(node) => node.range(), + AnyNodeRef::PatternMatchMapping(node) => node.range(), + AnyNodeRef::PatternMatchClass(node) => node.range(), + AnyNodeRef::PatternMatchStar(node) => node.range(), + AnyNodeRef::PatternMatchAs(node) => node.range(), + AnyNodeRef::PatternMatchOr(node) => node.range(), + AnyNodeRef::TypeIgnoreTypeIgnore(node) => node.range(), + AnyNodeRef::Comprehension(node) => node.range(), + AnyNodeRef::Arguments(node) => node.range(), + AnyNodeRef::Arg(node) => node.range(), + AnyNodeRef::Keyword(node) => node.range(), + AnyNodeRef::Alias(node) => node.range(), + AnyNodeRef::Withitem(node) => node.range(), + AnyNodeRef::MatchCase(node) => node.range(), + } + } +} + +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] +pub enum NodeKind { + ModModule, + ModInteractive, + ModExpression, + ModFunctionType, + StmtFunctionDef, + StmtAsyncFunctionDef, + StmtClassDef, + StmtReturn, + StmtDelete, + StmtAssign, + StmtAugAssign, + StmtAnnAssign, + StmtFor, + StmtAsyncFor, + StmtWhile, + StmtIf, + StmtWith, + StmtAsyncWith, + StmtMatch, + StmtRaise, + StmtTry, + StmtTryStar, + StmtAssert, + StmtImport, + StmtImportFrom, + StmtGlobal, + StmtNonlocal, + StmtExpr, + StmtPass, + StmtBreak, + StmtContinue, + ExprBoolOp, + ExprNamedExpr, + ExprBinOp, + ExprUnaryOp, + ExprLambda, + ExprIfExp, + ExprDict, + ExprSet, + ExprListComp, + ExprSetComp, + ExprDictComp, + ExprGeneratorExp, + ExprAwait, + ExprYield, + ExprYieldFrom, + ExprCompare, + ExprCall, + ExprFormattedValue, + ExprJoinedStr, + ExprConstant, + ExprAttribute, + ExprSubscript, + ExprStarred, + ExprName, + ExprList, + ExprTuple, + ExprSlice, + ExcepthandlerExceptHandler, + PatternMatchValue, + PatternMatchSingleton, + PatternMatchSequence, + PatternMatchMapping, + PatternMatchClass, + PatternMatchStar, + PatternMatchAs, + PatternMatchOr, + TypeIgnoreTypeIgnore, + Comprehension, + Arguments, + Arg, + Keyword, + Alias, + Withitem, + MatchCase, +} diff --git a/crates/ruff_python_ast/src/prelude.rs b/crates/ruff_python_ast/src/prelude.rs new file mode 100644 index 0000000000000..76505ecd015cc --- /dev/null +++ b/crates/ruff_python_ast/src/prelude.rs @@ -0,0 +1,2 @@ +pub use crate::node::AstNode; +pub use rustpython_ast::*; diff --git a/crates/ruff_python_ast/src/source_code/generator.rs b/crates/ruff_python_ast/src/source_code/generator.rs index 
f7806335ef74a..af36d7eaa549d 100644 --- a/crates/ruff_python_ast/src/source_code/generator.rs +++ b/crates/ruff_python_ast/src/source_code/generator.rs @@ -9,7 +9,6 @@ use rustpython_parser::ast::{ }; use crate::newlines::LineEnding; - use crate::source_code::stylist::{Indentation, Quote, Stylist}; mod precedence { @@ -1457,9 +1456,9 @@ impl<'a> Generator<'a> { #[cfg(test)] mod tests { - use crate::newlines::LineEnding; use rustpython_parser as parser; + use crate::newlines::LineEnding; use crate::source_code::stylist::{Indentation, Quote}; use crate::source_code::Generator; diff --git a/crates/ruff_python_ast/src/source_code/line_index.rs b/crates/ruff_python_ast/src/source_code/line_index.rs index 96916d35341cc..1096f1d1bf3f8 100644 --- a/crates/ruff_python_ast/src/source_code/line_index.rs +++ b/crates/ruff_python_ast/src/source_code/line_index.rs @@ -1,13 +1,15 @@ -use crate::source_code::SourceLocation; -use ruff_text_size::{TextLen, TextRange, TextSize}; -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; use std::fmt; use std::fmt::{Debug, Formatter}; use std::num::NonZeroUsize; use std::ops::Deref; use std::sync::Arc; +use ruff_text_size::{TextLen, TextRange, TextSize}; +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +use crate::source_code::SourceLocation; + /// Index for fast [byte offset](TextSize) to [`SourceLocation`] conversions. /// /// Cloning a [`LineIndex`] is cheap because it only requires bumping a reference count. @@ -312,9 +314,10 @@ const fn unwrap(option: Option) -> T { #[cfg(test)] mod tests { + use ruff_text_size::TextSize; + use crate::source_code::line_index::LineIndex; use crate::source_code::{OneIndexed, SourceLocation}; - use ruff_text_size::TextSize; #[test] fn ascii_index() { diff --git a/crates/ruff_python_ast/src/source_code/locator.rs b/crates/ruff_python_ast/src/source_code/locator.rs index 6a56e75584251..58dd92f23373f 100644 --- a/crates/ruff_python_ast/src/source_code/locator.rs +++ b/crates/ruff_python_ast/src/source_code/locator.rs @@ -1,11 +1,13 @@ //! Struct used to efficiently slice source code at (row, column) Locations. 
-use crate::newlines::find_newline; -use crate::source_code::{LineIndex, OneIndexed, SourceCode, SourceLocation}; +use std::ops::Add; + use memchr::{memchr2, memrchr2}; use once_cell::unsync::OnceCell; use ruff_text_size::{TextLen, TextRange, TextSize}; -use std::ops::Add; + +use crate::newlines::find_newline; +use crate::source_code::{LineIndex, OneIndexed, SourceCode, SourceLocation}; pub struct Locator<'a> { contents: &'a str, diff --git a/crates/ruff_python_ast/src/source_code/mod.rs b/crates/ruff_python_ast/src/source_code/mod.rs index 3a1764d3d0d9f..c4fdcc718f048 100644 --- a/crates/ruff_python_ast/src/source_code/mod.rs +++ b/crates/ruff_python_ast/src/source_code/mod.rs @@ -1,22 +1,26 @@ -mod generator; -mod indexer; -mod line_index; -mod locator; -mod stylist; +use std::cmp::Ordering; +use std::fmt::{Debug, Formatter}; +use std::sync::Arc; -pub use crate::source_code::line_index::{LineIndex, OneIndexed}; -pub use generator::Generator; -pub use indexer::Indexer; -pub use locator::Locator; use ruff_text_size::{TextRange, TextSize}; use rustpython_parser as parser; use rustpython_parser::{lexer, Mode, ParseError}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -use std::fmt::{Debug, Formatter}; -use std::sync::Arc; + +pub use generator::Generator; +pub use indexer::Indexer; +pub use locator::Locator; pub use stylist::{Quote, Stylist}; +pub use crate::source_code::line_index::{LineIndex, OneIndexed}; + +mod generator; +mod indexer; +mod line_index; +mod locator; +mod stylist; + /// Run round-trip source code generation on a given Python code. pub fn round_trip(code: &str, source_path: &str) -> Result { let locator = Locator::new(code); @@ -204,6 +208,23 @@ impl SourceFile { } } +impl PartialOrd for SourceFile { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for SourceFile { + fn cmp(&self, other: &Self) -> Ordering { + // Short circuit if these are the same source files + if Arc::ptr_eq(&self.inner, &other.inner) { + Ordering::Equal + } else { + self.inner.name.cmp(&other.inner.name) + } + } +} + struct SourceFileInner { name: Box, code: Box, diff --git a/crates/ruff_python_ast/src/source_code/stylist.rs b/crates/ruff_python_ast/src/source_code/stylist.rs index 5074beb1a5006..237b619b57e98 100644 --- a/crates/ruff_python_ast/src/source_code/stylist.rs +++ b/crates/ruff_python_ast/src/source_code/stylist.rs @@ -9,7 +9,6 @@ use rustpython_parser::lexer::LexResult; use rustpython_parser::Tok; use crate::newlines::{find_newline, LineEnding}; - use crate::source_code::Locator; use crate::str::leading_quote; @@ -164,10 +163,10 @@ impl Deref for Indentation { #[cfg(test)] mod tests { - use crate::newlines::{find_newline, LineEnding}; use rustpython_parser::lexer::lex; use rustpython_parser::Mode; + use crate::newlines::{find_newline, LineEnding}; use crate::source_code::stylist::{Indentation, Quote}; use crate::source_code::{Locator, Stylist}; diff --git a/crates/ruff_python_formatter/src/context.rs b/crates/ruff_python_formatter/src/context.rs index 36827dd6dea74..301dbff052873 100644 --- a/crates/ruff_python_formatter/src/context.rs +++ b/crates/ruff_python_formatter/src/context.rs @@ -1,36 +1,34 @@ -use std::rc::Rc; - -use ruff_formatter::{FormatContext, SimpleFormatOptions}; +use ruff_formatter::{FormatContext, SimpleFormatOptions, SourceCode}; use ruff_python_ast::source_code::Locator; -pub struct ASTFormatContext { +#[derive(Clone, Debug)] +pub struct ASTFormatContext<'source> { options: SimpleFormatOptions, - contents: Rc, 
+ contents: &'source str, } -impl ASTFormatContext { - pub fn new(options: SimpleFormatOptions, contents: &str) -> Self { - Self { - options, - contents: Rc::from(contents), - } +impl<'source> ASTFormatContext<'source> { + pub fn new(options: SimpleFormatOptions, contents: &'source str) -> Self { + Self { options, contents } + } + + pub fn contents(&self) -> &'source str { + self.contents + } + + pub fn locator(&self) -> Locator<'source> { + Locator::new(self.contents) } } -impl FormatContext for ASTFormatContext { +impl FormatContext for ASTFormatContext<'_> { type Options = SimpleFormatOptions; fn options(&self) -> &Self::Options { &self.options } -} - -impl ASTFormatContext { - pub fn contents(&self) -> Rc { - self.contents.clone() - } - pub fn locator(&self) -> Locator { - Locator::new(&self.contents) + fn source_code(&self) -> SourceCode { + SourceCode::new(self.contents) } } diff --git a/crates/ruff_python_formatter/src/cst/mod.rs b/crates/ruff_python_formatter/src/cst/mod.rs index ef398673b368b..239629bdd0cfd 100644 --- a/crates/ruff_python_formatter/src/cst/mod.rs +++ b/crates/ruff_python_formatter/src/cst/mod.rs @@ -19,15 +19,15 @@ pub(crate) mod visitor; type Ident = String; #[derive(Clone, Debug, PartialEq)] -pub struct Attributed { - pub range: TextRange, - pub node: T, - pub trivia: Vec, - pub parentheses: Parenthesize, +pub(crate) struct Attributed { + pub(crate) range: TextRange, + pub(crate) node: T, + pub(crate) trivia: Vec, + pub(crate) parentheses: Parenthesize, } impl Attributed { - pub fn new(range: TextRange, node: T) -> Self { + pub(crate) fn new(range: TextRange, node: T) -> Self { Self { range, node, @@ -36,23 +36,19 @@ impl Attributed { } } - pub const fn range(&self) -> TextRange { + pub(crate) const fn range(&self) -> TextRange { self.range } - pub const fn start(&self) -> TextSize { + pub(crate) const fn start(&self) -> TextSize { self.range.start() } - pub const fn end(&self) -> TextSize { + pub(crate) const fn end(&self) -> TextSize { self.range.end() } - pub fn add_trivia(&mut self, trivia: Trivia) { - self.trivia.push(trivia); - } - - pub fn id(&self) -> usize { + pub(crate) fn id(&self) -> usize { std::ptr::addr_of!(self.node) as usize } } @@ -65,7 +61,7 @@ impl Deref for Attributed { } #[derive(Clone, Debug, PartialEq)] -pub enum ExprContext { +pub(crate) enum ExprContext { Load, Store, Del, @@ -82,7 +78,7 @@ impl From for ExprContext { } #[derive(Clone, Debug, PartialEq)] -pub enum BoolOpKind { +pub(crate) enum BoolOpKind { And, Or, } @@ -99,7 +95,7 @@ impl From<&ast::Boolop> for BoolOpKind { pub(crate) type BoolOp = Attributed; #[derive(Clone, Debug, PartialEq)] -pub enum OperatorKind { +pub(crate) enum OperatorKind { Add, Sub, Mult, @@ -138,7 +134,7 @@ impl From<&ast::Operator> for OperatorKind { } #[derive(Clone, Debug, PartialEq)] -pub enum UnaryOpKind { +pub(crate) enum UnaryOpKind { Invert, Not, UAdd, @@ -159,7 +155,7 @@ impl From<&ast::Unaryop> for UnaryOpKind { } #[derive(Clone, Debug, PartialEq)] -pub enum CmpOpKind { +pub(crate) enum CmpOpKind { Eq, NotEq, Lt, @@ -208,7 +204,7 @@ impl From<(Vec, &Locator<'_>)> for Body { } #[derive(Clone, Debug, PartialEq)] -pub enum StmtKind { +pub(crate) enum StmtKind { FunctionDef { name: Ident, args: Box, @@ -338,7 +334,7 @@ pub enum StmtKind { pub(crate) type Stmt = Attributed; #[derive(Clone, Debug, PartialEq)] -pub enum ExprKind { +pub(crate) enum ExprKind { BoolOp { ops: Vec, values: Vec, @@ -456,15 +452,15 @@ pub enum ExprKind { pub(crate) type Expr = Attributed; #[derive(Clone, Debug, PartialEq)] -pub 
struct Comprehension { - pub target: Expr, - pub iter: Expr, - pub ifs: Vec, - pub is_async: usize, +pub(crate) struct Comprehension { + pub(crate) target: Expr, + pub(crate) iter: Expr, + pub(crate) ifs: Vec, + pub(crate) is_async: usize, } #[derive(Clone, Debug, PartialEq)] -pub enum ExcepthandlerKind { +pub(crate) enum ExcepthandlerKind { ExceptHandler { type_: Option>, name: Option, @@ -475,7 +471,7 @@ pub enum ExcepthandlerKind { pub(crate) type Excepthandler = Attributed; #[derive(Clone, Debug, PartialEq)] -pub enum SliceIndexKind { +pub(crate) enum SliceIndexKind { /// The index slot exists, but is empty. Empty, /// The index slot contains an expression. @@ -485,57 +481,57 @@ pub enum SliceIndexKind { pub(crate) type SliceIndex = Attributed; #[derive(Clone, Debug, PartialEq)] -pub struct Arguments { - pub posonlyargs: Vec, - pub args: Vec, - pub vararg: Option>, - pub kwonlyargs: Vec, - pub kw_defaults: Vec, - pub kwarg: Option>, - pub defaults: Vec, +pub(crate) struct Arguments { + pub(crate) posonlyargs: Vec, + pub(crate) args: Vec, + pub(crate) vararg: Option>, + pub(crate) kwonlyargs: Vec, + pub(crate) kw_defaults: Vec, + pub(crate) kwarg: Option>, + pub(crate) defaults: Vec, } #[derive(Clone, Debug, PartialEq)] -pub struct ArgData { - pub arg: Ident, - pub annotation: Option>, - pub type_comment: Option, +pub(crate) struct ArgData { + pub(crate) arg: Ident, + pub(crate) annotation: Option>, + pub(crate) type_comment: Option, } pub(crate) type Arg = Attributed; #[derive(Clone, Debug, PartialEq)] -pub struct KeywordData { - pub arg: Option, - pub value: Expr, +pub(crate) struct KeywordData { + pub(crate) arg: Option, + pub(crate) value: Expr, } pub(crate) type Keyword = Attributed; #[derive(Clone, Debug, PartialEq)] -pub struct AliasData { - pub name: Ident, - pub asname: Option, +pub(crate) struct AliasData { + pub(crate) name: Ident, + pub(crate) asname: Option, } pub(crate) type Alias = Attributed; #[derive(Clone, Debug, PartialEq)] -pub struct Withitem { - pub context_expr: Expr, - pub optional_vars: Option>, +pub(crate) struct Withitem { + pub(crate) context_expr: Expr, + pub(crate) optional_vars: Option>, } #[derive(Clone, Debug, PartialEq)] -pub struct MatchCase { - pub pattern: Pattern, - pub guard: Option>, - pub body: Body, +pub(crate) struct MatchCase { + pub(crate) pattern: Pattern, + pub(crate) guard: Option>, + pub(crate) body: Body, } #[allow(clippy::enum_variant_names)] #[derive(Clone, Debug, PartialEq)] -pub enum PatternKind { +pub(crate) enum PatternKind { MatchValue { value: Box, }, @@ -953,12 +949,7 @@ impl From<(ast::Stmt, &Locator<'_>)> for Stmt { }; Stmt { - range: TextRange::new( - decorator_list - .first() - .map_or(range.start(), ast::Ranged::start), - body.end(), - ), + range: TextRange::new(range.start(), body.end()), node: StmtKind::FunctionDef { name: name.into(), args: Box::new((*args, locator).into()), @@ -999,12 +990,7 @@ impl From<(ast::Stmt, &Locator<'_>)> for Stmt { }; Stmt { - range: TextRange::new( - decorator_list - .first() - .map_or(range.start(), |expr| expr.range().start()), - body.end(), - ), + range: TextRange::new(range.start(), body.end()), node: StmtKind::AsyncFunctionDef { name: name.into(), args: Box::new((*args, locator).into()), diff --git a/crates/ruff_python_formatter/src/format/alias.rs b/crates/ruff_python_formatter/src/format/alias.rs index a12648f9b2a90..a6d3f1e189117 100644 --- a/crates/ruff_python_formatter/src/format/alias.rs +++ b/crates/ruff_python_formatter/src/format/alias.rs @@ -1,17 +1,14 @@ -use 
ruff_formatter::prelude::*; +use crate::prelude::*; use ruff_formatter::write; -use ruff_text_size::TextSize; -use crate::context::ASTFormatContext; use crate::cst::Alias; use crate::format::comments::end_of_line_comments; -use crate::shared_traits::AsFormat; -pub struct FormatAlias<'a> { +pub(crate) struct FormatAlias<'a> { item: &'a Alias, } -impl AsFormat for Alias { +impl AsFormat> for Alias { type Format<'a> = FormatAlias<'a>; fn format(&self) -> Self::Format<'_> { @@ -19,14 +16,14 @@ impl AsFormat for Alias { } } -impl Format for FormatAlias<'_> { +impl Format> for FormatAlias<'_> { fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { let alias = self.item; - write!(f, [dynamic_text(&alias.name, TextSize::default())])?; + write!(f, [dynamic_text(&alias.name, None)])?; if let Some(asname) = &alias.asname { write!(f, [text(" as ")])?; - write!(f, [dynamic_text(asname, TextSize::default())])?; + write!(f, [dynamic_text(asname, None)])?; } write!(f, [end_of_line_comments(alias)])?; diff --git a/crates/ruff_python_formatter/src/format/arg.rs b/crates/ruff_python_formatter/src/format/arg.rs index 4279d345206da..f350a464dfa2e 100644 --- a/crates/ruff_python_formatter/src/format/arg.rs +++ b/crates/ruff_python_formatter/src/format/arg.rs @@ -1,17 +1,14 @@ -use ruff_formatter::prelude::*; +use crate::prelude::*; use ruff_formatter::write; -use ruff_text_size::TextSize; -use crate::context::ASTFormatContext; use crate::cst::Arg; use crate::format::comments::end_of_line_comments; -use crate::shared_traits::AsFormat; -pub struct FormatArg<'a> { +pub(crate) struct FormatArg<'a> { item: &'a Arg, } -impl AsFormat for Arg { +impl AsFormat> for Arg { type Format<'a> = FormatArg<'a>; fn format(&self) -> Self::Format<'_> { @@ -19,11 +16,11 @@ impl AsFormat for Arg { } } -impl Format for FormatArg<'_> { +impl Format> for FormatArg<'_> { fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { let arg = self.item; - write!(f, [dynamic_text(&arg.arg, TextSize::default())])?; + write!(f, [dynamic_text(&arg.arg, None)])?; if let Some(annotation) = &arg.annotation { write!(f, [text(": ")])?; write!(f, [annotation.format()])?; diff --git a/crates/ruff_python_formatter/src/format/arguments.rs b/crates/ruff_python_formatter/src/format/arguments.rs index d5dd17100d27b..fe8dbc5e5a636 100644 --- a/crates/ruff_python_formatter/src/format/arguments.rs +++ b/crates/ruff_python_formatter/src/format/arguments.rs @@ -1,15 +1,13 @@ -use ruff_formatter::prelude::*; +use crate::prelude::*; use ruff_formatter::{format_args, write, Format}; -use crate::context::ASTFormatContext; use crate::cst::Arguments; -use crate::shared_traits::AsFormat; -pub struct FormatArguments<'a> { +pub(crate) struct FormatArguments<'a> { item: &'a Arguments, } -impl AsFormat for Arguments { +impl AsFormat> for Arguments { type Format<'a> = FormatArguments<'a>; fn format(&self) -> Self::Format<'_> { @@ -17,7 +15,7 @@ impl AsFormat for Arguments { } } -impl Format for FormatArguments<'_> { +impl Format> for FormatArguments<'_> { fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { let args = self.item; diff --git a/crates/ruff_python_formatter/src/format/bool_op.rs b/crates/ruff_python_formatter/src/format/bool_op.rs index e148c9c59c83f..4e278ce2c0a2a 100644 --- a/crates/ruff_python_formatter/src/format/bool_op.rs +++ b/crates/ruff_python_formatter/src/format/bool_op.rs @@ -1,16 +1,14 @@ -use ruff_formatter::prelude::*; +use crate::prelude::*; use ruff_formatter::write; -use crate::context::ASTFormatContext; use crate::cst::{BoolOp, BoolOpKind}; 
use crate::format::comments::{end_of_line_comments, leading_comments, trailing_comments}; -use crate::shared_traits::AsFormat; -pub struct FormatBoolOp<'a> { +pub(crate) struct FormatBoolOp<'a> { item: &'a BoolOp, } -impl AsFormat for BoolOp { +impl AsFormat> for BoolOp { type Format<'a> = FormatBoolOp<'a>; fn format(&self) -> Self::Format<'_> { @@ -18,7 +16,7 @@ impl AsFormat for BoolOp { } } -impl Format for FormatBoolOp<'_> { +impl Format> for FormatBoolOp<'_> { fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { let bool_op = self.item; write!(f, [leading_comments(bool_op)])?; diff --git a/crates/ruff_python_formatter/src/format/builders.rs b/crates/ruff_python_formatter/src/format/builders.rs index 3106eea705fc8..d22cf7bd278cf 100644 --- a/crates/ruff_python_formatter/src/format/builders.rs +++ b/crates/ruff_python_formatter/src/format/builders.rs @@ -1,10 +1,8 @@ -use ruff_formatter::prelude::*; +use crate::prelude::*; use ruff_formatter::{write, Format}; -use ruff_text_size::{TextRange, TextSize}; +use ruff_text_size::TextRange; -use crate::context::ASTFormatContext; use crate::cst::{Body, Stmt}; -use crate::shared_traits::AsFormat; use crate::trivia::{Relationship, TriviaKind}; #[derive(Copy, Clone)] @@ -12,7 +10,7 @@ pub(crate) struct Block<'a> { body: &'a Body, } -impl Format for Block<'_> { +impl Format> for Block<'_> { fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { for (i, stmt) in self.body.iter().enumerate() { if i > 0 { @@ -28,7 +26,7 @@ impl Format for Block<'_> { write!(f, [empty_line()])?; } TriviaKind::OwnLineComment(range) => { - write!(f, [literal(range), hard_line_break()])?; + write!(f, [literal(range, ContainsNewlines::No), hard_line_break()])?; } _ => {} } @@ -49,7 +47,7 @@ pub(crate) struct Statements<'a> { suite: &'a [Stmt], } -impl Format for Statements<'_> { +impl Format> for Statements<'_> { fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { for (i, stmt) in self.suite.iter().enumerate() { if i > 0 { @@ -70,20 +68,18 @@ pub(crate) struct Literal { range: TextRange, } -impl Format for Literal { +impl Format> for Literal { fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { - let text = f.context().contents(); - - f.write_element(FormatElement::StaticTextSlice { - text, - range: self.range, - }) + source_text_slice(self.range, ContainsNewlines::Detect).fmt(f) } } #[inline] -pub(crate) const fn literal(range: TextRange) -> Literal { - Literal { range } +pub(crate) const fn literal( + range: TextRange, + newlines: ContainsNewlines, +) -> SourceTextSliceBuilder { + source_text_slice(range, newlines) } pub(crate) const fn join_names(names: &[String]) -> JoinNames { @@ -98,7 +94,7 @@ impl Format for JoinNames<'_> { fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { let mut join = f.join_with(text(", ")); for name in self.names { - join.entry(&dynamic_text(name, TextSize::default())); + join.entry(&dynamic_text(name, None)); } join.finish() } diff --git a/crates/ruff_python_formatter/src/format/cmp_op.rs b/crates/ruff_python_formatter/src/format/cmp_op.rs index da66c361c2d02..5b32e0a4be1fc 100644 --- a/crates/ruff_python_formatter/src/format/cmp_op.rs +++ b/crates/ruff_python_formatter/src/format/cmp_op.rs @@ -1,16 +1,14 @@ -use ruff_formatter::prelude::*; +use crate::prelude::*; use ruff_formatter::write; -use crate::context::ASTFormatContext; use crate::cst::{CmpOp, CmpOpKind}; use crate::format::comments::{end_of_line_comments, leading_comments, trailing_comments}; -use crate::shared_traits::AsFormat; -pub struct FormatCmpOp<'a> { +pub(crate) 
struct FormatCmpOp<'a> { item: &'a CmpOp, } -impl AsFormat for CmpOp { +impl AsFormat> for CmpOp { type Format<'a> = FormatCmpOp<'a>; fn format(&self) -> Self::Format<'_> { @@ -18,7 +16,7 @@ impl AsFormat for CmpOp { } } -impl Format for FormatCmpOp<'_> { +impl Format> for FormatCmpOp<'_> { fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { let cmp_op = self.item; write!(f, [leading_comments(cmp_op)])?; diff --git a/crates/ruff_python_formatter/src/format/comments.rs b/crates/ruff_python_formatter/src/format/comments.rs index 8aadf9a530fe7..b6d13f021d443 100644 --- a/crates/ruff_python_formatter/src/format/comments.rs +++ b/crates/ruff_python_formatter/src/format/comments.rs @@ -1,7 +1,6 @@ -use ruff_formatter::prelude::*; +use crate::prelude::*; use ruff_formatter::{write, Format}; -use crate::context::ASTFormatContext; use crate::cst::Attributed; use crate::format::builders::literal; use crate::trivia::TriviaKind; @@ -11,7 +10,7 @@ pub(crate) struct LeadingComments<'a, T> { item: &'a Attributed, } -impl Format for LeadingComments<'_, T> { +impl Format> for LeadingComments<'_, T> { fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { for trivia in &self.item.trivia { if trivia.relationship.is_leading() { @@ -20,7 +19,7 @@ impl Format for LeadingComments<'_, T> { write!(f, [empty_line()])?; } TriviaKind::OwnLineComment(range) => { - write!(f, [literal(range), hard_line_break()])?; + write!(f, [literal(range, ContainsNewlines::No), hard_line_break()])?; } _ => {} } @@ -40,7 +39,7 @@ pub(crate) struct TrailingComments<'a, T> { item: &'a Attributed, } -impl Format for TrailingComments<'_, T> { +impl Format> for TrailingComments<'_, T> { fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { for trivia in &self.item.trivia { if trivia.relationship.is_trailing() { @@ -49,7 +48,7 @@ impl Format for TrailingComments<'_, T> { write!(f, [empty_line()])?; } TriviaKind::OwnLineComment(range) => { - write!(f, [literal(range), hard_line_break()])?; + write!(f, [literal(range, ContainsNewlines::No), hard_line_break()])?; } _ => {} } @@ -69,7 +68,7 @@ pub(crate) struct EndOfLineComments<'a, T> { item: &'a Attributed, } -impl Format for EndOfLineComments<'_, T> { +impl Format> for EndOfLineComments<'_, T> { fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { let mut first = true; for range in self @@ -81,7 +80,7 @@ impl Format for EndOfLineComments<'_, T> { if std::mem::take(&mut first) { write!(f, [line_suffix(&text(" "))])?; } - write!(f, [line_suffix(&literal(range))])?; + write!(f, [line_suffix(&literal(range, ContainsNewlines::No))])?; } Ok(()) } @@ -97,13 +96,13 @@ pub(crate) struct DanglingComments<'a, T> { item: &'a Attributed, } -impl Format for DanglingComments<'_, T> { +impl Format> for DanglingComments<'_, T> { fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { for trivia in &self.item.trivia { if trivia.relationship.is_dangling() { if let TriviaKind::OwnLineComment(range) = trivia.kind { write!(f, [hard_line_break()])?; - write!(f, [literal(range)])?; + write!(f, [literal(range, ContainsNewlines::No)])?; write!(f, [hard_line_break()])?; } } diff --git a/crates/ruff_python_formatter/src/format/comprehension.rs b/crates/ruff_python_formatter/src/format/comprehension.rs index 449aeba73ae14..ccf981b99400b 100644 --- a/crates/ruff_python_formatter/src/format/comprehension.rs +++ b/crates/ruff_python_formatter/src/format/comprehension.rs @@ -1,15 +1,13 @@ -use ruff_formatter::prelude::*; +use crate::prelude::*; use ruff_formatter::write; -use crate::context::ASTFormatContext; use 
crate::cst::Comprehension; -use crate::shared_traits::AsFormat; -pub struct FormatComprehension<'a> { +pub(crate) struct FormatComprehension<'a> { item: &'a Comprehension, } -impl AsFormat for Comprehension { +impl AsFormat> for Comprehension { type Format<'a> = FormatComprehension<'a>; fn format(&self) -> Self::Format<'_> { @@ -17,7 +15,7 @@ impl AsFormat for Comprehension { } } -impl Format for FormatComprehension<'_> { +impl Format> for FormatComprehension<'_> { fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { let comprehension = self.item; diff --git a/crates/ruff_python_formatter/src/format/excepthandler.rs b/crates/ruff_python_formatter/src/format/excepthandler.rs index 106ee54ff4fc4..2662540249899 100644 --- a/crates/ruff_python_formatter/src/format/excepthandler.rs +++ b/crates/ruff_python_formatter/src/format/excepthandler.rs @@ -1,18 +1,15 @@ -use ruff_formatter::prelude::*; +use crate::prelude::*; use ruff_formatter::write; -use ruff_text_size::TextSize; -use crate::context::ASTFormatContext; use crate::cst::{Excepthandler, ExcepthandlerKind}; use crate::format::builders::block; use crate::format::comments::end_of_line_comments; -use crate::shared_traits::AsFormat; -pub struct FormatExcepthandler<'a> { +pub(crate) struct FormatExcepthandler<'a> { item: &'a Excepthandler, } -impl AsFormat for Excepthandler { +impl AsFormat> for Excepthandler { type Format<'a> = FormatExcepthandler<'a>; fn format(&self) -> Self::Format<'_> { @@ -20,7 +17,7 @@ impl AsFormat for Excepthandler { } } -impl Format for FormatExcepthandler<'_> { +impl Format> for FormatExcepthandler<'_> { fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { let excepthandler = self.item; let ExcepthandlerKind::ExceptHandler { type_, name, body } = &excepthandler.node; @@ -29,15 +26,7 @@ impl Format for FormatExcepthandler<'_> { if let Some(type_) = &type_ { write!(f, [space(), type_.format()])?; if let Some(name) = &name { - write!( - f, - [ - space(), - text("as"), - space(), - dynamic_text(name, TextSize::default()), - ] - )?; + write!(f, [space(), text("as"), space(), dynamic_text(name, None)])?; } } write!(f, [text(":")])?; diff --git a/crates/ruff_python_formatter/src/format/expr.rs b/crates/ruff_python_formatter/src/format/expr.rs index 5aa139b7bdf02..5a7ae3282412d 100644 --- a/crates/ruff_python_formatter/src/format/expr.rs +++ b/crates/ruff_python_formatter/src/format/expr.rs @@ -2,11 +2,9 @@ use rustpython_parser::ast::{Constant, ConversionFlag}; -use ruff_formatter::prelude::*; +use crate::prelude::*; use ruff_formatter::{format_args, write}; -use ruff_text_size::TextSize; -use crate::context::ASTFormatContext; use crate::cst::{ Arguments, BoolOp, CmpOp, Comprehension, Expr, ExprKind, Keyword, Operator, OperatorKind, SliceIndex, SliceIndexKind, UnaryOp, UnaryOpKind, @@ -16,10 +14,9 @@ use crate::format::comments::{dangling_comments, end_of_line_comments, leading_c use crate::format::helpers::{is_self_closing, is_simple_power, is_simple_slice}; use crate::format::numbers::{complex_literal, float_literal, int_literal}; use crate::format::strings::string_literal; -use crate::shared_traits::AsFormat; use crate::trivia::{Parenthesize, TriviaKind}; -pub struct FormatExpr<'a> { +pub(crate) struct FormatExpr<'a> { item: &'a Expr, } @@ -34,7 +31,7 @@ fn format_starred( } fn format_name(f: &mut Formatter, expr: &Expr, _id: &str) -> FormatResult<()> { - write!(f, [literal(expr.range())])?; + write!(f, [literal(expr.range(), ContainsNewlines::No)])?; write!(f, [end_of_line_comments(expr)])?; Ok(()) } @@ -58,7 +55,7 
@@ fn format_subscript( if let TriviaKind::OwnLineComment(range) = trivia.kind { write!(f, [expand_parent()])?; write!(f, [hard_line_break()])?; - write!(f, [literal(range)])?; + write!(f, [literal(range, ContainsNewlines::No)])?; } } } @@ -574,7 +571,7 @@ fn format_joined_str( expr: &Expr, _values: &[Expr], ) -> FormatResult<()> { - write!(f, [literal(expr.range())])?; + write!(f, [literal(expr.range(), ContainsNewlines::Detect)])?; write!(f, [end_of_line_comments(expr)])?; Ok(()) } @@ -676,7 +673,7 @@ fn format_attribute( ) -> FormatResult<()> { write!(f, [value.format()])?; write!(f, [text(".")])?; - write!(f, [dynamic_text(attr, TextSize::default())])?; + write!(f, [dynamic_text(attr, None)])?; write!(f, [end_of_line_comments(expr)])?; Ok(()) } @@ -801,7 +798,7 @@ fn format_if_exp( Ok(()) } -impl Format for FormatExpr<'_> { +impl Format> for FormatExpr<'_> { fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { if self.item.parentheses.is_always() { write!(f, [text("(")])?; @@ -872,7 +869,7 @@ impl Format for FormatExpr<'_> { if trivia.relationship.is_trailing() { if let TriviaKind::OwnLineComment(range) = trivia.kind { write!(f, [expand_parent()])?; - write!(f, [literal(range)])?; + write!(f, [literal(range, ContainsNewlines::No)])?; write!(f, [hard_line_break()])?; } } @@ -886,7 +883,7 @@ impl Format for FormatExpr<'_> { } } -impl AsFormat for Expr { +impl AsFormat> for Expr { type Format<'a> = FormatExpr<'a>; fn format(&self) -> Self::Format<'_> { diff --git a/crates/ruff_python_formatter/src/format/keyword.rs b/crates/ruff_python_formatter/src/format/keyword.rs index d89cea21ff680..c277b7de57c38 100644 --- a/crates/ruff_python_formatter/src/format/keyword.rs +++ b/crates/ruff_python_formatter/src/format/keyword.rs @@ -1,17 +1,14 @@ -use ruff_formatter::prelude::*; +use crate::prelude::*; use ruff_formatter::write; -use ruff_text_size::TextSize; -use crate::context::ASTFormatContext; use crate::cst::Keyword; use crate::format::comments::{end_of_line_comments, leading_comments, trailing_comments}; -use crate::shared_traits::AsFormat; -pub struct FormatKeyword<'a> { +pub(crate) struct FormatKeyword<'a> { item: &'a Keyword, } -impl AsFormat for Keyword { +impl AsFormat> for Keyword { type Format<'a> = FormatKeyword<'a>; fn format(&self) -> Self::Format<'_> { @@ -19,13 +16,13 @@ impl AsFormat for Keyword { } } -impl Format for FormatKeyword<'_> { +impl Format> for FormatKeyword<'_> { fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { let keyword = self.item; write!(f, [leading_comments(keyword)])?; if let Some(arg) = &keyword.arg { - write!(f, [dynamic_text(arg, TextSize::default())])?; + write!(f, [dynamic_text(arg, None)])?; write!(f, [text("=")])?; write!(f, [keyword.value.format()])?; } else { diff --git a/crates/ruff_python_formatter/src/format/match_case.rs b/crates/ruff_python_formatter/src/format/match_case.rs index 858d62302cd19..d71ac3c919620 100644 --- a/crates/ruff_python_formatter/src/format/match_case.rs +++ b/crates/ruff_python_formatter/src/format/match_case.rs @@ -1,17 +1,15 @@ -use ruff_formatter::prelude::*; +use crate::prelude::*; use ruff_formatter::write; -use crate::context::ASTFormatContext; use crate::cst::MatchCase; use crate::format::builders::block; use crate::format::comments::{end_of_line_comments, leading_comments}; -use crate::shared_traits::AsFormat; -pub struct FormatMatchCase<'a> { +pub(crate) struct FormatMatchCase<'a> { item: &'a MatchCase, } -impl AsFormat for MatchCase { +impl AsFormat> for MatchCase { type Format<'a> = FormatMatchCase<'a>; fn 
format(&self) -> Self::Format<'_> { @@ -19,7 +17,7 @@ impl AsFormat for MatchCase { } } -impl Format for FormatMatchCase<'_> { +impl Format> for FormatMatchCase<'_> { fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { let MatchCase { pattern, diff --git a/crates/ruff_python_formatter/src/format/numbers.rs b/crates/ruff_python_formatter/src/format/numbers.rs index 2b73e4a1dd1e3..dde5a1c5371d7 100644 --- a/crates/ruff_python_formatter/src/format/numbers.rs +++ b/crates/ruff_python_formatter/src/format/numbers.rs @@ -1,10 +1,9 @@ use std::ops::{Add, Sub}; -use ruff_formatter::prelude::*; +use crate::prelude::*; use ruff_formatter::{write, Format}; use ruff_text_size::{TextRange, TextSize}; -use crate::context::ASTFormatContext; use crate::format::builders::literal; #[derive(Debug, Copy, Clone, Eq, PartialEq)] @@ -12,7 +11,7 @@ struct FloatAtom { range: TextRange, } -impl Format for FloatAtom { +impl Format> for FloatAtom { fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { let contents = f.context().contents(); @@ -26,12 +25,15 @@ impl Format for FloatAtom { } else { write!( f, - [literal(TextRange::new( - self.range.start(), - self.range - .start() - .add(TextSize::try_from(dot_index).unwrap()) - ))] + [literal( + TextRange::new( + self.range.start(), + self.range + .start() + .add(TextSize::try_from(dot_index).unwrap()) + ), + ContainsNewlines::No + )] )?; } @@ -42,16 +44,19 @@ impl Format for FloatAtom { } else { write!( f, - [literal(TextRange::new( - self.range - .start() - .add(TextSize::try_from(dot_index + 1).unwrap()), - self.range.end() - ))] + [literal( + TextRange::new( + self.range + .start() + .add(TextSize::try_from(dot_index + 1).unwrap()), + self.range.end() + ), + ContainsNewlines::No + )] )?; } } else { - write!(f, [literal(self.range)])?; + write!(f, [literal(self.range, ContainsNewlines::No)])?; } Ok(()) @@ -68,7 +73,7 @@ pub(crate) struct FloatLiteral { range: TextRange, } -impl Format for FloatLiteral { +impl Format> for FloatLiteral { fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { let contents = f.context().contents(); @@ -93,12 +98,15 @@ impl Format for FloatLiteral { let plus = content[exponent_index + 1..].starts_with('+'); write!( f, - [literal(TextRange::new( - self.range - .start() - .add(TextSize::try_from(exponent_index + 1 + usize::from(plus)).unwrap()), - self.range.end() - ))] + [literal( + TextRange::new( + self.range.start().add( + TextSize::try_from(exponent_index + 1 + usize::from(plus)).unwrap() + ), + self.range.end() + ), + ContainsNewlines::No + )] )?; } else { write!(f, [float_atom(self.range)])?; @@ -118,7 +126,7 @@ pub(crate) struct IntLiteral { range: TextRange, } -impl Format for IntLiteral { +impl Format> for IntLiteral { fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { let contents = f.context().contents(); @@ -136,20 +144,20 @@ impl Format for IntLiteral { write!( f, [ - dynamic_text(&prefix.to_lowercase(), TextSize::default()), - dynamic_text(&suffix.to_uppercase(), TextSize::default()) + dynamic_text(&prefix.to_lowercase(), None), + dynamic_text(&suffix.to_uppercase(), None) ] )?; } else { // Use the existing source. 
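The `numbers.rs` hunks above mostly thread the new `ContainsNewlines` argument through `literal(...)`, but the surrounding code's job is to normalize numeric literals by slicing the source range around the dot, the exponent, and the prefix/suffix. A hedged, string-based sketch of that kind of normalization (the exact rules and names here are simplified assumptions; the real formatter works on `TextRange`s over the source buffer):

```rust
/// Pad a float literal so both sides of the dot have a digit, and drop a
/// redundant `+` after the exponent (e.g. `1E+6` -> `1e6`, `.5` -> `0.5`).
fn normalize_float(literal: &str) -> String {
    let lower = literal.to_ascii_lowercase();
    let (mantissa, exponent) = match lower.split_once('e') {
        Some((mantissa, exponent)) => (mantissa, Some(exponent.trim_start_matches('+'))),
        None => (lower.as_str(), None),
    };
    let mantissa = match mantissa.split_once('.') {
        Some(("", fraction)) => format!("0.{fraction}"),
        Some((whole, "")) => format!("{whole}.0"),
        _ => mantissa.to_string(),
    };
    match exponent {
        Some(exponent) => format!("{mantissa}e{exponent}"),
        None => mantissa,
    }
}

/// Lowercase the `0x` prefix of a hex literal and uppercase its digits.
fn normalize_hex(literal: &str) -> String {
    if literal.len() > 2 && literal[..2].eq_ignore_ascii_case("0x") {
        format!("0x{}", literal[2..].to_ascii_uppercase())
    } else {
        literal.to_string()
    }
}

fn main() {
    assert_eq!(normalize_float(".5"), "0.5");
    assert_eq!(normalize_float("10."), "10.0");
    assert_eq!(normalize_float("1E+6"), "1e6");
    assert_eq!(normalize_hex("0XAbCd"), "0xABCD");
}
```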
- write!(f, [literal(self.range)])?; + write!(f, [literal(self.range, ContainsNewlines::No)])?; } return Ok(()); } } - write!(f, [literal(self.range)])?; + write!(f, [literal(self.range, ContainsNewlines::No)])?; Ok(()) } @@ -165,20 +173,20 @@ pub(crate) struct ComplexLiteral { range: TextRange, } -impl Format for ComplexLiteral { +impl Format> for ComplexLiteral { fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { let contents = f.context().contents(); let content = &contents[self.range]; if content.ends_with('j') { - write!(f, [literal(self.range)])?; + write!(f, [literal(self.range, ContainsNewlines::No)])?; } else if content.ends_with('J') { write!( f, - [literal(TextRange::new( - self.range.start(), - self.range.end().sub(TextSize::from(1)) - ))] + [literal( + TextRange::new(self.range.start(), self.range.end().sub(TextSize::from(1))), + ContainsNewlines::No + )] )?; write!(f, [text("j")])?; } else { diff --git a/crates/ruff_python_formatter/src/format/operator.rs b/crates/ruff_python_formatter/src/format/operator.rs index baede7c6ccce8..360cea69cbe45 100644 --- a/crates/ruff_python_formatter/src/format/operator.rs +++ b/crates/ruff_python_formatter/src/format/operator.rs @@ -1,16 +1,14 @@ -use ruff_formatter::prelude::*; +use crate::prelude::*; use ruff_formatter::write; -use crate::context::ASTFormatContext; use crate::cst::{Operator, OperatorKind}; use crate::format::comments::{end_of_line_comments, leading_comments, trailing_comments}; -use crate::shared_traits::AsFormat; -pub struct FormatOperator<'a> { +pub(crate) struct FormatOperator<'a> { item: &'a Operator, } -impl AsFormat for Operator { +impl AsFormat> for Operator { type Format<'a> = FormatOperator<'a>; fn format(&self) -> Self::Format<'_> { @@ -18,7 +16,7 @@ impl AsFormat for Operator { } } -impl Format for FormatOperator<'_> { +impl Format> for FormatOperator<'_> { fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { let operator = self.item; write!(f, [leading_comments(operator)])?; diff --git a/crates/ruff_python_formatter/src/format/pattern.rs b/crates/ruff_python_formatter/src/format/pattern.rs index b6845cbe174c2..565312c92e9d2 100644 --- a/crates/ruff_python_formatter/src/format/pattern.rs +++ b/crates/ruff_python_formatter/src/format/pattern.rs @@ -1,18 +1,15 @@ use rustpython_parser::ast::Constant; -use ruff_formatter::prelude::*; +use crate::prelude::*; use ruff_formatter::write; -use ruff_text_size::TextSize; -use crate::context::ASTFormatContext; use crate::cst::{Pattern, PatternKind}; -use crate::shared_traits::AsFormat; -pub struct FormatPattern<'a> { +pub(crate) struct FormatPattern<'a> { item: &'a Pattern, } -impl AsFormat for Pattern { +impl AsFormat> for Pattern { type Format<'a> = FormatPattern<'a>; fn format(&self) -> Self::Format<'_> { @@ -20,7 +17,7 @@ impl AsFormat for Pattern { } } -impl Format for FormatPattern<'_> { +impl Format> for FormatPattern<'_> { fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { let pattern = self.item; @@ -79,7 +76,7 @@ impl Format for FormatPattern<'_> { space(), text("**"), space(), - dynamic_text(rest, TextSize::default()) + dynamic_text(rest, None) ] )?; } @@ -105,13 +102,10 @@ impl Format for FormatPattern<'_> { if !kwd_attrs.is_empty() { write!(f, [text("(")])?; if let Some(attr) = kwd_attrs.first() { - write!(f, [dynamic_text(attr, TextSize::default())])?; + write!(f, [dynamic_text(attr, None)])?; } for attr in kwd_attrs.iter().skip(1) { - write!( - f, - [text(","), space(), dynamic_text(attr, TextSize::default())] - )?; + write!(f, [text(","), space(), 
dynamic_text(attr, None)])?; } write!(f, [text(")")])?; } @@ -128,7 +122,7 @@ impl Format for FormatPattern<'_> { } PatternKind::MatchStar { name } => { if let Some(name) = name { - write!(f, [text("*"), dynamic_text(name, TextSize::default())])?; + write!(f, [text("*"), dynamic_text(name, None)])?; } else { write!(f, [text("*_")])?; } @@ -141,7 +135,7 @@ impl Format for FormatPattern<'_> { write!(f, [space()])?; } if let Some(name) = name { - write!(f, [dynamic_text(name, TextSize::default())])?; + write!(f, [dynamic_text(name, None)])?; } else { write!(f, [text("_")])?; } diff --git a/crates/ruff_python_formatter/src/format/stmt.rs b/crates/ruff_python_formatter/src/format/stmt.rs index 5eb7036d80a31..0e76023b01b59 100644 --- a/crates/ruff_python_formatter/src/format/stmt.rs +++ b/crates/ruff_python_formatter/src/format/stmt.rs @@ -1,10 +1,8 @@ #![allow(unused_variables, clippy::too_many_arguments)] -use ruff_formatter::prelude::*; +use crate::prelude::*; use ruff_formatter::{format_args, write}; -use ruff_text_size::TextSize; -use crate::context::ASTFormatContext; use crate::cst::{ Alias, Arguments, Body, Excepthandler, Expr, ExprKind, Keyword, MatchCase, Operator, Stmt, StmtKind, Withitem, @@ -12,7 +10,6 @@ use crate::cst::{ use crate::format::builders::{block, join_names}; use crate::format::comments::{end_of_line_comments, leading_comments, trailing_comments}; use crate::format::helpers::is_self_closing; -use crate::shared_traits::AsFormat; fn format_break(f: &mut Formatter, stmt: &Stmt) -> FormatResult<()> { write!(f, [text("break")])?; @@ -110,14 +107,7 @@ fn format_class_def( write!(f, [leading_comments(body)])?; - write!( - f, - [ - text("class"), - space(), - dynamic_text(name, TextSize::default()) - ] - )?; + write!(f, [text("class"), space(), dynamic_text(name, None)])?; if !bases.is_empty() || !keywords.is_empty() { let format_bases = format_with(|f| { @@ -180,7 +170,7 @@ fn format_func_def( [ text("def"), space(), - dynamic_text(name, TextSize::default()), + dynamic_text(name, None), text("("), group(&soft_block_indent(&format_with(|f| { if stmt.trivia.iter().any(|c| c.kind.is_magic_trailing_comma()) { @@ -653,7 +643,7 @@ fn format_import_from( } } if let Some(module) = module { - write!(f, [dynamic_text(module, TextSize::default())])?; + write!(f, [dynamic_text(module, None)])?; } write!(f, [space()])?; @@ -760,11 +750,11 @@ fn format_with_( Ok(()) } -pub struct FormatStmt<'a> { +pub(crate) struct FormatStmt<'a> { item: &'a Stmt, } -impl Format for FormatStmt<'_> { +impl Format> for FormatStmt<'_> { fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { write!(f, [leading_comments(self.item)])?; @@ -947,7 +937,7 @@ impl Format for FormatStmt<'_> { } } -impl AsFormat for Stmt { +impl AsFormat> for Stmt { type Format<'a> = FormatStmt<'a>; fn format(&self) -> Self::Format<'_> { diff --git a/crates/ruff_python_formatter/src/format/strings.rs b/crates/ruff_python_formatter/src/format/strings.rs index 40c3562d43937..2046608295807 100644 --- a/crates/ruff_python_formatter/src/format/strings.rs +++ b/crates/ruff_python_formatter/src/format/strings.rs @@ -1,11 +1,10 @@ use rustpython_parser::{Mode, Tok}; -use ruff_formatter::prelude::*; +use crate::prelude::*; use ruff_formatter::{write, Format}; use ruff_python_ast::str::{leading_quote, trailing_quote}; -use ruff_text_size::{TextRange, TextSize}; +use ruff_text_size::TextRange; -use crate::context::ASTFormatContext; use crate::cst::Expr; #[derive(Debug, Copy, Clone, Eq, PartialEq)] @@ -13,7 +12,7 @@ pub(crate) struct 
StringLiteralPart { range: TextRange, } -impl Format for StringLiteralPart { +impl Format> for StringLiteralPart { fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { let contents = f.context().contents(); @@ -64,7 +63,6 @@ impl Format for StringLiteralPart { } else { double_escape(body).into() }, - source_position: TextSize::default(), })?; f.write_element(FormatElement::StaticText { text: "\"" })?; Ok(()) @@ -76,7 +74,6 @@ impl Format for StringLiteralPart { } else { single_escape(body).into() }, - source_position: TextSize::default(), })?; f.write_element(FormatElement::StaticText { text: "'" })?; Ok(()) @@ -90,7 +87,6 @@ impl Format for StringLiteralPart { f.write_element(FormatElement::StaticText { text: "'''" })?; f.write_element(FormatElement::DynamicText { text: body.to_string().into_boxed_str(), - source_position: TextSize::default(), })?; f.write_element(FormatElement::StaticText { text: "'''" })?; Ok(()) @@ -98,7 +94,6 @@ impl Format for StringLiteralPart { f.write_element(FormatElement::StaticText { text: "\"\"\"" })?; f.write_element(FormatElement::DynamicText { text: body.to_string().into_boxed_str(), - source_position: TextSize::default(), })?; f.write_element(FormatElement::StaticText { text: "\"\"\"" })?; Ok(()) @@ -119,7 +114,7 @@ pub(crate) struct StringLiteral<'a> { expr: &'a Expr, } -impl Format for StringLiteral<'_> { +impl Format> for StringLiteral<'_> { fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { let expr = self.expr; diff --git a/crates/ruff_python_formatter/src/format/unary_op.rs b/crates/ruff_python_formatter/src/format/unary_op.rs index 39a16f52b5ef7..b16220fee918e 100644 --- a/crates/ruff_python_formatter/src/format/unary_op.rs +++ b/crates/ruff_python_formatter/src/format/unary_op.rs @@ -1,15 +1,13 @@ -use ruff_formatter::prelude::*; +use crate::prelude::*; use ruff_formatter::write; -use crate::context::ASTFormatContext; use crate::cst::{UnaryOp, UnaryOpKind}; -use crate::shared_traits::AsFormat; -pub struct FormatUnaryOp<'a> { +pub(crate) struct FormatUnaryOp<'a> { item: &'a UnaryOp, } -impl AsFormat for UnaryOp { +impl AsFormat> for UnaryOp { type Format<'a> = FormatUnaryOp<'a>; fn format(&self) -> Self::Format<'_> { @@ -17,7 +15,7 @@ impl AsFormat for UnaryOp { } } -impl Format for FormatUnaryOp<'_> { +impl Format> for FormatUnaryOp<'_> { fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { let unary_op = self.item; write!( diff --git a/crates/ruff_python_formatter/src/format/withitem.rs b/crates/ruff_python_formatter/src/format/withitem.rs index 0a4b2e3852321..6b48dcf6e0d3e 100644 --- a/crates/ruff_python_formatter/src/format/withitem.rs +++ b/crates/ruff_python_formatter/src/format/withitem.rs @@ -1,15 +1,13 @@ -use ruff_formatter::prelude::*; +use crate::prelude::*; use ruff_formatter::write; -use crate::context::ASTFormatContext; use crate::cst::Withitem; -use crate::shared_traits::AsFormat; -pub struct FormatWithitem<'a> { +pub(crate) struct FormatWithitem<'a> { item: &'a Withitem, } -impl AsFormat for Withitem { +impl AsFormat> for Withitem { type Format<'a> = FormatWithitem<'a>; fn format(&self) -> Self::Format<'_> { @@ -17,7 +15,7 @@ impl AsFormat for Withitem { } } -impl Format for FormatWithitem<'_> { +impl Format> for FormatWithitem<'_> { fn fmt(&self, f: &mut Formatter) -> FormatResult<()> { let withitem = self.item; diff --git a/crates/ruff_python_formatter/src/lib.rs b/crates/ruff_python_formatter/src/lib.rs index 552f839a338ce..90ee7eb27e8d1 100644 --- a/crates/ruff_python_formatter/src/lib.rs +++ 
b/crates/ruff_python_formatter/src/lib.rs @@ -17,8 +17,10 @@ mod cst; mod format; mod newlines; mod parentheses; -pub mod shared_traits; -pub mod trivia; +mod prelude; +mod trivia; + +include!("../../ruff_formatter/shared_traits.rs"); pub fn fmt(contents: &str) -> Result> { // Create a reusable locator. @@ -67,7 +69,6 @@ mod tests { use insta::assert_snapshot; use ruff_testing_macros::fixture; - use ruff_text_size::TextSize; use similar::TextDiff; use crate::fmt; @@ -180,7 +181,7 @@ mod tests { #[test] fn string_processing() { - use ruff_formatter::prelude::*; + use crate::prelude::*; use ruff_formatter::{format, format_args, write}; struct FormatString<'a>(&'a str); @@ -208,7 +209,7 @@ mod tests { while let Some(word) = words.next() { let is_last = words.peek().is_none(); let format_word = format_with(|f| { - write!(f, [dynamic_text(word, TextSize::default())])?; + write!(f, [dynamic_text(word, None)])?; if is_last { write!(f, [text("\"")])?; diff --git a/crates/ruff_python_formatter/src/prelude.rs b/crates/ruff_python_formatter/src/prelude.rs new file mode 100644 index 0000000000000..01089927b27ca --- /dev/null +++ b/crates/ruff_python_formatter/src/prelude.rs @@ -0,0 +1,3 @@ +#[allow(unused_imports)] +pub(crate) use crate::{ASTFormatContext, AsFormat, FormattedIterExt as _, IntoFormat}; +pub(crate) use ruff_formatter::prelude::*; diff --git a/crates/ruff_python_formatter/src/trivia.rs b/crates/ruff_python_formatter/src/trivia.rs index 8bc00de5bd79a..cab42fd7b6d70 100644 --- a/crates/ruff_python_formatter/src/trivia.rs +++ b/crates/ruff_python_formatter/src/trivia.rs @@ -9,7 +9,7 @@ use crate::cst::{ }; #[derive(Clone, Copy, Debug)] -pub enum Node<'a> { +pub(crate) enum Node<'a> { Alias(&'a Alias), Arg(&'a Arg), Body(&'a Body), @@ -27,7 +27,7 @@ pub enum Node<'a> { } impl Node<'_> { - pub fn id(&self) -> usize { + pub(crate) fn id(&self) -> usize { match self { Node::Alias(node) => node.id(), Node::Arg(node) => node.id(), @@ -46,7 +46,7 @@ impl Node<'_> { } } - pub fn start(&self) -> TextSize { + pub(crate) fn start(&self) -> TextSize { match self { Node::Alias(node) => node.start(), Node::Arg(node) => node.start(), @@ -65,7 +65,7 @@ impl Node<'_> { } } - pub fn end(&self) -> TextSize { + pub(crate) fn end(&self) -> TextSize { match self { Node::Alias(node) => node.end(), Node::Arg(node) => node.end(), @@ -86,7 +86,7 @@ impl Node<'_> { } #[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub enum TriviaTokenKind { +pub(crate) enum TriviaTokenKind { OwnLineComment, EndOfLineComment, MagicTrailingComma, @@ -95,23 +95,23 @@ pub enum TriviaTokenKind { } #[derive(Clone, Debug, PartialEq, Eq)] -pub struct TriviaToken { - pub range: TextRange, - pub kind: TriviaTokenKind, +pub(crate) struct TriviaToken { + pub(crate) range: TextRange, + pub(crate) kind: TriviaTokenKind, } impl TriviaToken { - pub const fn start(&self) -> TextSize { + pub(crate) const fn start(&self) -> TextSize { self.range.start() } - pub const fn end(&self) -> TextSize { + pub(crate) const fn end(&self) -> TextSize { self.range.end() } } #[derive(Clone, Copy, Debug, PartialEq, Eq, is_macro::Is)] -pub enum TriviaKind { +pub(crate) enum TriviaKind { /// A Comment that is separated by at least one line break from the /// preceding token. 
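The new `prelude.rs` above replaces the per-file import lists (`ruff_formatter::prelude::*`, `ASTFormatContext`, `AsFormat`, ...) with one crate-internal prelude. A small sketch of the idiom, using made-up module and type names:

```rust
mod context {
    pub(crate) struct AstFormatContext;
}

mod prelude {
    // Re-export the handful of names every formatter module needs.
    #[allow(unused_imports)]
    pub(crate) use crate::context::AstFormatContext;
}

mod format_alias {
    // One glob import instead of repeating the same `use` lines in every file.
    use crate::prelude::*;

    pub(crate) fn context() -> AstFormatContext {
        AstFormatContext
    }
}

fn main() {
    let _ctx = format_alias::context();
}
```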
/// @@ -140,14 +140,14 @@ pub enum TriviaKind { } #[derive(Clone, Copy, Debug, PartialEq, Eq, is_macro::Is)] -pub enum Relationship { +pub(crate) enum Relationship { Leading, Trailing, Dangling, } #[derive(Clone, Copy, Debug, PartialEq, Eq, is_macro::Is)] -pub enum Parenthesize { +pub(crate) enum Parenthesize { /// Always parenthesize the statement or expression. Always, /// Never parenthesize the statement or expression. @@ -157,13 +157,13 @@ pub enum Parenthesize { } #[derive(Clone, Debug, PartialEq, Eq)] -pub struct Trivia { - pub kind: TriviaKind, - pub relationship: Relationship, +pub(crate) struct Trivia { + pub(crate) kind: TriviaKind, + pub(crate) relationship: Relationship, } impl Trivia { - pub fn from_token(token: &TriviaToken, relationship: Relationship) -> Self { + pub(crate) fn from_token(token: &TriviaToken, relationship: Relationship) -> Self { match token.kind { TriviaTokenKind::MagicTrailingComma => Self { kind: TriviaKind::MagicTrailingComma, @@ -189,7 +189,7 @@ impl Trivia { } } -pub fn extract_trivia_tokens(lxr: &[LexResult]) -> Vec { +pub(crate) fn extract_trivia_tokens(lxr: &[LexResult]) -> Vec { let mut tokens = vec![]; let mut prev_tok: Option<(&Tok, TextRange)> = None; let mut prev_semantic_tok: Option<(&Tok, TextRange)> = None; @@ -731,14 +731,14 @@ fn sorted_child_nodes_inner<'a>(node: Node<'a>, result: &mut Vec>) { } } -pub fn sorted_child_nodes(node: Node) -> Vec { +pub(crate) fn sorted_child_nodes(node: Node) -> Vec { let mut result = Vec::new(); sorted_child_nodes_inner(node, &mut result); result } -pub fn decorate_token<'a>( +pub(crate) fn decorate_token<'a>( token: &TriviaToken, node: Node<'a>, enclosing_node: Option>, @@ -818,20 +818,20 @@ pub fn decorate_token<'a>( } #[derive(Debug, Default)] -pub struct TriviaIndex { - pub alias: FxHashMap>, - pub arg: FxHashMap>, - pub body: FxHashMap>, - pub bool_op: FxHashMap>, - pub cmp_op: FxHashMap>, - pub excepthandler: FxHashMap>, - pub expr: FxHashMap>, - pub keyword: FxHashMap>, - pub operator: FxHashMap>, - pub pattern: FxHashMap>, - pub slice_index: FxHashMap>, - pub stmt: FxHashMap>, - pub unary_op: FxHashMap>, +pub(crate) struct TriviaIndex { + pub(crate) alias: FxHashMap>, + pub(crate) arg: FxHashMap>, + pub(crate) body: FxHashMap>, + pub(crate) bool_op: FxHashMap>, + pub(crate) cmp_op: FxHashMap>, + pub(crate) excepthandler: FxHashMap>, + pub(crate) expr: FxHashMap>, + pub(crate) keyword: FxHashMap>, + pub(crate) operator: FxHashMap>, + pub(crate) pattern: FxHashMap>, + pub(crate) slice_index: FxHashMap>, + pub(crate) stmt: FxHashMap>, + pub(crate) unary_op: FxHashMap>, } fn add_comment(comment: Trivia, node: &Node, trivia: &mut TriviaIndex) { @@ -931,7 +931,7 @@ fn add_comment(comment: Trivia, node: &Node, trivia: &mut TriviaIndex) { } } -pub fn decorate_trivia(tokens: Vec, python_ast: &[Stmt]) -> TriviaIndex { +pub(crate) fn decorate_trivia(tokens: Vec, python_ast: &[Stmt]) -> TriviaIndex { let mut stack = vec![]; let mut cache = FxHashMap::default(); for token in &tokens { diff --git a/crates/ruff_python_semantic/Cargo.toml b/crates/ruff_python_semantic/Cargo.toml index 87ac6230c070c..d0fa45debd163 100644 --- a/crates/ruff_python_semantic/Cargo.toml +++ b/crates/ruff_python_semantic/Cargo.toml @@ -11,6 +11,7 @@ rust-version = { workspace = true } ruff_python_ast = { path = "../ruff_python_ast" } ruff_python_stdlib = { path = "../ruff_python_stdlib" } ruff_text_size = { workspace = true } +ruff_index = { path = "../ruff_index" } bitflags = { workspace = true } is-macro = { workspace = true } diff --git 
a/crates/ruff_python_semantic/src/analyze/function_type.rs b/crates/ruff_python_semantic/src/analyze/function_type.rs index 1df2f34a07f62..8d90e1edd7b6b 100644 --- a/crates/ruff_python_semantic/src/analyze/function_type.rs +++ b/crates/ruff_python_semantic/src/analyze/function_type.rs @@ -3,7 +3,7 @@ use rustpython_parser::ast::Expr; use ruff_python_ast::call_path::from_qualified_name; use ruff_python_ast::helpers::map_callable; -use crate::context::Context; +use crate::model::SemanticModel; use crate::scope::{Scope, ScopeKind}; const CLASS_METHODS: [&str; 3] = ["__new__", "__init_subclass__", "__class_getitem__"]; @@ -19,7 +19,7 @@ pub enum FunctionType { /// Classify a function based on its scope, name, and decorators. pub fn classify( - ctx: &Context, + model: &SemanticModel, scope: &Scope, name: &str, decorator_list: &[Expr], @@ -32,7 +32,8 @@ pub fn classify( if decorator_list.iter().any(|expr| { // The method is decorated with a static method decorator (like // `@staticmethod`). - ctx.resolve_call_path(map_callable(expr)) + model + .resolve_call_path(map_callable(expr)) .map_or(false, |call_path| { call_path.as_slice() == ["", "staticmethod"] || staticmethod_decorators @@ -45,7 +46,7 @@ pub fn classify( // Special-case class method, like `__new__`. || scope.bases.iter().any(|expr| { // The class itself extends a known metaclass, so all methods are class methods. - ctx.resolve_call_path(map_callable(expr)).map_or(false, |call_path| { + model.resolve_call_path(map_callable(expr)).map_or(false, |call_path| { METACLASS_BASES .iter() .any(|(module, member)| call_path.as_slice() == [*module, *member]) @@ -53,7 +54,7 @@ pub fn classify( }) || decorator_list.iter().any(|expr| { // The method is decorated with a class method decorator (like `@classmethod`). - ctx.resolve_call_path(map_callable(expr)).map_or(false, |call_path| { + model.resolve_call_path(map_callable(expr)).map_or(false, |call_path| { call_path.as_slice() == ["", "classmethod"] || classmethod_decorators .iter() diff --git a/crates/ruff_python_semantic/src/analyze/logging.rs b/crates/ruff_python_semantic/src/analyze/logging.rs index 036c8afb23e63..d16ed0425de80 100644 --- a/crates/ruff_python_semantic/src/analyze/logging.rs +++ b/crates/ruff_python_semantic/src/analyze/logging.rs @@ -2,7 +2,7 @@ use rustpython_parser::ast::{self, Expr}; use ruff_python_ast::call_path::collect_call_path; -use crate::context::Context; +use crate::model::SemanticModel; /// Return `true` if the given `Expr` is a potential logging call. Matches /// `logging.error`, `logger.error`, `self.logger.error`, etc., but not @@ -16,9 +16,9 @@ use crate::context::Context; /// # This is detected to be a logger candidate /// bar.error() /// ``` -pub fn is_logger_candidate(context: &Context, func: &Expr) -> bool { +pub fn is_logger_candidate(func: &Expr, model: &SemanticModel) -> bool { if let Expr::Attribute(ast::ExprAttribute { value, .. 
}) = func { - let Some(call_path) = (if let Some(call_path) = context.resolve_call_path(value) { + let Some(call_path) = (if let Some(call_path) = model.resolve_call_path(value) { if call_path.first().map_or(false, |module| *module == "logging") || call_path.as_slice() == ["flask", "current_app", "logger"] { Some(call_path) } else { diff --git a/crates/ruff_python_semantic/src/analyze/typing.rs b/crates/ruff_python_semantic/src/analyze/typing.rs index 4b467de4aafdc..e9c6bf4a8f8ab 100644 --- a/crates/ruff_python_semantic/src/analyze/typing.rs +++ b/crates/ruff_python_semantic/src/analyze/typing.rs @@ -5,7 +5,7 @@ use ruff_python_stdlib::typing::{ IMMUTABLE_GENERIC_TYPES, IMMUTABLE_TYPES, PEP_585_GENERICS, PEP_593_SUBSCRIPTS, SUBSCRIPTS, }; -use crate::context::Context; +use crate::model::SemanticModel; #[derive(Copy, Clone)] pub enum Callable { @@ -26,14 +26,14 @@ pub enum SubscriptKind { pub fn match_annotated_subscript<'a>( expr: &Expr, - context: &Context, + model: &SemanticModel, typing_modules: impl Iterator, ) -> Option { if !matches!(expr, Expr::Name(_) | Expr::Attribute(_)) { return None; } - context.resolve_call_path(expr).and_then(|call_path| { + model.resolve_call_path(expr).and_then(|call_path| { if SUBSCRIPTS.contains(&call_path.as_slice()) { return Some(SubscriptKind::AnnotatedSubscript); } @@ -80,8 +80,8 @@ impl std::fmt::Display for ModuleMember { /// Returns the PEP 585 standard library generic variant for a `typing` module reference, if such /// a variant exists. -pub fn to_pep585_generic(expr: &Expr, context: &Context) -> Option { - context.resolve_call_path(expr).and_then(|call_path| { +pub fn to_pep585_generic(expr: &Expr, model: &SemanticModel) -> Option { + model.resolve_call_path(expr).and_then(|call_path| { let [module, name] = call_path.as_slice() else { return None; }; @@ -110,7 +110,11 @@ pub enum Pep604Operator { } /// Return the PEP 604 operator variant to which the given subscript [`Expr`] corresponds, if any. -pub fn to_pep604_operator(value: &Expr, slice: &Expr, context: &Context) -> Option { +pub fn to_pep604_operator( + value: &Expr, + slice: &Expr, + model: &SemanticModel, +) -> Option { /// Returns `true` if any argument in the slice is a string. fn any_arg_is_str(slice: &Expr) -> bool { match slice { @@ -129,13 +133,13 @@ pub fn to_pep604_operator(value: &Expr, slice: &Expr, context: &Context) -> Opti return None; } - context + model .resolve_call_path(value) .as_ref() .and_then(|call_path| { - if context.match_typing_call_path(call_path, "Optional") { + if model.match_typing_call_path(call_path, "Optional") { Some(Pep604Operator::Optional) - } else if context.match_typing_call_path(call_path, "Union") { + } else if model.match_typing_call_path(call_path, "Union") { Some(Pep604Operator::Union) } else { None @@ -145,10 +149,10 @@ pub fn to_pep604_operator(value: &Expr, slice: &Expr, context: &Context) -> Opti /// Return `true` if `Expr` represents a reference to a type annotation that resolves to an /// immutable type. -pub fn is_immutable_annotation(context: &Context, expr: &Expr) -> bool { +pub fn is_immutable_annotation(model: &SemanticModel, expr: &Expr) -> bool { match expr { Expr::Name(_) | Expr::Attribute(_) => { - context.resolve_call_path(expr).map_or(false, |call_path| { + model.resolve_call_path(expr).map_or(false, |call_path| { IMMUTABLE_TYPES .iter() .chain(IMMUTABLE_GENERIC_TYPES) @@ -156,7 +160,7 @@ pub fn is_immutable_annotation(context: &Context, expr: &Expr) -> bool { }) } Expr::Subscript(ast::ExprSubscript { value, slice, .. 
}) => { - context.resolve_call_path(value).map_or(false, |call_path| { + model.resolve_call_path(value).map_or(false, |call_path| { if IMMUTABLE_GENERIC_TYPES .iter() .any(|target| call_path.as_slice() == *target) @@ -164,16 +168,16 @@ pub fn is_immutable_annotation(context: &Context, expr: &Expr) -> bool { true } else if call_path.as_slice() == ["typing", "Union"] { if let Expr::Tuple(ast::ExprTuple { elts, .. }) = slice.as_ref() { - elts.iter().all(|elt| is_immutable_annotation(context, elt)) + elts.iter().all(|elt| is_immutable_annotation(model, elt)) } else { false } } else if call_path.as_slice() == ["typing", "Optional"] { - is_immutable_annotation(context, slice) + is_immutable_annotation(model, slice) } else if call_path.as_slice() == ["typing", "Annotated"] { if let Expr::Tuple(ast::ExprTuple { elts, .. }) = slice.as_ref() { elts.first() - .map_or(false, |elt| is_immutable_annotation(context, elt)) + .map_or(false, |elt| is_immutable_annotation(model, elt)) } else { false } @@ -187,7 +191,7 @@ pub fn is_immutable_annotation(context: &Context, expr: &Expr) -> bool { op: Operator::BitOr, right, range: _range, - }) => is_immutable_annotation(context, left) && is_immutable_annotation(context, right), + }) => is_immutable_annotation(model, left) && is_immutable_annotation(model, right), Expr::Constant(ast::ExprConstant { value: Constant::None, .. @@ -213,11 +217,11 @@ const IMMUTABLE_FUNCS: &[&[&str]] = &[ /// Return `true` if `func` is a function that returns an immutable object. pub fn is_immutable_func( - context: &Context, + model: &SemanticModel, func: &Expr, extend_immutable_calls: &[CallPath], ) -> bool { - context.resolve_call_path(func).map_or(false, |call_path| { + model.resolve_call_path(func).map_or(false, |call_path| { IMMUTABLE_FUNCS .iter() .any(|target| call_path.as_slice() == *target) diff --git a/crates/ruff_python_semantic/src/analyze/visibility.rs b/crates/ruff_python_semantic/src/analyze/visibility.rs index 4c2bc6a4bc64f..b5e5ac9d25e42 100644 --- a/crates/ruff_python_semantic/src/analyze/visibility.rs +++ b/crates/ruff_python_semantic/src/analyze/visibility.rs @@ -5,7 +5,7 @@ use rustpython_parser::ast::{self, Expr, Stmt}; use ruff_python_ast::call_path::{collect_call_path, CallPath}; use ruff_python_ast::helpers::map_callable; -use crate::context::Context; +use crate::model::SemanticModel; #[derive(Debug, Clone, Copy, is_macro::Is)] pub enum Visibility { @@ -14,9 +14,10 @@ pub enum Visibility { } /// Returns `true` if a function is a "static method". -pub fn is_staticmethod(ctx: &Context, decorator_list: &[Expr]) -> bool { +pub fn is_staticmethod(model: &SemanticModel, decorator_list: &[Expr]) -> bool { decorator_list.iter().any(|expr| { - ctx.resolve_call_path(map_callable(expr)) + model + .resolve_call_path(map_callable(expr)) .map_or(false, |call_path| { call_path.as_slice() == ["", "staticmethod"] }) @@ -24,9 +25,10 @@ pub fn is_staticmethod(ctx: &Context, decorator_list: &[Expr]) -> bool { } /// Returns `true` if a function is a "class method". -pub fn is_classmethod(ctx: &Context, decorator_list: &[Expr]) -> bool { +pub fn is_classmethod(model: &SemanticModel, decorator_list: &[Expr]) -> bool { decorator_list.iter().any(|expr| { - ctx.resolve_call_path(map_callable(expr)) + model + .resolve_call_path(map_callable(expr)) .map_or(false, |call_path| { call_path.as_slice() == ["", "classmethod"] }) @@ -34,23 +36,24 @@ pub fn is_classmethod(ctx: &Context, decorator_list: &[Expr]) -> bool { } /// Returns `true` if a function definition is an `@overload`. 
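`is_immutable_annotation` above recurses through `Union`, `Optional`, and `Annotated` subscripts after resolving call paths against the `SemanticModel`. The same idea on a toy annotation tree (the enum, the list of immutable names, and the omission of `Annotated` are simplifications, not the real rule set):

```rust
/// A toy annotation tree standing in for resolved AST expressions.
enum Annotation {
    Name(&'static str),
    Optional(Box<Annotation>),
    Union(Vec<Annotation>),
}

fn is_immutable(annotation: &Annotation) -> bool {
    // A deliberately tiny stand-in for the real immutable-type tables.
    const IMMUTABLE: &[&str] = &["bool", "int", "str", "frozenset", "tuple"];
    match annotation {
        Annotation::Name(name) => IMMUTABLE.contains(name),
        // `Optional[X]` is immutable iff `X` is.
        Annotation::Optional(inner) => is_immutable(inner),
        // `Union[X, Y, ...]` is immutable iff every member is.
        Annotation::Union(members) => members.iter().all(is_immutable),
    }
}

fn main() {
    let annotation = Annotation::Union(vec![
        Annotation::Name("int"),
        Annotation::Optional(Box::new(Annotation::Name("str"))),
    ]);
    assert!(is_immutable(&annotation));
    assert!(!is_immutable(&Annotation::Name("list")));
}
```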
-pub fn is_overload(ctx: &Context, decorator_list: &[Expr]) -> bool { +pub fn is_overload(model: &SemanticModel, decorator_list: &[Expr]) -> bool { decorator_list .iter() - .any(|expr| ctx.match_typing_expr(map_callable(expr), "overload")) + .any(|expr| model.match_typing_expr(map_callable(expr), "overload")) } /// Returns `true` if a function definition is an `@override` (PEP 698). -pub fn is_override(ctx: &Context, decorator_list: &[Expr]) -> bool { +pub fn is_override(model: &SemanticModel, decorator_list: &[Expr]) -> bool { decorator_list .iter() - .any(|expr| ctx.match_typing_expr(map_callable(expr), "override")) + .any(|expr| model.match_typing_expr(map_callable(expr), "override")) } /// Returns `true` if a function definition is an abstract method based on its decorators. -pub fn is_abstract(ctx: &Context, decorator_list: &[Expr]) -> bool { +pub fn is_abstract(model: &SemanticModel, decorator_list: &[Expr]) -> bool { decorator_list.iter().any(|expr| { - ctx.resolve_call_path(map_callable(expr)) + model + .resolve_call_path(map_callable(expr)) .map_or(false, |call_path| { matches!( call_path.as_slice(), @@ -69,9 +72,14 @@ pub fn is_abstract(ctx: &Context, decorator_list: &[Expr]) -> bool { /// Returns `true` if a function definition is a `@property`. /// `extra_properties` can be used to check additional non-standard /// `@property`-like decorators. -pub fn is_property(ctx: &Context, decorator_list: &[Expr], extra_properties: &[CallPath]) -> bool { +pub fn is_property( + model: &SemanticModel, + decorator_list: &[Expr], + extra_properties: &[CallPath], +) -> bool { decorator_list.iter().any(|expr| { - ctx.resolve_call_path(map_callable(expr)) + model + .resolve_call_path(map_callable(expr)) .map_or(false, |call_path| { call_path.as_slice() == ["", "property"] || call_path.as_slice() == ["functools", "cached_property"] diff --git a/crates/ruff_python_semantic/src/binding.rs b/crates/ruff_python_semantic/src/binding.rs index 53092906c049c..740b898eead38 100644 --- a/crates/ruff_python_semantic/src/binding.rs +++ b/crates/ruff_python_semantic/src/binding.rs @@ -1,62 +1,51 @@ -use std::num::TryFromIntError; -use std::ops::{Deref, Index, IndexMut}; +use std::ops::{Deref, DerefMut}; use bitflags::bitflags; use ruff_text_size::TextRange; +use ruff_index::{newtype_index, IndexSlice, IndexVec}; +use ruff_python_ast::helpers; +use ruff_python_ast::source_code::Locator; + +use crate::context::ExecutionContext; +use crate::model::SemanticModel; use crate::node::NodeId; -use crate::scope::ScopeId; +use crate::reference::ReferenceId; #[derive(Debug, Clone)] pub struct Binding<'a> { pub kind: BindingKind<'a>, pub range: TextRange, - /// The context in which the binding was created. + /// The context in which the [`Binding`] was created. pub context: ExecutionContext, /// The statement in which the [`Binding`] was defined. pub source: Option, - /// Tuple of (scope index, range) indicating the scope and range at which - /// the binding was last used in a runtime context. - pub runtime_usage: Option<(ScopeId, TextRange)>, - /// Tuple of (scope index, range) indicating the scope and range at which - /// the binding was last used in a typing-time context. - pub typing_usage: Option<(ScopeId, TextRange)>, - /// Tuple of (scope index, range) indicating the scope and range at which - /// the binding was last used in a synthetic context. This is used for - /// (e.g.) `__future__` imports, explicit re-exports, and other bindings - /// that should be considered used even if they're never referenced. 
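The `visibility` and `function_type` helpers above share one pattern: resolve each decorator to a call path, then compare it against known names, with builtins spelled as a leading empty segment. A self-contained approximation that takes already-resolved call paths as input (the real helpers resolve the paths through the `SemanticModel` first):

```rust
/// Is any decorator the builtin `@staticmethod`, or one of the
/// project-configured equivalents?
fn is_staticmethod(decorators: &[Vec<&str>], extra_decorators: &[Vec<&str>]) -> bool {
    decorators.iter().any(|call_path| {
        call_path.as_slice() == ["", "staticmethod"]
            || extra_decorators.iter().any(|extra| extra == call_path)
    })
}

fn main() {
    // Resolved from `@staticmethod` on a method.
    let decorators = vec![vec!["", "staticmethod"]];
    assert!(is_staticmethod(&decorators, &[]));

    // `@abc.abstractmethod` is not a staticmethod marker.
    let decorators = vec![vec!["abc", "abstractmethod"]];
    assert!(!is_staticmethod(&decorators, &[]));

    // A configured alias, e.g. a hypothetical `@mylib.static_method`.
    let extra = vec![vec!["mylib", "static_method"]];
    assert!(is_staticmethod(&[vec!["mylib", "static_method"]], &extra));
}
```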
- pub synthetic_usage: Option<(ScopeId, TextRange)>, - /// The exceptions that were handled when the binding was defined. + /// The references to the [`Binding`]. + pub references: Vec, + /// The exceptions that were handled when the [`Binding`] was defined. pub exceptions: Exceptions, + /// Flags for the [`Binding`]. + pub flags: BindingFlags, } impl<'a> Binding<'a> { - pub fn mark_used(&mut self, scope: ScopeId, range: TextRange, context: ExecutionContext) { - match context { - ExecutionContext::Runtime => self.runtime_usage = Some((scope, range)), - ExecutionContext::Typing => self.typing_usage = Some((scope, range)), - } + /// Return `true` if this [`Binding`] is used. + pub fn is_used(&self) -> bool { + !self.references.is_empty() } - pub const fn used(&self) -> bool { - self.runtime_usage.is_some() - || self.synthetic_usage.is_some() - || self.typing_usage.is_some() + /// Returns an iterator over all references for the current [`Binding`]. + pub fn references(&self) -> impl Iterator + '_ { + self.references.iter().copied() } - pub const fn is_definition(&self) -> bool { - matches!( - self.kind, - BindingKind::ClassDefinition - | BindingKind::FunctionDefinition - | BindingKind::Builtin - | BindingKind::FutureImportation - | BindingKind::Importation(..) - | BindingKind::FromImportation(..) - | BindingKind::SubmoduleImportation(..) - ) + /// Return `true` if this [`Binding`] represents an explicit re-export + /// (e.g., `import FastAPI as FastAPI`). + pub const fn is_explicit_export(&self) -> bool { + self.flags.contains(BindingFlags::EXPLICIT_EXPORT) } + /// Return `true` if this binding redefines the given binding. pub fn redefines(&self, existing: &'a Binding) -> bool { match &self.kind { BindingKind::Importation(Importation { full_name, .. }) => { @@ -106,7 +95,41 @@ impl<'a> Binding<'a> { } _ => {} } - existing.is_definition() + matches!( + existing.kind, + BindingKind::ClassDefinition + | BindingKind::FunctionDefinition + | BindingKind::Builtin + | BindingKind::Importation(..) + | BindingKind::FromImportation(..) + | BindingKind::SubmoduleImportation(..) + ) + } + + /// Returns the appropriate visual range for highlighting this binding. + pub fn trimmed_range(&self, semantic_model: &SemanticModel, locator: &Locator) -> TextRange { + match self.kind { + BindingKind::ClassDefinition | BindingKind::FunctionDefinition => { + self.source.map_or(self.range, |source| { + helpers::identifier_range(semantic_model.stmts[source], locator) + }) + } + _ => self.range, + } + } +} + +bitflags! { + /// Flags on a [`Binding`]. + #[derive(Debug, Default, Copy, Clone, Eq, PartialEq)] + pub struct BindingFlags: u8 { + /// The binding represents an explicit re-export. + /// + /// For example, the binding could be `FastAPI` in: + /// ```python + /// import FastAPI as FastAPI + /// ``` + const EXPLICIT_EXPORT = 1 << 0; } } @@ -115,16 +138,8 @@ impl<'a> Binding<'a> { /// Using a `u32` to identify [Binding]s should is sufficient because Ruff only supports documents with a /// size smaller than or equal to `u32::max`. A document with the size of `u32::max` must have fewer than `u32::max` /// bindings because bindings must be separated by whitespace (and have an assignment). 
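`BindingFlags` above comes from the `bitflags!` macro, which expands to roughly a newtype over `u8` with bit-mask constants. A hand-rolled stand-in showing how `EXPLICIT_EXPORT` is set and queried (the real type is generated by the `bitflags` crate and has a richer API than shown here):

```rust
#[derive(Copy, Clone, Default, PartialEq, Eq, Debug)]
struct BindingFlags(u8);

impl BindingFlags {
    const EXPLICIT_EXPORT: BindingFlags = BindingFlags(1 << 0);

    fn insert(&mut self, other: BindingFlags) {
        self.0 |= other.0;
    }

    fn contains(self, other: BindingFlags) -> bool {
        self.0 & other.0 == other.0
    }
}

struct Binding {
    flags: BindingFlags,
}

impl Binding {
    /// `import FastAPI as FastAPI` marks the binding as an explicit re-export.
    fn is_explicit_export(&self) -> bool {
        self.flags.contains(BindingFlags::EXPLICIT_EXPORT)
    }
}

fn main() {
    let mut flags = BindingFlags::default();
    flags.insert(BindingFlags::EXPLICIT_EXPORT);
    assert!(Binding { flags }.is_explicit_export());
}
```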
-#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] -pub struct BindingId(u32); - -impl TryFrom for BindingId { - type Error = TryFromIntError; - - fn try_from(value: usize) -> Result { - Ok(Self(u32::try_from(value)?)) - } -} +#[newtype_index] +pub struct BindingId; impl nohash_hasher::IsEnabled for BindingId {} @@ -132,53 +147,37 @@ impl nohash_hasher::IsEnabled for BindingId {} /// /// Bindings are indexed by [`BindingId`] #[derive(Debug, Clone, Default)] -pub struct Bindings<'a>(Vec>); +pub struct Bindings<'a>(IndexVec>); impl<'a> Bindings<'a> { /// Pushes a new binding and returns its id pub fn push(&mut self, binding: Binding<'a>) -> BindingId { - let id = self.next_id(); - self.0.push(binding); - id + self.0.push(binding) } /// Returns the id that will be assigned when pushing the next binding pub fn next_id(&self) -> BindingId { - BindingId::try_from(self.0.len()).unwrap() - } -} - -impl<'a> Index for Bindings<'a> { - type Output = Binding<'a>; - - fn index(&self, index: BindingId) -> &Self::Output { - &self.0[usize::from(index)] - } -} - -impl<'a> IndexMut for Bindings<'a> { - fn index_mut(&mut self, index: BindingId) -> &mut Self::Output { - &mut self.0[usize::from(index)] + self.0.next_index() } } impl<'a> Deref for Bindings<'a> { - type Target = [Binding<'a>]; + type Target = IndexSlice>; fn deref(&self) -> &Self::Target { &self.0 } } -impl<'a> FromIterator> for Bindings<'a> { - fn from_iter>>(iter: T) -> Self { - Self(Vec::from_iter(iter)) +impl<'a> DerefMut for Bindings<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 } } -impl From for usize { - fn from(value: BindingId) -> Self { - value.0 as usize +impl<'a> FromIterator> for Bindings<'a> { + fn from_iter>>(iter: T) -> Self { + Self(IndexVec::from_iter(iter)) } } @@ -274,9 +273,3 @@ bitflags! { const IMPORT_ERROR = 0b0000_0100; } } - -#[derive(Copy, Debug, Clone)] -pub enum ExecutionContext { - Runtime, - Typing, -} diff --git a/crates/ruff_python_semantic/src/context.rs b/crates/ruff_python_semantic/src/context.rs index eb52f550ff3cd..6742b68219229 100644 --- a/crates/ruff_python_semantic/src/context.rs +++ b/crates/ruff_python_semantic/src/context.rs @@ -1,710 +1,7 @@ -use std::collections::HashMap; -use std::path::Path; - -use bitflags::bitflags; -use nohash_hasher::{BuildNoHashHasher, IntMap}; -use rustpython_parser::ast::{Expr, Stmt}; -use smallvec::smallvec; - -use ruff_python_ast::call_path::{collect_call_path, from_unqualified_name, CallPath}; -use ruff_python_ast::helpers::from_relative_import; -use ruff_python_stdlib::path::is_python_stub_file; -use ruff_python_stdlib::typing::TYPING_EXTENSIONS; - -use crate::binding::{ - Binding, BindingId, BindingKind, Bindings, Exceptions, ExecutionContext, FromImportation, - Importation, SubmoduleImportation, -}; -use crate::definition::{Definition, DefinitionId, Definitions, Member, Module}; -use crate::node::{NodeId, Nodes}; -use crate::scope::{Scope, ScopeId, ScopeKind, Scopes}; - -#[allow(clippy::struct_excessive_bools)] -pub struct Context<'a> { - pub typing_modules: &'a [String], - pub module_path: Option<&'a [String]>, - // Stack of all visited statements, along with the identifier of the current statement. - pub stmts: Nodes<'a>, - pub stmt_id: Option, - // Stack of current expressions. - pub exprs: Vec<&'a Expr>, - // Stack of all scopes, along with the identifier of the current scope. 
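`#[newtype_index]` plus `IndexVec` above replace the hand-written `BindingId(u32)` and its `TryFrom`/`Index`/`IndexMut` boilerplate. A dependency-free sketch of what that pairing provides (the real `ruff_index` types offer more API and stronger guarantees than this toy version):

```rust
use std::ops::Index;

/// A typed id: you cannot accidentally index `Bindings` with a plain usize
/// or with an id that belongs to a different arena type.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
struct BindingId(u32);

#[derive(Default)]
struct Bindings<T>(Vec<T>);

impl<T> Bindings<T> {
    /// Push a binding and return the id it was assigned.
    fn push(&mut self, value: T) -> BindingId {
        let id = self.next_id();
        self.0.push(value);
        id
    }

    /// The id the next `push` will return.
    fn next_id(&self) -> BindingId {
        BindingId(u32::try_from(self.0.len()).unwrap())
    }
}

impl<T> Index<BindingId> for Bindings<T> {
    type Output = T;

    fn index(&self, index: BindingId) -> &T {
        &self.0[index.0 as usize]
    }
}

fn main() {
    let mut bindings = Bindings::default();
    let id = bindings.push("os.path");
    assert_eq!(bindings[id], "os.path");
}
```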
- pub scopes: Scopes<'a>, - pub scope_id: ScopeId, - pub dead_scopes: Vec, - // Stack of all definitions created in any scope, at any point in execution, along with the - // identifier of the current definition. - pub definitions: Definitions<'a>, - pub definition_id: DefinitionId, - // A stack of all bindings created in any scope, at any point in execution. - pub bindings: Bindings<'a>, - // Map from binding index to indexes of bindings that shadow it in other scopes. - pub shadowed_bindings: HashMap, BuildNoHashHasher>, - // Body iteration; used to peek at siblings. - pub body: &'a [Stmt], - pub body_index: usize, - // Internal, derivative state. - pub flags: ContextFlags, - pub handled_exceptions: Vec, -} - -impl<'a> Context<'a> { - pub fn new(typing_modules: &'a [String], path: &'a Path, module: Module<'a>) -> Self { - Self { - typing_modules, - module_path: module.path(), - stmts: Nodes::default(), - stmt_id: None, - exprs: Vec::default(), - scopes: Scopes::default(), - scope_id: ScopeId::global(), - dead_scopes: Vec::default(), - definitions: Definitions::for_module(module), - definition_id: DefinitionId::module(), - bindings: Bindings::default(), - shadowed_bindings: IntMap::default(), - body: &[], - body_index: 0, - flags: ContextFlags::new(path), - handled_exceptions: Vec::default(), - } - } - - /// Return `true` if the `Expr` is a reference to `typing.${target}`. - pub fn match_typing_expr(&self, expr: &Expr, target: &str) -> bool { - self.resolve_call_path(expr).map_or(false, |call_path| { - self.match_typing_call_path(&call_path, target) - }) - } - - /// Return `true` if the call path is a reference to `typing.${target}`. - pub fn match_typing_call_path(&self, call_path: &CallPath, target: &str) -> bool { - if call_path.as_slice() == ["typing", target] { - return true; - } - - if TYPING_EXTENSIONS.contains(target) { - if call_path.as_slice() == ["typing_extensions", target] { - return true; - } - } - - if self.typing_modules.iter().any(|module| { - let mut module: CallPath = from_unqualified_name(module); - module.push(target); - *call_path == module - }) { - return true; - } - - false - } - - /// Return the current `Binding` for a given `name`. - pub fn find_binding(&self, member: &str) -> Option<&Binding> { - self.scopes() - .find_map(|scope| scope.get(member)) - .map(|index| &self.bindings[*index]) - } - - /// Return `true` if `member` is bound as a builtin. - pub fn is_builtin(&self, member: &str) -> bool { - self.find_binding(member) - .map_or(false, |binding| binding.kind.is_builtin()) - } - - /// Resolves the [`Expr`] to a fully-qualified symbol-name, if `value` resolves to an imported - /// or builtin symbol. - /// - /// E.g., given: - /// - /// - /// ```python - /// from sys import version_info as python_version - /// print(python_version) - /// ``` - /// - /// ...then `resolve_call_path(${python_version})` will resolve to `sys.version_info`. - pub fn resolve_call_path<'b>(&'a self, value: &'b Expr) -> Option> - where - 'b: 'a, - { - let Some(call_path) = collect_call_path(value) else { - return None; - }; - let Some(head) = call_path.first() else { - return None; - }; - let Some(binding) = self.find_binding(head) else { - return None; - }; - match &binding.kind { - BindingKind::Importation(Importation { - full_name: name, .. - }) - | BindingKind::SubmoduleImportation(SubmoduleImportation { name, .. 
}) => { - if name.starts_with('.') { - if let Some(module) = &self.module_path { - let mut source_path = from_relative_import(module, name); - if source_path.is_empty() { - None - } else { - source_path.extend(call_path.into_iter().skip(1)); - Some(source_path) - } - } else { - None - } - } else { - let mut source_path: CallPath = from_unqualified_name(name); - source_path.extend(call_path.into_iter().skip(1)); - Some(source_path) - } - } - BindingKind::FromImportation(FromImportation { - full_name: name, .. - }) => { - if name.starts_with('.') { - if let Some(module) = &self.module_path { - let mut source_path = from_relative_import(module, name); - if source_path.is_empty() { - None - } else { - source_path.extend(call_path.into_iter().skip(1)); - Some(source_path) - } - } else { - None - } - } else { - let mut source_path: CallPath = from_unqualified_name(name); - source_path.extend(call_path.into_iter().skip(1)); - Some(source_path) - } - } - BindingKind::Builtin => { - let mut source_path: CallPath = smallvec![]; - source_path.push(""); - source_path.extend(call_path); - Some(source_path) - } - _ => None, - } - } - - /// Given a `module` and `member`, return the fully-qualified name of the binding in the current - /// scope, if it exists. - /// - /// E.g., given: - /// - /// ```python - /// from sys import version_info as python_version - /// print(python_version) - /// ``` - /// - /// ...then `resolve_qualified_import_name("sys", "version_info")` will return - /// `Some("python_version")`. - pub fn resolve_qualified_import_name( - &self, - module: &str, - member: &str, - ) -> Option<(&Stmt, String)> { - self.scopes().enumerate().find_map(|(scope_index, scope)| { - scope.binding_ids().find_map(|binding_index| { - let binding = &self.bindings[*binding_index]; - match &binding.kind { - // Ex) Given `module="sys"` and `object="exit"`: - // `import sys` -> `sys.exit` - // `import sys as sys2` -> `sys2.exit` - BindingKind::Importation(Importation { name, full_name }) => { - if full_name == &module { - // Verify that `sys` isn't bound in an inner scope. - if self - .scopes() - .take(scope_index) - .all(|scope| scope.get(name).is_none()) - { - if let Some(source) = binding.source { - return Some((self.stmts[source], format!("{name}.{member}"))); - } - } - } - } - // Ex) Given `module="os.path"` and `object="join"`: - // `from os.path import join` -> `join` - // `from os.path import join as join2` -> `join2` - BindingKind::FromImportation(FromImportation { name, full_name }) => { - if let Some((target_module, target_member)) = full_name.split_once('.') { - if target_module == module && target_member == member { - // Verify that `join` isn't bound in an inner scope. - if self - .scopes() - .take(scope_index) - .all(|scope| scope.get(name).is_none()) - { - if let Some(source) = binding.source { - return Some((self.stmts[source], (*name).to_string())); - } - } - } - } - } - // Ex) Given `module="os"` and `object="name"`: - // `import os.path ` -> `os.name` - BindingKind::SubmoduleImportation(SubmoduleImportation { name, .. }) => { - if name == &module { - // Verify that `os` isn't bound in an inner scope. - if self - .scopes() - .take(scope_index) - .all(|scope| scope.get(name).is_none()) - { - if let Some(source) = binding.source { - return Some((self.stmts[source], format!("{name}.{member}"))); - } - } - } - } - // Non-imports. - _ => {} - } - None - }) - }) - } - - /// Push a [`Stmt`] onto the stack. 
- pub fn push_stmt(&mut self, stmt: &'a Stmt) { - self.stmt_id = Some(self.stmts.insert(stmt, self.stmt_id)); - } - - /// Pop the current [`Stmt`] off the stack. - pub fn pop_stmt(&mut self) { - let node_id = self.stmt_id.expect("Attempted to pop without statement"); - self.stmt_id = self.stmts.parent_id(node_id); - } - - /// Push an [`Expr`] onto the stack. - pub fn push_expr(&mut self, expr: &'a Expr) { - self.exprs.push(expr); - } - - /// Pop the current [`Expr`] off the stack. - pub fn pop_expr(&mut self) { - self.exprs - .pop() - .expect("Attempted to pop without expression"); - } - - /// Push a [`Scope`] with the given [`ScopeKind`] onto the stack. - pub fn push_scope(&mut self, kind: ScopeKind<'a>) { - let id = self.scopes.push_scope(kind, self.scope_id); - self.scope_id = id; - } - - /// Pop the current [`Scope`] off the stack. - pub fn pop_scope(&mut self) { - self.dead_scopes.push(self.scope_id); - self.scope_id = self.scopes[self.scope_id] - .parent - .expect("Attempted to pop without scope"); - } - - /// Push a [`Member`] onto the stack. - pub fn push_definition(&mut self, definition: Member<'a>) { - self.definition_id = self.definitions.push_member(definition); - } - - /// Pop the current [`Member`] off the stack. - pub fn pop_definition(&mut self) { - let Definition::Member(member) = &self.definitions[self.definition_id] else { - panic!("Attempted to pop without member definition"); - }; - self.definition_id = member.parent; - } - - /// Return the current `Stmt`. - pub fn stmt(&self) -> &'a Stmt { - let node_id = self.stmt_id.expect("No current statement"); - self.stmts[node_id] - } - - /// Return the parent `Stmt` of the current `Stmt`, if any. - pub fn stmt_parent(&self) -> Option<&'a Stmt> { - let node_id = self.stmt_id.expect("No current statement"); - let parent_id = self.stmts.parent_id(node_id)?; - Some(self.stmts[parent_id]) - } - - /// Return the current `Expr`. - pub fn expr(&self) -> Option<&'a Expr> { - self.exprs.iter().last().copied() - } - - /// Return the parent `Expr` of the current `Expr`. - pub fn expr_parent(&self) -> Option<&'a Expr> { - self.exprs.iter().rev().nth(1).copied() - } - - /// Return the grandparent `Expr` of the current `Expr`. - pub fn expr_grandparent(&self) -> Option<&'a Expr> { - self.exprs.iter().rev().nth(2).copied() - } - - /// Return an [`Iterator`] over the current `Expr` parents. - pub fn expr_ancestors(&self) -> impl Iterator { - self.exprs.iter().rev().skip(1) - } - - /// Return the `Stmt` that immediately follows the current `Stmt`, if any. - pub fn sibling_stmt(&self) -> Option<&'a Stmt> { - self.body.get(self.body_index + 1) - } - - /// Returns a reference to the global scope - pub fn global_scope(&self) -> &Scope<'a> { - self.scopes.global() - } - - /// Returns a mutable reference to the global scope - pub fn global_scope_mut(&mut self) -> &mut Scope<'a> { - self.scopes.global_mut() - } - - /// Returns the current top most scope. - pub fn scope(&self) -> &Scope<'a> { - &self.scopes[self.scope_id] - } - - /// Returns a mutable reference to the current top most scope. - pub fn scope_mut(&mut self) -> &mut Scope<'a> { - &mut self.scopes[self.scope_id] - } - - /// Returns an iterator over all scopes, starting from the current scope. 
- pub fn scopes(&self) -> impl Iterator { - self.scopes.ancestors(self.scope_id) - } - - pub fn parents(&self) -> impl Iterator + '_ { - let node_id = self.stmt_id.expect("No current statement"); - self.stmts.ancestor_ids(node_id).map(|id| self.stmts[id]) - } - - /// Return `true` if the context is at the top level of the module (i.e., in the module scope, - /// and not nested within any statements). - pub fn at_top_level(&self) -> bool { - self.scope_id.is_global() - && self - .stmt_id - .map_or(true, |stmt_id| self.stmts.parent_id(stmt_id).is_none()) - } - - /// Return the [`ExecutionContext`] of the current scope. - pub const fn execution_context(&self) -> ExecutionContext { - if self.in_type_checking_block() - || self.in_annotation() - || self.in_complex_string_type_definition() - || self.in_simple_string_type_definition() - { - ExecutionContext::Typing - } else { - ExecutionContext::Runtime - } - } - - /// Return the union of all handled exceptions as an [`Exceptions`] bitflag. - pub fn exceptions(&self) -> Exceptions { - let mut exceptions = Exceptions::empty(); - for exception in &self.handled_exceptions { - exceptions.insert(*exception); - } - exceptions - } - - /// Generate a [`Snapshot`] of the current context. - pub fn snapshot(&self) -> Snapshot { - Snapshot { - scope_id: self.scope_id, - stmt_id: self.stmt_id, - definition_id: self.definition_id, - flags: self.flags, - } - } - - /// Restore the context to the given [`Snapshot`]. - pub fn restore(&mut self, snapshot: Snapshot) { - let Snapshot { - scope_id, - stmt_id, - definition_id, - flags, - } = snapshot; - self.scope_id = scope_id; - self.stmt_id = stmt_id; - self.definition_id = definition_id; - self.flags = flags; - } - - /// Return `true` if the context is in a type annotation. - pub const fn in_annotation(&self) -> bool { - self.flags.contains(ContextFlags::ANNOTATION) - } - - /// Return `true` if the context is in a type definition. - pub const fn in_type_definition(&self) -> bool { - self.flags.contains(ContextFlags::TYPE_DEFINITION) - } - - /// Return `true` if the context is in a "simple" string type definition. - pub const fn in_simple_string_type_definition(&self) -> bool { - self.flags - .contains(ContextFlags::SIMPLE_STRING_TYPE_DEFINITION) - } - - /// Return `true` if the context is in a "complex" string type definition. - pub const fn in_complex_string_type_definition(&self) -> bool { - self.flags - .contains(ContextFlags::COMPLEX_STRING_TYPE_DEFINITION) - } - - /// Return `true` if the context is in a `__future__` type definition. - pub const fn in_future_type_definition(&self) -> bool { - self.flags.contains(ContextFlags::FUTURE_TYPE_DEFINITION) - } - - /// Return `true` if the context is in any kind of deferred type definition. - pub const fn in_deferred_type_definition(&self) -> bool { - self.in_simple_string_type_definition() - || self.in_complex_string_type_definition() - || self.in_future_type_definition() - } - - /// Return `true` if the context is in an exception handler. - pub const fn in_exception_handler(&self) -> bool { - self.flags.contains(ContextFlags::EXCEPTION_HANDLER) - } - - /// Return `true` if the context is in an f-string. - pub const fn in_f_string(&self) -> bool { - self.flags.contains(ContextFlags::F_STRING) - } - - /// Return `true` if the context is in boolean test. - pub const fn in_boolean_test(&self) -> bool { - self.flags.contains(ContextFlags::BOOLEAN_TEST) - } - - /// Return `true` if the context is in a `typing::Literal` annotation. 
- pub const fn in_literal(&self) -> bool { - self.flags.contains(ContextFlags::LITERAL) - } - - /// Return `true` if the context is in a subscript expression. - pub const fn in_subscript(&self) -> bool { - self.flags.contains(ContextFlags::SUBSCRIPT) - } - - /// Return `true` if the context is in a type-checking block. - pub const fn in_type_checking_block(&self) -> bool { - self.flags.contains(ContextFlags::TYPE_CHECKING_BLOCK) - } - - /// Return `true` if the context has traversed past the "top-of-file" import boundary. - pub const fn seen_import_boundary(&self) -> bool { - self.flags.contains(ContextFlags::IMPORT_BOUNDARY) - } - - /// Return `true` if the context has traverse past the `__future__` import boundary. - pub const fn seen_futures_boundary(&self) -> bool { - self.flags.contains(ContextFlags::FUTURES_BOUNDARY) - } - - /// Return `true` if `__future__`-style type annotations are enabled. - pub const fn future_annotations(&self) -> bool { - self.flags.contains(ContextFlags::FUTURE_ANNOTATIONS) - } -} - -bitflags! { - /// Flags indicating the current context of the analysis. - #[derive(Debug, Default, Copy, Clone, Eq, PartialEq)] - pub struct ContextFlags: u16 { - /// The context is in a type annotation. - /// - /// For example, the context could be visiting `int` in: - /// ```python - /// x: int = 1 - /// ``` - const ANNOTATION = 1 << 0; - - /// The context is in a type definition. - /// - /// For example, the context could be visiting `int` in: - /// ```python - /// from typing import NewType - /// - /// UserId = NewType("UserId", int) - /// ``` - /// - /// All type annotations are also type definitions, but the converse is not true. - /// In our example, `int` is a type definition but not a type annotation, as it - /// doesn't appear in a type annotation context, but rather in a type definition. - const TYPE_DEFINITION = 1 << 1; - - /// The context is in a (deferred) "simple" string type definition. - /// - /// For example, the context could be visiting `list[int]` in: - /// ```python - /// x: "list[int]" = [] - /// ``` - /// - /// "Simple" string type definitions are those that consist of a single string literal, - /// as opposed to an implicitly concatenated string literal. - const SIMPLE_STRING_TYPE_DEFINITION = 1 << 2; - - /// The context is in a (deferred) "complex" string type definition. - /// - /// For example, the context could be visiting `list[int]` in: - /// ```python - /// x: ("list" "[int]") = [] - /// ``` - /// - /// "Complex" string type definitions are those that consist of a implicitly concatenated - /// string literals. These are uncommon but valid. - const COMPLEX_STRING_TYPE_DEFINITION = 1 << 3; - - /// The context is in a (deferred) `__future__` type definition. - /// - /// For example, the context could be visiting `list[int]` in: - /// ```python - /// from __future__ import annotations - /// - /// x: list[int] = [] - /// ``` - /// - /// `__future__`-style type annotations are only enabled if the `annotations` feature - /// is enabled via `from __future__ import annotations`. - const FUTURE_TYPE_DEFINITION = 1 << 4; - - /// The context is in an exception handler. - /// - /// For example, the context could be visiting `x` in: - /// ```python - /// try: - /// ... - /// except Exception: - /// x: int = 1 - /// ``` - const EXCEPTION_HANDLER = 1 << 5; - - /// The context is in an f-string. - /// - /// For example, the context could be visiting `x` in: - /// ```python - /// f'{x}' - /// ``` - const F_STRING = 1 << 6; - - /// The context is in a boolean test. 
- /// - /// For example, the context could be visiting `x` in: - /// ```python - /// if x: - /// ... - /// ``` - /// - /// The implication is that the actual value returned by the current expression is - /// not used, only its truthiness. - const BOOLEAN_TEST = 1 << 7; - - /// The context is in a `typing::Literal` annotation. - /// - /// For example, the context could be visiting any of `"A"`, `"B"`, or `"C"` in: - /// ```python - /// def f(x: Literal["A", "B", "C"]): - /// ... - /// ``` - const LITERAL = 1 << 8; - - /// The context is in a subscript expression. - /// - /// For example, the context could be visiting `x["a"]` in: - /// ```python - /// x["a"]["b"] - /// ``` - const SUBSCRIPT = 1 << 9; - - /// The context is in a type-checking block. - /// - /// For example, the context could be visiting `x` in: - /// ```python - /// from typing import TYPE_CHECKING - /// - /// - /// if TYPE_CHECKING: - /// x: int = 1 - /// ``` - const TYPE_CHECKING_BLOCK = 1 << 10; - - - /// The context has traversed past the "top-of-file" import boundary. - /// - /// For example, the context could be visiting `x` in: - /// ```python - /// import os - /// - /// def f() -> None: - /// ... - /// - /// x: int = 1 - /// ``` - const IMPORT_BOUNDARY = 1 << 11; - - /// The context has traversed past the `__future__` import boundary. - /// - /// For example, the context could be visiting `x` in: - /// ```python - /// from __future__ import annotations - /// - /// import os - /// - /// x: int = 1 - /// ``` - /// - /// Python considers it a syntax error to import from `__future__` after - /// any other non-`__future__`-importing statements. - const FUTURES_BOUNDARY = 1 << 12; - - /// `__future__`-style type annotations are enabled in this context. - /// - /// For example, the context could be visiting `x` in: - /// ```python - /// from __future__ import annotations - /// - /// - /// def f(x: int) -> int: - /// ... - /// ``` - const FUTURE_ANNOTATIONS = 1 << 13; - } -} - -impl ContextFlags { - pub fn new(path: &Path) -> Self { - let mut flags = Self::default(); - if is_python_stub_file(path) { - flags |= Self::FUTURE_ANNOTATIONS; - } - flags - } -} - -/// A snapshot of the [`Context`] at a given point in the AST traversal. -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub struct Snapshot { - scope_id: ScopeId, - stmt_id: Option, - definition_id: DefinitionId, - flags: ContextFlags, +#[derive(Debug, Copy, Clone, is_macro::Is)] +pub enum ExecutionContext { + /// The reference occurs in a runtime context. + Runtime, + /// The reference occurs in a typing-only context. + Typing, } diff --git a/crates/ruff_python_semantic/src/definition.rs b/crates/ruff_python_semantic/src/definition.rs index e9daf9feec84e..df75490f5ed05 100644 --- a/crates/ruff_python_semantic/src/definition.rs +++ b/crates/ruff_python_semantic/src/definition.rs @@ -2,9 +2,9 @@ //! can be documented, such as a module, class, or function. use std::fmt::Debug; -use std::num::TryFromIntError; -use std::ops::{Deref, Index}; +use std::ops::Deref; +use ruff_index::{newtype_index, IndexSlice, IndexVec}; use rustpython_parser::ast::{self, Stmt}; use crate::analyze::visibility::{ @@ -12,28 +12,14 @@ use crate::analyze::visibility::{ }; /// Id uniquely identifying a definition in a program. -#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Ord, PartialOrd)] -pub struct DefinitionId(u32); +#[newtype_index] +pub struct DefinitionId; impl DefinitionId { /// Returns the ID for the module definition. 
#[inline] pub const fn module() -> Self { - DefinitionId(0) - } -} - -impl TryFrom for DefinitionId { - type Error = TryFromIntError; - - fn try_from(value: usize) -> Result { - Ok(Self(u32::try_from(value)?)) - } -} - -impl From for usize { - fn from(value: DefinitionId) -> Self { - value.0 as usize + DefinitionId::from_u32(0) } } @@ -118,11 +104,11 @@ impl Definition<'_> { /// The definitions within a Python program indexed by [`DefinitionId`]. #[derive(Debug, Default)] -pub struct Definitions<'a>(Vec>); +pub struct Definitions<'a>(IndexVec>); impl<'a> Definitions<'a> { pub fn for_module(definition: Module<'a>) -> Self { - Self(vec![Definition::Module(definition)]) + Self(IndexVec::from_raw(vec![Definition::Module(definition)])) } /// Pushes a new member definition and returns its unique id. @@ -130,14 +116,13 @@ impl<'a> Definitions<'a> { /// Members are assumed to be pushed in traversal order, such that parents are pushed before /// their children. pub fn push_member(&mut self, member: Member<'a>) -> DefinitionId { - let next_id = DefinitionId::try_from(self.0.len()).unwrap(); - self.0.push(Definition::Member(member)); - next_id + self.0.push(Definition::Member(member)) } /// Resolve the visibility of each definition in the collection. pub fn resolve(self, exports: Option<&[&str]>) -> ContextualizedDefinitions<'a> { - let mut definitions: Vec> = Vec::with_capacity(self.len()); + let mut definitions: IndexVec> = + IndexVec::with_capacity(self.len()); for definition in self { // Determine the visibility of the next definition, taking into account its parent's @@ -147,7 +132,7 @@ impl<'a> Definitions<'a> { Definition::Module(module) => module.source.to_visibility(), Definition::Member(member) => match member.kind { MemberKind::Class => { - let parent = &definitions[usize::from(member.parent)]; + let parent = &definitions[member.parent]; if parent.visibility.is_private() || exports .map_or(false, |exports| !exports.contains(&member.name())) @@ -158,7 +143,7 @@ impl<'a> Definitions<'a> { } } MemberKind::NestedClass => { - let parent = &definitions[usize::from(member.parent)]; + let parent = &definitions[member.parent]; if parent.visibility.is_private() || matches!( parent.definition, @@ -176,7 +161,7 @@ impl<'a> Definitions<'a> { } } MemberKind::Function => { - let parent = &definitions[usize::from(member.parent)]; + let parent = &definitions[member.parent]; if parent.visibility.is_private() || exports .map_or(false, |exports| !exports.contains(&member.name())) @@ -188,7 +173,7 @@ impl<'a> Definitions<'a> { } MemberKind::NestedFunction => Visibility::Private, MemberKind::Method => { - let parent = &definitions[usize::from(member.parent)]; + let parent = &definitions[member.parent]; if parent.visibility.is_private() { Visibility::Private } else { @@ -204,20 +189,13 @@ impl<'a> Definitions<'a> { }); } - ContextualizedDefinitions(definitions) - } -} - -impl<'a> Index for Definitions<'a> { - type Output = Definition<'a>; - - fn index(&self, index: DefinitionId) -> &Self::Output { - &self.0[usize::from(index)] + ContextualizedDefinitions(definitions.raw) } } impl<'a> Deref for Definitions<'a> { - type Target = [Definition<'a>]; + type Target = IndexSlice>; + fn deref(&self) -> &Self::Target { &self.0 } diff --git a/crates/ruff_python_semantic/src/lib.rs b/crates/ruff_python_semantic/src/lib.rs index e5f72a625b54a..86e26c5481bc3 100644 --- a/crates/ruff_python_semantic/src/lib.rs +++ b/crates/ruff_python_semantic/src/lib.rs @@ -2,5 +2,7 @@ pub mod analyze; pub mod binding; pub mod context; pub 
mod definition; +pub mod model; pub mod node; +pub mod reference; pub mod scope; diff --git a/crates/ruff_python_semantic/src/model.rs b/crates/ruff_python_semantic/src/model.rs new file mode 100644 index 0000000000000..b7eb31ece9d4e --- /dev/null +++ b/crates/ruff_python_semantic/src/model.rs @@ -0,0 +1,900 @@ +use std::collections::HashMap; +use std::path::Path; + +use bitflags::bitflags; +use nohash_hasher::{BuildNoHashHasher, IntMap}; +use ruff_text_size::TextRange; +use rustpython_parser::ast::{Expr, Stmt}; +use smallvec::smallvec; + +use ruff_python_ast::call_path::{collect_call_path, from_unqualified_name, CallPath}; +use ruff_python_ast::helpers::from_relative_import; +use ruff_python_stdlib::path::is_python_stub_file; +use ruff_python_stdlib::typing::TYPING_EXTENSIONS; + +use crate::binding::{ + Binding, BindingId, BindingKind, Bindings, Exceptions, FromImportation, Importation, + SubmoduleImportation, +}; +use crate::context::ExecutionContext; +use crate::definition::{Definition, DefinitionId, Definitions, Member, Module}; +use crate::node::{NodeId, Nodes}; +use crate::reference::References; +use crate::scope::{Scope, ScopeId, ScopeKind, Scopes}; + +/// A semantic model for a Python module, to enable querying the module's semantic information. +pub struct SemanticModel<'a> { + pub typing_modules: &'a [String], + pub module_path: Option<&'a [String]>, + // Stack of all visited statements, along with the identifier of the current statement. + pub stmts: Nodes<'a>, + pub stmt_id: Option, + // Stack of current expressions. + pub exprs: Vec<&'a Expr>, + // Stack of all scopes, along with the identifier of the current scope. + pub scopes: Scopes<'a>, + pub scope_id: ScopeId, + pub dead_scopes: Vec, + // Stack of all definitions created in any scope, at any point in execution, along with the + // identifier of the current definition. + pub definitions: Definitions<'a>, + pub definition_id: DefinitionId, + // A stack of all bindings created in any scope, at any point in execution. + pub bindings: Bindings<'a>, + // Stack of all references created in any scope, at any point in execution. + pub references: References, + // Map from binding index to indexes of bindings that shadow it in other scopes. + pub shadowed_bindings: HashMap, BuildNoHashHasher>, + // Body iteration; used to peek at siblings. + pub body: &'a [Stmt], + pub body_index: usize, + // Internal, derivative state. + pub flags: SemanticModelFlags, + pub handled_exceptions: Vec, +} + +impl<'a> SemanticModel<'a> { + pub fn new(typing_modules: &'a [String], path: &'a Path, module: Module<'a>) -> Self { + Self { + typing_modules, + module_path: module.path(), + stmts: Nodes::default(), + stmt_id: None, + exprs: Vec::default(), + scopes: Scopes::default(), + scope_id: ScopeId::global(), + dead_scopes: Vec::default(), + definitions: Definitions::for_module(module), + definition_id: DefinitionId::module(), + bindings: Bindings::default(), + references: References::default(), + shadowed_bindings: IntMap::default(), + body: &[], + body_index: 0, + flags: SemanticModelFlags::new(path), + handled_exceptions: Vec::default(), + } + } + + /// Return `true` if the `Expr` is a reference to `typing.${target}`. + pub fn match_typing_expr(&self, expr: &Expr, target: &str) -> bool { + self.resolve_call_path(expr).map_or(false, |call_path| { + self.match_typing_call_path(&call_path, target) + }) + } + + /// Return `true` if the call path is a reference to `typing.${target}`. 
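An aside on `match_typing_call_path`, whose doc comment appears just above (the body follows): it reduces to comparing a dotted path against `typing.<target>`, against `typing_extensions.<target>` for members known to be re-exported there, and against any user-configured typing modules. A standalone restatement of that check, with `TYPING_EXTENSIONS` stubbed as a tiny illustrative list rather than the real table from `ruff_python_stdlib`:

```rust
/// Stand-in for `ruff_python_stdlib::typing::TYPING_EXTENSIONS` (illustrative subset only).
const TYPING_EXTENSIONS: &[&str] = &["TypedDict", "Protocol", "Literal"];

/// Return `true` if `call_path` refers to `typing.<target>` or an equivalent re-export.
fn match_typing_call_path(call_path: &[&str], target: &str, typing_modules: &[String]) -> bool {
    if call_path == ["typing", target] {
        return true;
    }
    // `typing_extensions` only re-exports a known subset of `typing` members.
    if TYPING_EXTENSIONS.contains(&target) && call_path == ["typing_extensions", target] {
        return true;
    }
    // User-configured modules (e.g. `compat.typing`) are treated as aliases of `typing`.
    typing_modules.iter().any(|module| {
        let mut expected: Vec<&str> = module.split('.').collect();
        expected.push(target);
        call_path == expected.as_slice()
    })
}

fn main() {
    assert!(match_typing_call_path(&["typing", "Optional"], "Optional", &[]));
    assert!(match_typing_call_path(&["typing_extensions", "Literal"], "Literal", &[]));
    assert!(match_typing_call_path(
        &["compat", "typing", "Optional"],
        "Optional",
        &["compat.typing".to_string()]
    ));
    assert!(!match_typing_call_path(&["os", "path"], "Optional", &[]));
}
```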
+ pub fn match_typing_call_path(&self, call_path: &CallPath, target: &str) -> bool { + if call_path.as_slice() == ["typing", target] { + return true; + } + + if TYPING_EXTENSIONS.contains(target) { + if call_path.as_slice() == ["typing_extensions", target] { + return true; + } + } + + if self.typing_modules.iter().any(|module| { + let mut module: CallPath = from_unqualified_name(module); + module.push(target); + *call_path == module + }) { + return true; + } + + false + } + + /// Return the current `Binding` for a given `name`. + pub fn find_binding(&self, member: &str) -> Option<&Binding> { + self.scopes() + .find_map(|scope| scope.get(member)) + .map(|binding_id| &self.bindings[binding_id]) + } + + /// Return `true` if `member` is bound as a builtin. + pub fn is_builtin(&self, member: &str) -> bool { + self.find_binding(member) + .map_or(false, |binding| binding.kind.is_builtin()) + } + + /// Resolve a reference to the given symbol. + pub fn resolve_reference(&mut self, symbol: &str, range: TextRange) -> ResolvedReference { + // PEP 563 indicates that if a forward reference can be resolved in the module scope, we + // should prefer it over local resolutions. + if self.in_deferred_type_definition() { + if let Some(binding_id) = self.scopes.global().get(symbol) { + // Mark the binding as used. + let context = self.execution_context(); + let reference_id = self.references.push(ScopeId::global(), range, context); + self.bindings[binding_id].references.push(reference_id); + + // Mark any submodule aliases as used. + if let Some(binding_id) = self.resolve_submodule(ScopeId::global(), binding_id) { + let reference_id = self.references.push(ScopeId::global(), range, context); + self.bindings[binding_id].references.push(reference_id); + } + + return ResolvedReference::Resolved(binding_id); + } + } + + let mut seen_function = false; + let mut import_starred = false; + for (index, scope_id) in self.scopes.ancestor_ids(self.scope_id).enumerate() { + let scope = &self.scopes[scope_id]; + if scope.kind.is_class() { + // Allow usages of `__class__` within methods, e.g.: + // + // ```python + // class Foo: + // def __init__(self): + // print(__class__) + // ``` + if seen_function && matches!(symbol, "__class__") { + return ResolvedReference::ImplicitGlobal; + } + if index > 0 { + continue; + } + } + + if let Some(binding_id) = scope.get(symbol) { + // Mark the binding as used. + let context = self.execution_context(); + let reference_id = self.references.push(self.scope_id, range, context); + self.bindings[binding_id].references.push(reference_id); + + // Mark any submodule aliases as used. + if let Some(binding_id) = self.resolve_submodule(scope_id, binding_id) { + let reference_id = self.references.push(self.scope_id, range, context); + self.bindings[binding_id].references.push(reference_id); + } + + // But if it's a type annotation, don't treat it as resolved, unless we're in a + // forward reference. For example, given: + // + // ```python + // name: str + // print(name) + // ``` + // + // The `name` in `print(name)` should be treated as unresolved, but the `name` in + // `name: str` should be treated as used. 
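One detail of `resolve_reference` above worth calling out is the scope walk: class scopes are only consulted when they are the innermost scope (the `index > 0` check), matching Python's rule that class-body names are not visible from nested functions. A dependency-free sketch of just that walk, using a plain `Vec` of scopes and string names in place of the real `Scopes`/`BindingId` machinery:

```rust
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ScopeKind {
    Module,
    Class,
    Function,
}

#[derive(Debug)]
struct Scope {
    kind: ScopeKind,
    names: Vec<&'static str>,
}

/// Walk scopes innermost-to-outermost; class scopes are skipped unless they are the
/// innermost scope, mirroring the `index > 0` check in `resolve_reference`.
fn lookup(scopes: &[Scope], symbol: &str) -> Option<usize> {
    for (index, scope) in scopes.iter().rev().enumerate() {
        if scope.kind == ScopeKind::Class && index > 0 {
            continue;
        }
        if scope.names.iter().any(|&name| name == symbol) {
            return Some(scopes.len() - 1 - index);
        }
    }
    None
}

fn main() {
    // module -> class C -> def f, innermost scope last.
    let scopes = vec![
        Scope { kind: ScopeKind::Module, names: vec!["x"] },
        Scope { kind: ScopeKind::Class, names: vec!["y"] },
        Scope { kind: ScopeKind::Function, names: vec![] },
    ];
    // The class-level `y` is not visible from the nested function scope...
    assert_eq!(lookup(&scopes, "y"), None);
    // ...but the module-level `x` is (index 0 is the module scope).
    assert_eq!(lookup(&scopes, "x"), Some(0));
}
```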
+ if !self.in_deferred_type_definition() + && self.bindings[binding_id].kind.is_annotation() + { + continue; + } + + return ResolvedReference::Resolved(binding_id); + } + + // Allow usages of `__module__` and `__qualname__` within class scopes, e.g.: + // + // ```python + // class Foo: + // print(__qualname__) + // ``` + // + // Intentionally defer this check to _after_ the standard `scope.get` logic, so that + // we properly attribute reads to overridden class members, e.g.: + // + // ```python + // class Foo: + // __qualname__ = "Bar" + // print(__qualname__) + // ``` + if index == 0 && scope.kind.is_class() { + if matches!(symbol, "__module__" | "__qualname__") { + return ResolvedReference::ImplicitGlobal; + } + } + + seen_function |= scope.kind.is_function(); + import_starred = import_starred || scope.uses_star_imports(); + } + + if import_starred { + ResolvedReference::StarImport + } else { + ResolvedReference::NotFound + } + } + + /// Given a `BindingId`, return the `BindingId` of the submodule import that it aliases. + fn resolve_submodule(&self, scope_id: ScopeId, binding_id: BindingId) -> Option { + // If the name of a submodule import is the same as an alias of another import, and the + // alias is used, then the submodule import should be marked as used too. + // + // For example, mark `pyarrow.csv` as used in: + // + // ```python + // import pyarrow as pa + // import pyarrow.csv + // print(pa.csv.read_csv("test.csv")) + // ``` + let (name, full_name) = match &self.bindings[binding_id].kind { + BindingKind::Importation(Importation { name, full_name }) => (*name, *full_name), + BindingKind::SubmoduleImportation(SubmoduleImportation { name, full_name }) => { + (*name, *full_name) + } + BindingKind::FromImportation(FromImportation { name, full_name }) => { + (*name, full_name.as_str()) + } + _ => return None, + }; + + let has_alias = full_name + .split('.') + .last() + .map(|segment| segment != name) + .unwrap_or_default(); + if !has_alias { + return None; + } + + self.scopes[scope_id].get(full_name) + } + + /// Resolves the [`Expr`] to a fully-qualified symbol-name, if `value` resolves to an imported + /// or builtin symbol. + /// + /// E.g., given: + /// + /// + /// ```python + /// from sys import version_info as python_version + /// print(python_version) + /// ``` + /// + /// ...then `resolve_call_path(${python_version})` will resolve to `sys.version_info`. + pub fn resolve_call_path(&'a self, value: &'a Expr) -> Option> { + let Some(call_path) = collect_call_path(value) else { + return None; + }; + let Some(head) = call_path.first() else { + return None; + }; + let Some(binding) = self.find_binding(head) else { + return None; + }; + match &binding.kind { + BindingKind::Importation(Importation { + full_name: name, .. + }) + | BindingKind::SubmoduleImportation(SubmoduleImportation { name, .. }) => { + if name.starts_with('.') { + if let Some(module) = &self.module_path { + let mut source_path = from_relative_import(module, name); + if source_path.is_empty() { + None + } else { + source_path.extend(call_path.into_iter().skip(1)); + Some(source_path) + } + } else { + None + } + } else { + let mut source_path: CallPath = from_unqualified_name(name); + source_path.extend(call_path.into_iter().skip(1)); + Some(source_path) + } + } + BindingKind::FromImportation(FromImportation { + full_name: name, .. 
+ }) => { + if name.starts_with('.') { + if let Some(module) = &self.module_path { + let mut source_path = from_relative_import(module, name); + if source_path.is_empty() { + None + } else { + source_path.extend(call_path.into_iter().skip(1)); + Some(source_path) + } + } else { + None + } + } else { + let mut source_path: CallPath = from_unqualified_name(name); + source_path.extend(call_path.into_iter().skip(1)); + Some(source_path) + } + } + BindingKind::Builtin => { + let mut source_path: CallPath = smallvec![]; + source_path.push(""); + source_path.extend(call_path); + Some(source_path) + } + _ => None, + } + } + + /// Given a `module` and `member`, return the fully-qualified name of the binding in the current + /// scope, if it exists. + /// + /// E.g., given: + /// + /// ```python + /// from sys import version_info as python_version + /// print(python_version) + /// ``` + /// + /// ...then `resolve_qualified_import_name("sys", "version_info")` will return + /// `Some("python_version")`. + pub fn resolve_qualified_import_name( + &self, + module: &str, + member: &str, + ) -> Option<(&Stmt, String)> { + self.scopes().enumerate().find_map(|(scope_index, scope)| { + scope.binding_ids().find_map(|binding_id| { + let binding = &self.bindings[binding_id]; + match &binding.kind { + // Ex) Given `module="sys"` and `object="exit"`: + // `import sys` -> `sys.exit` + // `import sys as sys2` -> `sys2.exit` + BindingKind::Importation(Importation { name, full_name }) => { + if full_name == &module { + // Verify that `sys` isn't bound in an inner scope. + if self + .scopes() + .take(scope_index) + .all(|scope| scope.get(name).is_none()) + { + if let Some(source) = binding.source { + return Some((self.stmts[source], format!("{name}.{member}"))); + } + } + } + } + // Ex) Given `module="os.path"` and `object="join"`: + // `from os.path import join` -> `join` + // `from os.path import join as join2` -> `join2` + BindingKind::FromImportation(FromImportation { name, full_name }) => { + if let Some((target_module, target_member)) = full_name.split_once('.') { + if target_module == module && target_member == member { + // Verify that `join` isn't bound in an inner scope. + if self + .scopes() + .take(scope_index) + .all(|scope| scope.get(name).is_none()) + { + if let Some(source) = binding.source { + return Some((self.stmts[source], (*name).to_string())); + } + } + } + } + } + // Ex) Given `module="os"` and `object="name"`: + // `import os.path ` -> `os.name` + BindingKind::SubmoduleImportation(SubmoduleImportation { name, .. }) => { + if name == &module { + // Verify that `os` isn't bound in an inner scope. + if self + .scopes() + .take(scope_index) + .all(|scope| scope.get(name).is_none()) + { + if let Some(source) = binding.source { + return Some((self.stmts[source], format!("{name}.{member}"))); + } + } + } + } + // Non-imports. + _ => {} + } + None + }) + }) + } + + /// Push a [`Stmt`] onto the stack. + pub fn push_stmt(&mut self, stmt: &'a Stmt) { + self.stmt_id = Some(self.stmts.insert(stmt, self.stmt_id)); + } + + /// Pop the current [`Stmt`] off the stack. + pub fn pop_stmt(&mut self) { + let node_id = self.stmt_id.expect("Attempted to pop without statement"); + self.stmt_id = self.stmts.parent_id(node_id); + } + + /// Push an [`Expr`] onto the stack. + pub fn push_expr(&mut self, expr: &'a Expr) { + self.exprs.push(expr); + } + + /// Pop the current [`Expr`] off the stack. 
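The non-relative branch of `resolve_call_path` above is essentially a path join: split the binding's fully-qualified import name, then append everything after the first segment of the path as written at the use site. A dependency-free sketch of just that step (the real code uses `CallPath`/`smallvec` and also covers relative imports and builtins):

```rust
/// Given the fully-qualified name behind a binding (e.g. `sys.version_info` for
/// `from sys import version_info as python_version`) and the dotted path as written
/// (e.g. `python_version.major`), produce the resolved path `sys.version_info.major`.
fn resolve<'a>(full_name: &'a str, call_path: &[&'a str]) -> Vec<&'a str> {
    let mut resolved: Vec<&'a str> = full_name.split('.').collect();
    // The first segment is the local alias; everything after it carries over unchanged.
    resolved.extend(call_path.iter().skip(1).copied());
    resolved
}

fn main() {
    // from sys import version_info as python_version; python_version.major
    assert_eq!(
        resolve("sys.version_info", &["python_version", "major"]),
        ["sys", "version_info", "major"]
    );
    // import os.path as p; p.join(...)
    assert_eq!(resolve("os.path", &["p", "join"]), ["os", "path", "join"]);
}
```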
+ pub fn pop_expr(&mut self) { + self.exprs + .pop() + .expect("Attempted to pop without expression"); + } + + /// Push a [`Scope`] with the given [`ScopeKind`] onto the stack. + pub fn push_scope(&mut self, kind: ScopeKind<'a>) { + let id = self.scopes.push_scope(kind, self.scope_id); + self.scope_id = id; + } + + /// Pop the current [`Scope`] off the stack. + pub fn pop_scope(&mut self) { + self.dead_scopes.push(self.scope_id); + self.scope_id = self.scopes[self.scope_id] + .parent + .expect("Attempted to pop without scope"); + } + + /// Push a [`Member`] onto the stack. + pub fn push_definition(&mut self, definition: Member<'a>) { + self.definition_id = self.definitions.push_member(definition); + } + + /// Pop the current [`Member`] off the stack. + pub fn pop_definition(&mut self) { + let Definition::Member(member) = &self.definitions[self.definition_id] else { + panic!("Attempted to pop without member definition"); + }; + self.definition_id = member.parent; + } + + /// Return the current `Stmt`. + pub fn stmt(&self) -> &'a Stmt { + let node_id = self.stmt_id.expect("No current statement"); + self.stmts[node_id] + } + + /// Return the parent `Stmt` of the current `Stmt`, if any. + pub fn stmt_parent(&self) -> Option<&'a Stmt> { + let node_id = self.stmt_id.expect("No current statement"); + let parent_id = self.stmts.parent_id(node_id)?; + Some(self.stmts[parent_id]) + } + + /// Return the current `Expr`. + pub fn expr(&self) -> Option<&'a Expr> { + self.exprs.iter().last().copied() + } + + /// Return the parent `Expr` of the current `Expr`. + pub fn expr_parent(&self) -> Option<&'a Expr> { + self.exprs.iter().rev().nth(1).copied() + } + + /// Return the grandparent `Expr` of the current `Expr`. + pub fn expr_grandparent(&self) -> Option<&'a Expr> { + self.exprs.iter().rev().nth(2).copied() + } + + /// Return an [`Iterator`] over the current `Expr` parents. + pub fn expr_ancestors(&self) -> impl Iterator { + self.exprs.iter().rev().skip(1) + } + + /// Return the `Stmt` that immediately follows the current `Stmt`, if any. + pub fn sibling_stmt(&self) -> Option<&'a Stmt> { + self.body.get(self.body_index + 1) + } + + /// Returns a reference to the global scope + pub fn global_scope(&self) -> &Scope<'a> { + self.scopes.global() + } + + /// Returns a mutable reference to the global scope + pub fn global_scope_mut(&mut self) -> &mut Scope<'a> { + self.scopes.global_mut() + } + + /// Returns the current top most scope. + pub fn scope(&self) -> &Scope<'a> { + &self.scopes[self.scope_id] + } + + /// Returns a mutable reference to the current top most scope. + pub fn scope_mut(&mut self) -> &mut Scope<'a> { + &mut self.scopes[self.scope_id] + } + + /// Returns an iterator over all scopes, starting from the current scope. + pub fn scopes(&self) -> impl Iterator { + self.scopes.ancestors(self.scope_id) + } + + /// Returns an iterator over all parent statements. + pub fn parents(&self) -> impl Iterator + '_ { + let node_id = self.stmt_id.expect("No current statement"); + self.stmts.ancestor_ids(node_id).map(|id| self.stmts[id]) + } + + /// Return `true` if the given [`ScopeId`] matches that of the current scope. + pub fn is_current_scope(&self, scope_id: ScopeId) -> bool { + self.scope_id == scope_id + } + + /// Return `true` if the context is at the top level of the module (i.e., in the module scope, + /// and not nested within any statements). 
+ pub fn at_top_level(&self) -> bool { + self.scope_id.is_global() + && self + .stmt_id + .map_or(true, |stmt_id| self.stmts.parent_id(stmt_id).is_none()) + } + + /// Returns `true` if the given [`BindingId`] is used. + pub fn is_used(&self, binding_id: BindingId) -> bool { + self.bindings[binding_id].is_used() + } + + /// Add a reference to the given [`BindingId`] in the local scope. + pub fn add_local_reference( + &mut self, + binding_id: BindingId, + range: TextRange, + context: ExecutionContext, + ) { + let reference_id = self.references.push(self.scope_id, range, context); + self.bindings[binding_id].references.push(reference_id); + } + + /// Add a reference to the given [`BindingId`] in the global scope. + pub fn add_global_reference( + &mut self, + binding_id: BindingId, + range: TextRange, + context: ExecutionContext, + ) { + let reference_id = self.references.push(ScopeId::global(), range, context); + self.bindings[binding_id].references.push(reference_id); + } + + /// Return the [`ExecutionContext`] of the current scope. + pub const fn execution_context(&self) -> ExecutionContext { + if self.in_type_checking_block() + || self.in_annotation() + || self.in_complex_string_type_definition() + || self.in_simple_string_type_definition() + { + ExecutionContext::Typing + } else { + ExecutionContext::Runtime + } + } + + /// Return the union of all handled exceptions as an [`Exceptions`] bitflag. + pub fn exceptions(&self) -> Exceptions { + let mut exceptions = Exceptions::empty(); + for exception in &self.handled_exceptions { + exceptions.insert(*exception); + } + exceptions + } + + /// Generate a [`Snapshot`] of the current context. + pub fn snapshot(&self) -> Snapshot { + Snapshot { + scope_id: self.scope_id, + stmt_id: self.stmt_id, + definition_id: self.definition_id, + flags: self.flags, + } + } + + /// Restore the context to the given [`Snapshot`]. + pub fn restore(&mut self, snapshot: Snapshot) { + let Snapshot { + scope_id, + stmt_id, + definition_id, + flags, + } = snapshot; + self.scope_id = scope_id; + self.stmt_id = stmt_id; + self.definition_id = definition_id; + self.flags = flags; + } + + /// Return `true` if the context is in a type annotation. + pub const fn in_annotation(&self) -> bool { + self.flags.contains(SemanticModelFlags::ANNOTATION) + } + + /// Return `true` if the context is in a type definition. + pub const fn in_type_definition(&self) -> bool { + self.flags.contains(SemanticModelFlags::TYPE_DEFINITION) + } + + /// Return `true` if the context is in a "simple" string type definition. + pub const fn in_simple_string_type_definition(&self) -> bool { + self.flags + .contains(SemanticModelFlags::SIMPLE_STRING_TYPE_DEFINITION) + } + + /// Return `true` if the context is in a "complex" string type definition. + pub const fn in_complex_string_type_definition(&self) -> bool { + self.flags + .contains(SemanticModelFlags::COMPLEX_STRING_TYPE_DEFINITION) + } + + /// Return `true` if the context is in a `__future__` type definition. + pub const fn in_future_type_definition(&self) -> bool { + self.flags + .contains(SemanticModelFlags::FUTURE_TYPE_DEFINITION) + } + + /// Return `true` if the context is in any kind of deferred type definition. + pub const fn in_deferred_type_definition(&self) -> bool { + self.in_simple_string_type_definition() + || self.in_complex_string_type_definition() + || self.in_future_type_definition() + } + + /// Return `true` if the context is in an exception handler. 
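The `snapshot`/`restore` pair above exists so deferred work (string annotations, `__future__`-style annotations) can later be analyzed in the state it was collected in. Reduced to two illustrative fields, the pattern is plain copy-out/copy-back:

```rust
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct Snapshot {
    scope_id: usize,
    in_annotation: bool,
}

#[derive(Debug)]
struct Model {
    scope_id: usize,
    in_annotation: bool,
}

impl Model {
    fn snapshot(&self) -> Snapshot {
        Snapshot { scope_id: self.scope_id, in_annotation: self.in_annotation }
    }
    fn restore(&mut self, snapshot: Snapshot) {
        self.scope_id = snapshot.scope_id;
        self.in_annotation = snapshot.in_annotation;
    }
}

fn main() {
    let mut model = Model { scope_id: 3, in_annotation: true };
    // Record the state where a deferred string annotation was collected...
    let deferred = model.snapshot();
    // ...keep traversing (the state moves on)...
    model.scope_id = 0;
    model.in_annotation = false;
    // ...then jump back to analyze the deferred annotation in its original context.
    model.restore(deferred);
    assert_eq!(model.snapshot(), deferred);
}
```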
+ pub const fn in_exception_handler(&self) -> bool { + self.flags.contains(SemanticModelFlags::EXCEPTION_HANDLER) + } + + /// Return `true` if the context is in an f-string. + pub const fn in_f_string(&self) -> bool { + self.flags.contains(SemanticModelFlags::F_STRING) + } + + /// Return `true` if the context is in a boolean test. + pub const fn in_boolean_test(&self) -> bool { + self.flags.contains(SemanticModelFlags::BOOLEAN_TEST) + } + + /// Return `true` if the context is in a `typing::Literal` annotation. + pub const fn in_literal(&self) -> bool { + self.flags.contains(SemanticModelFlags::LITERAL) + } + + /// Return `true` if the context is in a subscript expression. + pub const fn in_subscript(&self) -> bool { + self.flags.contains(SemanticModelFlags::SUBSCRIPT) + } + + /// Return `true` if the context is in a type-checking block. + pub const fn in_type_checking_block(&self) -> bool { + self.flags.contains(SemanticModelFlags::TYPE_CHECKING_BLOCK) + } + + /// Return `true` if the context has traversed past the "top-of-file" import boundary. + pub const fn seen_import_boundary(&self) -> bool { + self.flags.contains(SemanticModelFlags::IMPORT_BOUNDARY) + } + + /// Return `true` if the context has traversed past the `__future__` import boundary. + pub const fn seen_futures_boundary(&self) -> bool { + self.flags.contains(SemanticModelFlags::FUTURES_BOUNDARY) + } + + /// Return `true` if `__future__`-style type annotations are enabled. + pub const fn future_annotations(&self) -> bool { + self.flags.contains(SemanticModelFlags::FUTURE_ANNOTATIONS) + } +} + +bitflags! { + /// Flags indicating the current context of the analysis. + #[derive(Debug, Default, Copy, Clone, Eq, PartialEq)] + pub struct SemanticModelFlags: u16 { + /// The context is in a type annotation. + /// + /// For example, the context could be visiting `int` in: + /// ```python + /// x: int = 1 + /// ``` + const ANNOTATION = 1 << 0; + + /// The context is in a type definition. + /// + /// For example, the context could be visiting `int` in: + /// ```python + /// from typing import NewType + /// + /// UserId = NewType("UserId", int) + /// ``` + /// + /// All type annotations are also type definitions, but the converse is not true. + /// In our example, `int` is a type definition but not a type annotation, as it + /// doesn't appear in a type annotation context, but rather in a type definition. + const TYPE_DEFINITION = 1 << 1; + + /// The context is in a (deferred) "simple" string type definition. + /// + /// For example, the context could be visiting `list[int]` in: + /// ```python + /// x: "list[int]" = [] + /// ``` + /// + /// "Simple" string type definitions are those that consist of a single string literal, + /// as opposed to an implicitly concatenated string literal. + const SIMPLE_STRING_TYPE_DEFINITION = 1 << 2; + + /// The context is in a (deferred) "complex" string type definition. + /// + /// For example, the context could be visiting `list[int]` in: + /// ```python + /// x: ("list" "[int]") = [] + /// ``` + /// + /// "Complex" string type definitions are those that consist of implicitly concatenated + /// string literals. These are uncommon but valid. + const COMPLEX_STRING_TYPE_DEFINITION = 1 << 3; + + /// The context is in a (deferred) `__future__` type definition.
+ /// + /// For example, the context could be visiting `list[int]` in: + /// ```python + /// from __future__ import annotations + /// + /// x: list[int] = [] + /// ``` + /// + /// `__future__`-style type annotations are only enabled if the `annotations` feature + /// is enabled via `from __future__ import annotations`. + const FUTURE_TYPE_DEFINITION = 1 << 4; + + /// The context is in an exception handler. + /// + /// For example, the context could be visiting `x` in: + /// ```python + /// try: + /// ... + /// except Exception: + /// x: int = 1 + /// ``` + const EXCEPTION_HANDLER = 1 << 5; + + /// The context is in an f-string. + /// + /// For example, the context could be visiting `x` in: + /// ```python + /// f'{x}' + /// ``` + const F_STRING = 1 << 6; + + /// The context is in a boolean test. + /// + /// For example, the context could be visiting `x` in: + /// ```python + /// if x: + /// ... + /// ``` + /// + /// The implication is that the actual value returned by the current expression is + /// not used, only its truthiness. + const BOOLEAN_TEST = 1 << 7; + + /// The context is in a `typing::Literal` annotation. + /// + /// For example, the context could be visiting any of `"A"`, `"B"`, or `"C"` in: + /// ```python + /// def f(x: Literal["A", "B", "C"]): + /// ... + /// ``` + const LITERAL = 1 << 8; + + /// The context is in a subscript expression. + /// + /// For example, the context could be visiting `x["a"]` in: + /// ```python + /// x["a"]["b"] + /// ``` + const SUBSCRIPT = 1 << 9; + + /// The context is in a type-checking block. + /// + /// For example, the context could be visiting `x` in: + /// ```python + /// from typing import TYPE_CHECKING + /// + /// + /// if TYPE_CHECKING: + /// x: int = 1 + /// ``` + const TYPE_CHECKING_BLOCK = 1 << 10; + + + /// The context has traversed past the "top-of-file" import boundary. + /// + /// For example, the context could be visiting `x` in: + /// ```python + /// import os + /// + /// def f() -> None: + /// ... + /// + /// x: int = 1 + /// ``` + const IMPORT_BOUNDARY = 1 << 11; + + /// The context has traversed past the `__future__` import boundary. + /// + /// For example, the context could be visiting `x` in: + /// ```python + /// from __future__ import annotations + /// + /// import os + /// + /// x: int = 1 + /// ``` + /// + /// Python considers it a syntax error to import from `__future__` after + /// any other non-`__future__`-importing statements. + const FUTURES_BOUNDARY = 1 << 12; + + /// `__future__`-style type annotations are enabled in this context. + /// + /// For example, the context could be visiting `x` in: + /// ```python + /// from __future__ import annotations + /// + /// + /// def f(x: int) -> int: + /// ... + /// ``` + const FUTURE_ANNOTATIONS = 1 << 13; + } +} + +impl SemanticModelFlags { + pub fn new(path: &Path) -> Self { + let mut flags = Self::default(); + if is_python_stub_file(path) { + flags |= Self::FUTURE_ANNOTATIONS; + } + flags + } +} + +/// A snapshot of the [`SemanticModel`] at a given point in the AST traversal. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct Snapshot { + scope_id: ScopeId, + stmt_id: Option, + definition_id: DefinitionId, + flags: SemanticModelFlags, +} + +#[derive(Debug)] +pub enum ResolvedReference { + /// The reference is resolved to a specific binding. + Resolved(BindingId), + /// The reference is resolved to a context-specific, implicit global (e.g., `__class__` within + /// a class scope). 
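The `SemanticModelFlags` bitset above drives all of the `in_*` accessors, and composite queries such as `in_deferred_type_definition` are just unions of bits. A pared-down version with only three of the flags, assuming the `bitflags` crate this diff already depends on; `intersects` stands in for the three `contains` checks OR-ed together in the real method:

```rust
use bitflags::bitflags;

bitflags! {
    #[derive(Debug, Default, Copy, Clone, Eq, PartialEq)]
    struct Flags: u16 {
        const SIMPLE_STRING_TYPE_DEFINITION = 1 << 2;
        const COMPLEX_STRING_TYPE_DEFINITION = 1 << 3;
        const FUTURE_TYPE_DEFINITION = 1 << 4;
    }
}

/// Mirrors `in_deferred_type_definition`: any one of the deferred-definition bits suffices.
fn in_deferred_type_definition(flags: Flags) -> bool {
    flags.intersects(
        Flags::SIMPLE_STRING_TYPE_DEFINITION
            | Flags::COMPLEX_STRING_TYPE_DEFINITION
            | Flags::FUTURE_TYPE_DEFINITION,
    )
}

fn main() {
    let mut flags = Flags::default();
    assert!(!in_deferred_type_definition(flags));
    // e.g. after `from __future__ import annotations` and entering an annotation:
    flags |= Flags::FUTURE_TYPE_DEFINITION;
    assert!(in_deferred_type_definition(flags));
}
```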
+ ImplicitGlobal, + /// The reference is unresolved, but at least one of the containing scopes contains a star + /// import. + StarImport, + /// The reference is definitively unresolved. + NotFound, +} diff --git a/crates/ruff_python_semantic/src/node.rs b/crates/ruff_python_semantic/src/node.rs index bc3dbba7d5bbd..dd71fd5f1dcdc 100644 --- a/crates/ruff_python_semantic/src/node.rs +++ b/crates/ruff_python_semantic/src/node.rs @@ -1,6 +1,6 @@ -use std::num::{NonZeroU32, TryFromIntError}; use std::ops::{Index, IndexMut}; +use ruff_index::{newtype_index, IndexVec}; use rustc_hash::FxHashMap; use rustpython_parser::ast::Stmt; @@ -11,24 +11,9 @@ use ruff_python_ast::types::RefEquality; /// Using a `u32` is sufficient because Ruff only supports parsing documents with a size of max `u32::max` /// and it is impossible to have more statements than characters in the file. We use a `NonZeroU32` to /// take advantage of memory layout optimizations. -#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Ord, PartialOrd)] -pub struct NodeId(NonZeroU32); - -/// Convert a `usize` to a `NodeId` (by adding 1 to the value, and casting to `NonZeroU32`). -impl TryFrom for NodeId { - type Error = TryFromIntError; - - fn try_from(value: usize) -> Result { - Ok(Self(NonZeroU32::try_from(u32::try_from(value)? + 1)?)) - } -} - -/// Convert a `NodeId` to a `usize` (by subtracting 1 from the value, and casting to `usize`). -impl From for usize { - fn from(value: NodeId) -> Self { - value.0.get() as usize - 1 - } -} +#[newtype_index] +#[derive(Ord, PartialOrd)] +pub struct NodeId; /// A [`Node`] represents a statement in a program, along with a pointer to its parent (if any). #[derive(Debug)] @@ -44,7 +29,7 @@ struct Node<'a> { /// The nodes of a program indexed by [`NodeId`] #[derive(Debug, Default)] pub struct Nodes<'a> { - nodes: Vec>, + nodes: IndexVec>, node_to_id: FxHashMap, NodeId>, } @@ -53,16 +38,15 @@ impl<'a> Nodes<'a> { /// /// Panics if a node with the same pointer already exists. pub fn insert(&mut self, stmt: &'a Stmt, parent: Option) -> NodeId { - let next_id = NodeId::try_from(self.nodes.len()).unwrap(); + let next_id = self.nodes.next_index(); if let Some(existing_id) = self.node_to_id.insert(RefEquality(stmt), next_id) { panic!("Node already exists with id {existing_id:?}"); } self.nodes.push(Node { stmt, parent, - depth: parent.map_or(0, |parent| self.nodes[usize::from(parent)].depth + 1), - }); - next_id + depth: parent.map_or(0, |parent| self.nodes[parent].depth + 1), + }) } /// Returns the [`NodeId`] of the given node. @@ -74,26 +58,24 @@ impl<'a> Nodes<'a> { /// Return the [`NodeId`] of the parent node. #[inline] pub fn parent_id(&self, node_id: NodeId) -> Option { - self.nodes[usize::from(node_id)].parent + self.nodes[node_id].parent } /// Return the depth of the node. #[inline] pub fn depth(&self, node_id: NodeId) -> u32 { - self.nodes[usize::from(node_id)].depth + self.nodes[node_id].depth } /// Returns an iterator over all [`NodeId`] ancestors, starting from the given [`NodeId`]. pub fn ancestor_ids(&self, node_id: NodeId) -> impl Iterator + '_ { - std::iter::successors(Some(node_id), |&node_id| { - self.nodes[usize::from(node_id)].parent - }) + std::iter::successors(Some(node_id), |&node_id| self.nodes[node_id].parent) } /// Return the parent of the given node. 
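`Nodes` in the hunk above is a parent-pointer arena: each statement records its parent id and depth, and ancestor traversal is `iter::successors` over those pointers. Stripped of the AST and `ruff_index` specifics, the shape is roughly:

```rust
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct NodeId(u32);

#[derive(Debug)]
struct Node {
    label: &'static str,
    parent: Option<NodeId>,
    depth: u32,
}

#[derive(Debug, Default)]
struct Nodes(Vec<Node>);

impl Nodes {
    fn insert(&mut self, label: &'static str, parent: Option<NodeId>) -> NodeId {
        let id = NodeId(self.0.len() as u32);
        let depth = parent.map_or(0, |p| self.0[p.0 as usize].depth + 1);
        self.0.push(Node { label, parent, depth });
        id
    }

    /// Walk from a node up to the root by following parent pointers.
    fn ancestor_ids(&self, id: NodeId) -> impl Iterator<Item = NodeId> + '_ {
        std::iter::successors(Some(id), |&id| self.0[id.0 as usize].parent)
    }
}

fn main() {
    let mut nodes = Nodes::default();
    let module = nodes.insert("module", None);
    let class = nodes.insert("class C", Some(module));
    let method = nodes.insert("def f", Some(class));
    let labels: Vec<_> = nodes
        .ancestor_ids(method)
        .map(|id| nodes.0[id.0 as usize].label)
        .collect();
    assert_eq!(labels, ["def f", "class C", "module"]);
    assert_eq!(nodes.0[method.0 as usize].depth, 2);
}
```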
pub fn parent(&self, node: &'a Stmt) -> Option<&'a Stmt> { let node_id = self.node_to_id.get(&RefEquality(node))?; - let parent_id = self.nodes[usize::from(*node_id)].parent?; + let parent_id = self.nodes[*node_id].parent?; Some(self[parent_id]) } } @@ -101,13 +83,15 @@ impl<'a> Nodes<'a> { impl<'a> Index for Nodes<'a> { type Output = &'a Stmt; + #[inline] fn index(&self, index: NodeId) -> &Self::Output { - &self.nodes[usize::from(index)].stmt + &self.nodes[index].stmt } } impl<'a> IndexMut for Nodes<'a> { + #[inline] fn index_mut(&mut self, index: NodeId) -> &mut Self::Output { - &mut self.nodes[usize::from(index)].stmt + &mut self.nodes[index].stmt } } diff --git a/crates/ruff_python_semantic/src/reference.rs b/crates/ruff_python_semantic/src/reference.rs new file mode 100644 index 0000000000000..cac8bb8f39c32 --- /dev/null +++ b/crates/ruff_python_semantic/src/reference.rs @@ -0,0 +1,59 @@ +use ruff_text_size::TextRange; + +use ruff_index::{newtype_index, IndexVec}; + +use crate::context::ExecutionContext; +use crate::scope::ScopeId; + +#[derive(Debug, Clone)] +pub struct Reference { + /// The scope in which the reference is defined. + scope_id: ScopeId, + /// The range of the reference in the source code. + range: TextRange, + /// The context in which the reference occurs. + context: ExecutionContext, +} + +impl Reference { + pub const fn scope_id(&self) -> ScopeId { + self.scope_id + } + + pub const fn range(&self) -> TextRange { + self.range + } + + pub const fn context(&self) -> ExecutionContext { + self.context + } +} + +/// Id uniquely identifying a read reference in a program. +#[newtype_index] +pub struct ReferenceId; + +/// The references of a program indexed by [`ReferenceId`]. +#[derive(Debug, Default)] +pub struct References(IndexVec); + +impl References { + /// Pushes a new read reference and returns its unique id. + pub fn push( + &mut self, + scope_id: ScopeId, + range: TextRange, + context: ExecutionContext, + ) -> ReferenceId { + self.0.push(Reference { + scope_id, + range, + context, + }) + } + + /// Returns the [`Reference`] with the given id. + pub fn resolve(&self, id: ReferenceId) -> &Reference { + &self.0[id] + } +} diff --git a/crates/ruff_python_semantic/src/scope.rs b/crates/ruff_python_semantic/src/scope.rs index 6a9f1926194cf..e8b2c926c1f4f 100644 --- a/crates/ruff_python_semantic/src/scope.rs +++ b/crates/ruff_python_semantic/src/scope.rs @@ -1,6 +1,6 @@ -use std::num::TryFromIntError; -use std::ops::{Deref, Index, IndexMut}; +use std::ops::{Deref, DerefMut}; +use ruff_index::{newtype_index, Idx, IndexSlice, IndexVec}; use rustc_hash::FxHashMap; use rustpython_parser::ast::{Arguments, Expr, Keyword, Stmt}; @@ -45,8 +45,8 @@ impl<'a> Scope<'a> { } /// Returns the [id](BindingId) of the binding bound to the given name. - pub fn get(&self, name: &str) -> Option<&BindingId> { - self.bindings.get(name) + pub fn get(&self, name: &str) -> Option { + self.bindings.get(name).copied() } /// Adds a new binding with the given name to this scope. @@ -70,22 +70,23 @@ impl<'a> Scope<'a> { } /// Returns the ids of all bindings defined in this scope. - pub fn binding_ids(&self) -> std::collections::hash_map::Values<&str, BindingId> { - self.bindings.values() + pub fn binding_ids(&self) -> impl Iterator + '_ { + self.bindings.values().copied() } /// Returns a tuple of the name and id of all bindings defined in this scope. 
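The new `reference.rs` above is a flat arena of `Reference`s: resolving a name pushes one and attaches its id to the matched binding, which is what `is_used` and the `add_*_reference` helpers in `model.rs` build on. A reduced sketch of that relationship (offsets as a plain pair instead of `TextRange`, with the scope and execution-context fields omitted):

```rust
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct ReferenceId(u32);

#[derive(Debug)]
struct Reference {
    // The real type also records the scope and execution context of the read.
    range: (u32, u32),
}

#[derive(Debug, Default)]
struct References(Vec<Reference>);

impl References {
    fn push(&mut self, range: (u32, u32)) -> ReferenceId {
        let id = ReferenceId(self.0.len() as u32);
        self.0.push(Reference { range });
        id
    }
}

#[derive(Debug, Default)]
struct Binding {
    references: Vec<ReferenceId>,
}

impl Binding {
    fn is_used(&self) -> bool {
        !self.references.is_empty()
    }
}

fn main() {
    let mut references = References::default();
    let mut binding = Binding::default();
    // A binding with no attached references is what an unused-name style check looks for.
    assert!(!binding.is_used());

    // Resolving a read of the bound name records a reference and attaches its id.
    let id = references.push((10, 14));
    binding.references.push(id);
    assert!(binding.is_used());
}
```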
- pub fn bindings(&self) -> std::collections::hash_map::Iter<&'a str, BindingId> { - self.bindings.iter() + pub fn bindings(&self) -> impl Iterator + '_ { + self.bindings.iter().map(|(&name, &id)| (name, id)) } /// Returns an iterator over all [bindings](BindingId) bound to the given name, including /// those that were shadowed by later bindings. - pub fn bindings_for_name(&self, name: &str) -> impl Iterator { + pub fn bindings_for_name(&self, name: &str) -> impl Iterator + '_ { self.bindings .get(name) .into_iter() .chain(self.shadowed_bindings.get(name).into_iter().flatten().rev()) + .copied() } /// Adds a reference to a star import (e.g., `from sys import *`) to this scope. @@ -151,39 +152,25 @@ pub struct Lambda<'a> { /// Using a `u32` is sufficient because Ruff only supports parsing documents with a size of max `u32::max` /// and it is impossible to have more scopes than characters in the file (because defining a function or class /// requires more than one character). -#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Ord, PartialOrd)] -pub struct ScopeId(u32); +#[newtype_index] +pub struct ScopeId; impl ScopeId { /// Returns the ID for the global scope #[inline] pub const fn global() -> Self { - ScopeId(0) + ScopeId::from_u32(0) } /// Returns `true` if this is the id of the global scope pub const fn is_global(&self) -> bool { - self.0 == 0 - } -} - -impl TryFrom for ScopeId { - type Error = TryFromIntError; - - fn try_from(value: usize) -> Result { - Ok(Self(u32::try_from(value)?)) - } -} - -impl From for usize { - fn from(value: ScopeId) -> Self { - value.0 as usize + self.index() == 0 } } /// The scopes of a program indexed by [`ScopeId`] #[derive(Debug)] -pub struct Scopes<'a>(Vec>); +pub struct Scopes<'a>(IndexVec>); impl<'a> Scopes<'a> { /// Returns a reference to the global scope @@ -198,7 +185,7 @@ impl<'a> Scopes<'a> { /// Pushes a new scope and returns its unique id pub fn push_scope(&mut self, kind: ScopeKind<'a>, parent: ScopeId) -> ScopeId { - let next_id = ScopeId::try_from(self.0.len()).unwrap(); + let next_id = ScopeId::new(self.0.len()); self.0.push(Scope::local(kind, parent)); next_id } @@ -218,27 +205,19 @@ impl<'a> Scopes<'a> { impl Default for Scopes<'_> { fn default() -> Self { - Self(vec![Scope::global()]) - } -} - -impl<'a> Index for Scopes<'a> { - type Output = Scope<'a>; - - fn index(&self, index: ScopeId) -> &Self::Output { - &self.0[usize::from(index)] - } -} - -impl<'a> IndexMut for Scopes<'a> { - fn index_mut(&mut self, index: ScopeId) -> &mut Self::Output { - &mut self.0[usize::from(index)] + Self(IndexVec::from_raw(vec![Scope::global()])) } } impl<'a> Deref for Scopes<'a> { - type Target = [Scope<'a>]; + type Target = IndexSlice>; fn deref(&self) -> &Self::Target { &self.0 } } + +impl<'a> DerefMut for Scopes<'a> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} diff --git a/crates/ruff_testing_macros/Cargo.toml b/crates/ruff_testing_macros/Cargo.toml index aeffa5946907b..101431dc709e5 100644 --- a/crates/ruff_testing_macros/Cargo.toml +++ b/crates/ruff_testing_macros/Cargo.toml @@ -13,4 +13,4 @@ proc-macro = true glob = { workspace = true } proc-macro2 = { workspace = true } quote = { workspace = true } -syn = { workspace = true } +syn = { workspace = true, features = ["extra-traits"] } diff --git a/crates/ruff_testing_macros/src/lib.rs b/crates/ruff_testing_macros/src/lib.rs index 622da40057792..23815540a15c9 100644 --- a/crates/ruff_testing_macros/src/lib.rs +++ b/crates/ruff_testing_macros/src/lib.rs @@ -1,11 +1,12 @@ -use 
glob::{glob, Pattern}; use proc_macro::TokenStream; -use proc_macro2::Span; -use quote::{format_ident, quote}; use std::borrow::Cow; use std::collections::BTreeMap; use std::env; use std::path::{Component, PathBuf}; + +use glob::{glob, Pattern}; +use proc_macro2::Span; +use quote::{format_ident, quote}; use syn::parse::{Parse, ParseStream}; use syn::punctuated::Punctuated; use syn::spanned::Spanned; diff --git a/crates/ruff_wasm/src/lib.rs b/crates/ruff_wasm/src/lib.rs index 32c9a484a3989..9aeb656786726 100644 --- a/crates/ruff_wasm/src/lib.rs +++ b/crates/ruff_wasm/src/lib.rs @@ -5,6 +5,7 @@ use serde::{Deserialize, Serialize}; use wasm_bindgen::prelude::*; use ruff::directives; +use ruff::line_width::{LineLength, TabSize}; use ruff::linter::{check_path, LinterResult}; use ruff::registry::AsRule; use ruff::rules::{ @@ -96,11 +97,14 @@ pub fn defaultSettings() -> Result { allowed_confusables: Some(Vec::default()), builtins: Some(Vec::default()), dummy_variable_rgx: Some(defaults::DUMMY_VARIABLE_RGX.as_str().to_string()), + extend_fixable: Some(Vec::default()), extend_ignore: Some(Vec::default()), extend_select: Some(Vec::default()), + extend_unfixable: Some(Vec::default()), external: Some(Vec::default()), ignore: Some(Vec::default()), - line_length: Some(defaults::LINE_LENGTH), + line_length: Some(LineLength::default()), + tab_size: Some(TabSize::default()), select: Some(defaults::PREFIXES.to_vec()), target_version: Some(defaults::TARGET_VERSION), // Ignore a bunch of options that don't make sense in a single-file editor. @@ -109,6 +113,7 @@ pub fn defaultSettings() -> Result { extend: None, extend_exclude: None, extend_include: None, + extend_per_file_ignores: None, fix: None, fix_only: None, fixable: None, @@ -143,7 +148,7 @@ pub fn defaultSettings() -> Result { flake8_import_conventions: Some( flake8_import_conventions::settings::Settings::default().into(), ), - flake8_tidy_imports: Some(flake8_tidy_imports::Settings::default().into()), + flake8_tidy_imports: Some(flake8_tidy_imports::settings::Settings::default().into()), flake8_type_checking: Some(flake8_type_checking::settings::Settings::default().into()), flake8_unused_arguments: Some( flake8_unused_arguments::settings::Settings::default().into(), diff --git a/docs/configuration.md b/docs/configuration.md index 545faaecd06c8..d7ad03733def3 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -232,13 +232,17 @@ Rule selection: --ignore Comma-separated list of rule codes to disable --extend-select - Like --select, but adds additional rule codes on top of the selected ones + Like --select, but adds additional rule codes on top of those already specified --per-file-ignores List of mappings from file pattern to code to exclude + --extend-per-file-ignores + Like `--per-file-ignores`, but adds additional ignores on top of those already specified --fixable List of rule codes to treat as eligible for autofix. Only applicable when autofix itself is enabled (e.g., via `--fix`) --unfixable List of rule codes to treat as ineligible for autofix. 
Only applicable when autofix itself is enabled (e.g., via `--fix`) + --extend-fixable + Like --fixable, but adds additional rule codes on top of those already specified File selection: --exclude List of paths, used to omit files and/or directories from analysis diff --git a/docs/editor-integrations.md b/docs/editor-integrations.md index 245c8474cd0f1..6264f45c9ccc9 100644 --- a/docs/editor-integrations.md +++ b/docs/editor-integrations.md @@ -10,12 +10,12 @@ which supports autofix actions, import sorting, and more. ## Language Server Protocol (Official) Ruff supports the [Language Server Protocol](https://microsoft.github.io/language-server-protocol/) -via the [`ruff-lsp`](https://github.com/charliermarsh/ruff-lsp) Python package, available on +via the [`ruff-lsp`](https://github.com/astral-sh/ruff-lsp) Python package, available on [PyPI](https://pypi.org/project/ruff-lsp/). -[`ruff-lsp`](https://github.com/charliermarsh/ruff-lsp) enables Ruff to be used with any editor that -supports the Language Server Protocol, including [Neovim](https://github.com/charliermarsh/ruff-lsp#example-neovim), -[Sublime Text](https://github.com/charliermarsh/ruff-lsp#example-sublime-text), Emacs, and more. +[`ruff-lsp`](https://github.com/astral-sh/ruff-lsp) enables Ruff to be used with any editor that +supports the Language Server Protocol, including [Neovim](https://github.com/astral-sh/ruff-lsp#example-neovim), +[Sublime Text](https://github.com/astral-sh/ruff-lsp#example-sublime-text), Emacs, and more. For example, to use `ruff-lsp` with Neovim, install `ruff-lsp` from PyPI along with [`nvim-lspconfig`](https://github.com/neovim/nvim-lspconfig). Then, add something like the following @@ -80,7 +80,7 @@ Upon successful installation, you should see Ruff's diagnostics surfaced directl ![Code Actions available in Neovim](https://user-images.githubusercontent.com/1309177/208278707-25fa37e4-079d-4597-ad35-b95dba066960.png) -To use `ruff-lsp` with other editors, including Sublime Text and Helix, see the [`ruff-lsp` documentation](https://github.com/charliermarsh/ruff-lsp#installation-and-usage). +To use `ruff-lsp` with other editors, including Sublime Text and Helix, see the [`ruff-lsp` documentation](https://github.com/astral-sh/ruff-lsp#installation-and-usage). ## Language Server Protocol (Unofficial) @@ -122,10 +122,10 @@ require'lspconfig'.pylsp.setup { ## Vim & Neovim -Ruff can be integrated into any editor that supports the Language Server Protocol via [`ruff-lsp`](https://github.com/charliermarsh/ruff-lsp) +Ruff can be integrated into any editor that supports the Language Server Protocol via [`ruff-lsp`](https://github.com/astral-sh/ruff-lsp) (see: [Language Server Protocol](#language-server-protocol-official)), including Vim and Neovim. -It's recommended that you use [`ruff-lsp`](https://github.com/charliermarsh/ruff-lsp), the +It's recommended that you use [`ruff-lsp`](https://github.com/astral-sh/ruff-lsp), the officially supported LSP server for Ruff. To use `ruff-lsp` with Neovim, install `ruff-lsp` from PyPI along with [`nvim-lspconfig`](https://github.com/neovim/nvim-lspconfig). 
Then, add something like the following to your `init.lua`: diff --git a/docs/faq.md b/docs/faq.md index 8eb1f5942b305..c40cfa90524b7 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -371,5 +371,5 @@ Ruff's color output is powered by the [`colored`](https://crates.io/crates/color attempts to automatically detect whether the output stream supports color. However, you can force colors off by setting the `NO_COLOR` environment variable to any value (e.g., `NO_COLOR=1`). -[`colored`](https://crates.io/crates/colored) also supports the the `CLICOLOR` and `CLICOLOR_FORCE` +[`colored`](https://crates.io/crates/colored) also supports the `CLICOLOR` and `CLICOLOR_FORCE` environment variables (see the [spec](https://bixense.com/clicolors/)). diff --git a/docs/tutorial.md b/docs/tutorial.md index 87c535f53af3f..b2dac8d196427 100644 --- a/docs/tutorial.md +++ b/docs/tutorial.md @@ -240,9 +240,9 @@ This tutorial has focused on Ruff's command-line interface, but Ruff can also be [pre-commit](https://pre-commit.com) hook: ```yaml -- repo: https://github.com/charliermarsh/ruff-pre-commit +- repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. - rev: 'v0.0.269' + rev: v0.0.270 hooks: - id: ruff ``` @@ -251,7 +251,7 @@ See [_Usage_](usage.md) for more. ## Editor Integrations -Ruff can also be used as a [VS Code extension](https://github.com/charliermarsh/ruff-vscode) or -alongside any other editor through the [Ruff LSP](https://github.com/charliermarsh/ruff-lsp). +Ruff can also be used as a [VS Code extension](https://github.com/astral-sh/ruff-vscode) or +alongside any other editor through the [Ruff LSP](https://github.com/astral-sh/ruff-lsp). See [_Editor Integrations_](editor-integrations.md). diff --git a/docs/usage.md b/docs/usage.md index 7a6d98cbeddfc..812ed466fae5f 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -20,9 +20,9 @@ ruff check path/to/code/ --watch Ruff can also be used as a [pre-commit](https://pre-commit.com) hook: ```yaml -- repo: https://github.com/charliermarsh/ruff-pre-commit +- repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. - rev: 'v0.0.269' + rev: v0.0.270 hooks: - id: ruff ``` @@ -30,9 +30,9 @@ Ruff can also be used as a [pre-commit](https://pre-commit.com) hook: Or, to enable autofix: ```yaml -- repo: https://github.com/charliermarsh/ruff-pre-commit +- repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. - rev: 'v0.0.269' + rev: v0.0.270 hooks: - id: ruff args: [ --fix, --exit-non-zero-on-fix ] @@ -45,8 +45,8 @@ reformatting. ## VS Code -Ruff can also be used as a [VS Code extension](https://github.com/charliermarsh/ruff-vscode) or -alongside any other editor through the [Ruff LSP](https://github.com/charliermarsh/ruff-lsp). +Ruff can also be used as a [VS Code extension](https://github.com/astral-sh/ruff-vscode) or +alongside any other editor through the [Ruff LSP](https://github.com/astral-sh/ruff-lsp). ## GitHub Action diff --git a/pyproject.toml b/pyproject.toml index 9dd4a186d39bd..658c31fb87e9d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,11 +1,11 @@ [build-system] -requires = ["maturin>=0.15.2,<0.16"] +requires = ["maturin>=1.0,<2.0"] build-backend = "maturin" [project] name = "ruff" -version = "0.0.269" +version = "0.0.270" description = "An extremely fast Python linter, written in Rust." 
authors = [{ name = "Charlie Marsh", email = "charlie.r.marsh@gmail.com" }] maintainers = [{ name = "Charlie Marsh", email = "charlie.r.marsh@gmail.com" }] diff --git a/ruff.schema.json b/ruff.schema.json index 3e0e357729d14..1bcfb5fb08d67 100644 --- a/ruff.schema.json +++ b/ruff.schema.json @@ -66,6 +66,16 @@ "type": "string" } }, + "extend-fixable": { + "description": "A list of rule codes or prefixes to consider autofixable, in addition to those specified by `fixable`.", + "type": [ + "array", + "null" + ], + "items": { + "$ref": "#/definitions/RuleSelector" + } + }, "extend-include": { "description": "A list of file patterns to include when linting, in addition to those specified by `include`.\n\nInclusion are based on globs, and should be single-path patterns, like `*.pyw`, to include any file with the `.pyw` extension.\n\nFor more information on the glob syntax, refer to the [`globset` documentation](https://docs.rs/globset/latest/globset/#syntax).", "type": [ @@ -76,6 +86,19 @@ "type": "string" } }, + "extend-per-file-ignores": { + "description": "A list of mappings from file pattern to rule codes or prefixes to exclude, in addition to any rules excluded by `per-file-ignores`.", + "type": [ + "object", + "null" + ], + "additionalProperties": { + "type": "array", + "items": { + "$ref": "#/definitions/RuleSelector" + } + } + }, "extend-select": { "description": "A list of rule codes or prefixes to enable, in addition to those specified by `select`.", "type": [ @@ -286,7 +309,7 @@ ] }, "force-exclude": { - "description": "Whether to enforce `exclude` and `extend-exclude` patterns, even for paths that are passed to Ruff explicitly. Typically, Ruff will lint any paths passed in directly, even if they would typically be excluded. Setting `force-exclude = true` will cause Ruff to respect these exclusions unequivocally.\n\nThis is useful for [`pre-commit`](https://pre-commit.com/), which explicitly passes all changed files to the [`ruff-pre-commit`](https://github.com/charliermarsh/ruff-pre-commit) plugin, regardless of whether they're marked as excluded by Ruff's own settings.", + "description": "Whether to enforce `exclude` and `extend-exclude` patterns, even for paths that are passed to Ruff explicitly. Typically, Ruff will lint any paths passed in directly, even if they would typically be excluded. 
Setting `force-exclude = true` will cause Ruff to respect these exclusions unequivocally.\n\nThis is useful for [`pre-commit`](https://pre-commit.com/), which explicitly passes all changed files to the [`ruff-pre-commit`](https://github.com/astral-sh/ruff-pre-commit) plugin, regardless of whether they're marked as excluded by Ruff's own settings.", "type": [ "boolean", "null" @@ -343,12 +366,14 @@ }, "line-length": { "description": "The line length to use when enforcing long-lines violations (like `E501`).", - "type": [ - "integer", - "null" - ], - "format": "uint", - "minimum": 0.0 + "anyOf": [ + { + "$ref": "#/definitions/LineLength" + }, + { + "type": "null" + } + ] }, "mccabe": { "description": "Options for the `mccabe` plugin.", @@ -480,6 +505,17 @@ "type": "string" } }, + "tab-size": { + "description": "The tabulation size to calculate line length.", + "anyOf": [ + { + "$ref": "#/definitions/TabSize" + }, + { + "type": "null" + } + ] + }, "target-version": { "description": "The Python version to target, e.g., when considering automatic code upgrades, like rewriting type annotations.\n\nIf omitted, the target version will be inferred from the `project.requires-python` field in the relevant `pyproject.toml` (e.g., `requires-python = \">=3.8\"`), if present.", "anyOf": [ @@ -1253,6 +1289,12 @@ }, "additionalProperties": false }, + "LineLength": { + "description": "The length of a line of text that is considered too long.", + "type": "integer", + "format": "uint", + "minimum": 0.0 + }, "McCabeOptions": { "type": "object", "properties": { @@ -1338,12 +1380,14 @@ }, "max-doc-length": { "description": "The maximum line length to allow for line-length violations within documentation (`W505`), including standalone comments.", - "type": [ - "integer", - "null" - ], - "format": "uint", - "minimum": 0.0 + "anyOf": [ + { + "$ref": "#/definitions/LineLength" + }, + { + "type": "null" + } + ] } }, "additionalProperties": false @@ -2053,6 +2097,9 @@ "PLW012", "PLW0120", "PLW0129", + "PLW013", + "PLW0130", + "PLW0131", "PLW04", "PLW040", "PLW0406", @@ -2147,6 +2194,7 @@ "PYI010", "PYI011", "PYI012", + "PYI013", "PYI014", "PYI015", "PYI016", @@ -2247,6 +2295,7 @@ "S509", "S6", "S60", + "S601", "S602", "S603", "S604", @@ -2461,6 +2510,12 @@ } ] }, + "TabSize": { + "description": "The size of a tab.", + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, "Version": { "type": "string" } diff --git a/scripts/Dockerfile.ecosystem b/scripts/Dockerfile.ecosystem index b7fe449c37a91..d671b20c1e6c0 100644 --- a/scripts/Dockerfile.ecosystem +++ b/scripts/Dockerfile.ecosystem @@ -13,15 +13,16 @@ # ``` # From the project root: # ``` -# cargo build --target x86_64-unknown-linux-musl --features ecosystem_ci +# cargo build --target x86_64-unknown-linux-musl # docker buildx build -f scripts/Dockerfile.ecosystem -t ruff-ecosystem-checker --load . # docker run --rm -v ./target/x86_64-unknown-linux-musl/debug/ruff:/app/ruff-new -v ./ruff-old:/app/ruff-old ruff-ecosystem-checker # ``` -# You can customize this, e.g. cache the git checkouts and use a custom json file: +# You can customize this, e.g. 
cache the git checkouts, a custom json file and a glibc build: # ``` -# docker run -v ./target/x86_64-unknown-linux-musl/debug/ruff:/app/ruff-new -v ./ruff-old:/app/ruff-old \ -# -v ./target/checkouts:/app/checkouts -v ./github_search.jsonl:/app/github_search.jsonl \ -# --rm ruff-ecosystem-checker python check_ecosystem.py -v ruff-new ruff-old --checkouts checkouts > output.txt +# docker run -v ./target/debug/ruff:/app/ruff-new -v ./ruff-old:/app/ruff-old -v ./target/checkouts:/app/checkouts \ +# -v ./github_search.jsonl:/app/github_search.jsonl --rm ruff-ecosystem-checker \ +# python check_ecosystem.py --verbose ruff-new ruff-old --projects github_search.jsonl --checkouts checkouts \ +# > target/ecosystem-ci.txt # ``` FROM python:3.11 diff --git a/scripts/check_ecosystem.py b/scripts/check_ecosystem.py index cc1e741933c28..d7afcb764b605 100755 --- a/scripts/check_ecosystem.py +++ b/scripts/check_ecosystem.py @@ -19,6 +19,7 @@ from asyncio.subprocess import PIPE, create_subprocess_exec from contextlib import asynccontextmanager, nullcontext from pathlib import Path +from signal import SIGINT, SIGTERM from typing import TYPE_CHECKING, NamedTuple, Self if TYPE_CHECKING: @@ -36,6 +37,8 @@ class Repository(NamedTuple): select: str = "" ignore: str = "" exclude: str = "" + # Generating fixes is slow and verbose + show_fixes: bool = False @asynccontextmanager async def clone(self: Self, checkout_dir: Path) -> AsyncIterator[Path]: @@ -68,24 +71,26 @@ async def clone(self: Self, checkout_dir: Path) -> AsyncIterator[Path]: process = await create_subprocess_exec(*git_command) - await process.wait() + status_code = await process.wait() - logger.debug(f"Finished cloning {self.org}/{self.repo}") + logger.debug( + f"Finished cloning {self.org}/{self.repo} with status {status_code}", + ) yield Path(checkout_dir) -REPOSITORIES = { - "airflow": Repository("apache", "airflow", "main", select="ALL"), - "bokeh": Repository("bokeh", "bokeh", "branch-3.2", select="ALL"), - "build": Repository("pypa", "build", "main"), - "cibuildwheel": Repository("pypa", "cibuildwheel", "main"), - "disnake": Repository("DisnakeDev", "disnake", "master"), - "scikit-build": Repository("scikit-build", "scikit-build", "main"), - "scikit-build-core": Repository("scikit-build", "scikit-build-core", "main"), - "typeshed": Repository("python", "typeshed", "main", select="PYI"), - "zulip": Repository("zulip", "zulip", "main", select="ALL"), -} +REPOSITORIES: list[Repository] = [ + Repository("apache", "airflow", "main", select="ALL"), + Repository("bokeh", "bokeh", "branch-3.2", select="ALL"), + Repository("pypa", "build", "main"), + Repository("pypa", "cibuildwheel", "main"), + Repository("DisnakeDev", "disnake", "master"), + Repository("scikit-build", "scikit-build", "main"), + Repository("scikit-build", "scikit-build-core", "main"), + Repository("python", "typeshed", "main", select="PYI"), + Repository("zulip", "zulip", "main", select="ALL"), +] SUMMARY_LINE_RE = re.compile(r"^(Found \d+ error.*)|(.*potentially fixable with.*)$") @@ -102,6 +107,7 @@ async def check( select: str = "", ignore: str = "", exclude: str = "", + show_fixes: bool = False, ) -> Sequence[str]: """Run the given ruff binary against the specified path.""" logger.debug(f"Checking {name} with {ruff}") @@ -112,6 +118,8 @@ async def check( ruff_args.extend(["--ignore", ignore]) if exclude: ruff_args.extend(["--exclude", exclude]) + if show_fixes: + ruff_args.extend(["--show-fixes", "--ecosystem-ci"]) start = time.time() proc = await create_subprocess_exec( @@ -169,16 
+177,16 @@ async def compare( # Allows to keep the checkouts locations if checkouts: - checkout_dir = checkouts.joinpath(repo.org).joinpath(repo.repo) + checkout_parent = checkouts.joinpath(repo.org) # Don't create the repodir itself, we need that for checking for existing # clones - checkout_dir.parent.mkdir(exist_ok=True, parents=True) - location_context = nullcontext(checkout_dir) + checkout_parent.mkdir(exist_ok=True, parents=True) + location_context = nullcontext(checkout_parent) else: location_context = tempfile.TemporaryDirectory() - with location_context as checkout_dir: - checkout_dir = Path(checkout_dir) + with location_context as checkout_parent: + checkout_dir = Path(checkout_parent).joinpath(repo.repo) async with repo.clone(checkout_dir) as path: try: async with asyncio.TaskGroup() as tg: @@ -190,6 +198,7 @@ async def compare( select=repo.select, ignore=repo.ignore, exclude=repo.exclude, + show_fixes=repo.show_fixes, ), ) check2 = tg.create_task( @@ -200,6 +209,7 @@ async def compare( select=repo.select, ignore=repo.ignore, exclude=repo.exclude, + show_fixes=repo.show_fixes, ), ) except ExceptionGroup as e: @@ -214,7 +224,7 @@ async def compare( return Diff(removed, added) -def read_projects_jsonl(projects_jsonl: Path) -> dict[str, Repository]: +def read_projects_jsonl(projects_jsonl: Path) -> dict[tuple[str, str], Repository]: """Read either of the two formats of https://github.com/akx/ruff-usage-aggregate.""" repositories = {} for line in projects_jsonl.read_text().splitlines(): @@ -233,20 +243,26 @@ def read_projects_jsonl(projects_jsonl: Path) -> dict[str, Repository]: # us the revision, but there's no way with git to just do # `git clone --depth 1` with a specific ref. # `ref = item["url"].split("?ref=")[1]` would be exact - repositories[repository["name"]] = Repository( + repositories[(repository["owner"], repository["repo"])] = Repository( repository["owner"]["login"], repository["name"], None, + select=repository.get("select"), + ignore=repository.get("ignore"), + exclude=repository.get("exclude"), ) else: assert "owner" in data, "Unknown ruff-usage-aggregate format" # Pick only the easier case for now. if data["path"] != "pyproject.toml": continue - repositories[data["repo"]] = Repository( + repositories[(data["owner"], data["repo"])] = Repository( data["owner"], data["repo"], data.get("ref"), + select=data.get("select"), + ignore=data.get("ignore"), + exclude=data.get("exclude"), ) return repositories @@ -262,7 +278,7 @@ async def main( if projects_jsonl: repositories = read_projects_jsonl(projects_jsonl) else: - repositories = REPOSITORIES + repositories = {(repo.org, repo.repo): repo for repo in REPOSITORIES} logger.debug(f"Checking {len(repositories)} projects") @@ -292,11 +308,11 @@ async def main( print(f"\u2139\ufe0f ecosystem check **detected changes**. {changes}") print() - for name, diff in diffs.items(): + for (org, repo), diff in diffs.items(): if isinstance(diff, Exception): changes = "error" - print(f"
{name} ({changes})") - repo = repositories[name] + print(f"
{repo} ({changes})") + repo = repositories[(org, repo)] print( f"https://github.com/{repo.org}/{repo.repo} ref {repo.ref} " f"select {repo.select} ignore {repo.ignore} exclude {repo.exclude}", @@ -313,7 +329,7 @@ async def main( print("
") elif diff: changes = f"+{len(diff.added)}, -{len(diff.removed)}" - print(f"
{name} ({changes})") + print(f"
{repo} ({changes})") print("

") print() @@ -414,7 +430,8 @@ async def main( else: logging.basicConfig(level=logging.INFO) - asyncio.run( + loop = asyncio.get_event_loop() + main_task = asyncio.ensure_future( main( ruff1=args.ruff1, ruff2=args.ruff2, @@ -422,3 +439,10 @@ async def main( checkouts=args.checkouts, ), ) + # https://stackoverflow.com/a/58840987/3549270 + for signal in [SIGINT, SIGTERM]: + loop.add_signal_handler(signal, main_task.cancel) + try: + loop.run_until_complete(main_task) + finally: + loop.close() diff --git a/scripts/ecosystem_all_check.py b/scripts/ecosystem_all_check.py new file mode 100644 index 0000000000000..9f509c57ac9c0 --- /dev/null +++ b/scripts/ecosystem_all_check.py @@ -0,0 +1,78 @@ +"""This is @konstin's scripts for checking an entire checkout of ~2.1k packages for +panics, autofix errors and similar problems. + +It's a less elaborate, more hacky version of check_ecosystem.py +""" + +import json +import subprocess +import sys +from pathlib import Path +from subprocess import CalledProcessError +from typing import NamedTuple, Optional + +from tqdm import tqdm + + +class Repository(NamedTuple): + """A GitHub repository at a specific ref.""" + + org: str + repo: str + ref: Optional[str] + + +def main() -> None: + ruff_args = sys.argv[1:] + checkouts = Path("checkouts") + out_dir = Path("ecosystem_all_results") + github_search_json = Path("github_search.jsonl") + # Somehow it doesn't like plain ruff + ruff = Path.cwd().joinpath("ruff") + + out_dir.mkdir(parents=True, exist_ok=True) + + repositories = [] + for line in github_search_json.read_text().splitlines(): + item = json.loads(line) + # Pick only the easier case for now. + if item["path"] != "pyproject.toml": + continue + repositories.append( + Repository( + item["owner"], + item["repo"], + item.get("ref"), + ), + ) + + successes = 0 + errors = 0 + for repository in tqdm(repositories): + project_dir = checkouts.joinpath(repository.org).joinpath(repository.repo) + if not project_dir.is_dir(): + tqdm.write(f"Missing {project_dir}") + errors += 1 + continue + + try: + output = subprocess.run( + [ruff, *ruff_args, "."], + cwd=project_dir, + capture_output=True, + text=True, + ) + except CalledProcessError as e: + tqdm.write(f"Ruff failed on {project_dir}: {e}") + errors += 1 + continue + + org_repo = f"{repository.org}:{repository.repo}" + out_dir.joinpath(f"{org_repo}.stdout.txt").write_text(output.stdout) + out_dir.joinpath(f"{org_repo}.stderr.txt").write_text(output.stderr) + successes += 1 + print(f"Success: {successes} Error {errors}") + + +if __name__ == "__main__": + main() diff --git a/scripts/ecosystem_all_check.sh b/scripts/ecosystem_all_check.sh new file mode 100755 index 0000000000000..315c52dc30c86 --- /dev/null +++ b/scripts/ecosystem_all_check.sh @@ -0,0 +1,31 @@ +#!/bin/bash +# This is @konstin's setup for checking an entire checkout of ~3k packages for +# panics, autofix errors and similar problems. 
+# +# We put this in a docker container because processing random scraped code from GitHub is +# [kinda dangerous](https://moyix.blogspot.com/2022/09/someones-been-messing-with-my-subnormals.html) +# +# Usage: +# ``` +# scripts/ecosystem_all_check.sh check --select RUF200 +# ``` + +# https://stackoverflow.com/a/246128/3549270 +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +time docker run --rm -it \ + -w /app \ + -v "${SCRIPT_DIR}/../target/checkouts:/app/checkouts" \ + -v "${SCRIPT_DIR}/../target/ecosystem_all_results:/app/ecosystem_all_results" \ + -v "${SCRIPT_DIR}/../target/x86_64-unknown-linux-musl/release/ruff:/app/ruff" \ + -v "${SCRIPT_DIR}/../ecosystem_all.py:/app/ecosystem_all.py" \ + -v "${SCRIPT_DIR}/../github_search.jsonl:/app/github_search.jsonl" \ + -v "${SCRIPT_DIR}/../.venv-3.11:/app/.venv" \ + -v "${SCRIPT_DIR}/ecosystem_all_check_entrypoint.sh:/app/ecosystem_all_check_entrypoint.sh" \ + -v "${SCRIPT_DIR}/ecosystem_all_check.py:/app/ecosystem_all_check.py" \ + python:3.11 ./ecosystem_all_check_entrypoint.sh "$@" + +# grep the autofix errors +grep -R "the rule codes" "${SCRIPT_DIR}/../target/ecosystem_all_results" | sort > "${SCRIPT_DIR}/../target/autofix-errors.txt" +# Make sure we didn't have an early exit +echo "Done" diff --git a/scripts/ecosystem_all_check_entrypoint.sh b/scripts/ecosystem_all_check_entrypoint.sh new file mode 100755 index 0000000000000..edbd8dd36ce47 --- /dev/null +++ b/scripts/ecosystem_all_check_entrypoint.sh @@ -0,0 +1,9 @@ +#!/bin/bash +# Wrapper for ecosystem_all_check.py + +if [ ! -d ".venv/bin" ]; then + python -m venv .venv + .venv/bin/pip install tqdm +fi + +.venv/bin/python ecosystem_all_check.py "$@" diff --git a/scripts/generate_known_standard_library.py b/scripts/generate_known_standard_library.py index 1489939ab94e1..7fd7ae7222aed 100644 --- a/scripts/generate_known_standard_library.py +++ b/scripts/generate_known_standard_library.py @@ -21,13 +21,13 @@ ] -class FakeConfig: # noqa: D101 +class FakeConfig: intersphinx_timeout = None tls_verify = True user_agent = "" -class FakeApp: # noqa: D101 +class FakeApp: srcdir = "" config = FakeConfig() diff --git a/scripts/pyproject.toml b/scripts/pyproject.toml index c7665f5d74a50..c34ba92a24a2f 100644 --- a/scripts/pyproject.toml +++ b/scripts/pyproject.toml @@ -12,6 +12,7 @@ line-length = 88 select = ["ALL"] ignore = [ "C901", # McCabe complexity + "D", # pydocstyle "PL", # pylint "S", # bandit "G", # flake8-logging diff --git a/scripts/update_ambiguous_characters.py b/scripts/update_ambiguous_characters.py index ff39ee8a10174..8413c5bcfb082 100644 --- a/scripts/update_ambiguous_characters.py +++ b/scripts/update_ambiguous_characters.py @@ -47,7 +47,7 @@ def format_confusables_rs(raw_data: dict) -> str: return prelude + "\n".join(tuples) + postlude -def main() -> None: # noqa: D103 +def main() -> None: print("Retrieving data...") mapping_data = get_mapping_data() formatted_data = format_confusables_rs(mapping_data) diff --git a/scripts/update_schemastore.py b/scripts/update_schemastore.py new file mode 100644 index 0000000000000..ace0f715c6519 --- /dev/null +++ b/scripts/update_schemastore.py @@ -0,0 +1,97 @@ +"""Update ruff.json in schemastore. + +This script will clone astral-sh/schemastore, update the schema and push the changes +to a new branch tagged with the ruff git hash. You should see a URL to create the PR +to schemastore in the CLI. 
+""" + +import json +from pathlib import Path +from subprocess import check_call, check_output +from tempfile import TemporaryDirectory + +schemastore_fork = "https://github.com/astral-sh/schemastore" +schemastore_upstream = "https://github.com/SchemaStore/schemastore" +ruff_repo = "https://github.com/charliermarsh/ruff" +root = Path( + check_output(["git", "rev-parse", "--show-toplevel"], text=True).strip(), +) +ruff_json = Path("src/schemas/json/ruff.json") + + +def update_schemastore(schemastore: Path) -> None: + if not schemastore.is_dir(): + check_call(["git", "clone", schemastore_fork, schemastore]) + check_call( + [ + "git", + "remote", + "add", + "upstream", + schemastore_upstream, + ], + cwd=schemastore, + ) + # Create a new branch tagged with the current ruff commit up to date with the latest + # upstream schemastore + check_call(["git", "fetch", "upstream"], cwd=schemastore) + current_sha = check_output(["git", "rev-parse", "HEAD"], text=True).strip() + branch = f"update-ruff-{current_sha}" + check_call( + ["git", "switch", "-c", branch], + cwd=schemastore, + ) + check_call( + ["git", "reset", "--hard", "upstream/master"], + cwd=schemastore, + ) + + # Update the schema and format appropriately + schema = json.loads(root.joinpath("ruff.schema.json").read_text()) + schema["$id"] = "https://json.schemastore.org/ruff.json" + schemastore.joinpath(ruff_json).write_text( + json.dumps(dict(sorted(schema.items())), indent=2, ensure_ascii=False), + ) + check_call(["prettier", "--write", ruff_json], cwd=schemastore) + + # Check if the schema has changed + # https://stackoverflow.com/a/9393642/3549270 + if check_output(["git", "status", "-s"], cwd=schemastore).strip(): + # Schema has changed, commit and push + commit_url = f"{ruff_repo}/commit/{current_sha}" + commit_body = ( + f"This updates ruff's JSON schema to [{current_sha}]({commit_url})" + ) + # https://stackoverflow.com/a/22909204/3549270 + check_call( + [ + "git", + "commit", + "-a", + "-m", + "Update ruff's JSON schema", + "-m", + commit_body, + ], + cwd=schemastore, + ) + # This should show the link to create a PR + check_call( + ["git", "push", "--set-upstream", "origin", branch], + cwd=schemastore, + ) + else: + print("No changes") + + +def main() -> None: + schemastore_existing = root.joinpath("schemastore") + if schemastore_existing.is_dir(): + update_schemastore(schemastore_existing) + else: + with TemporaryDirectory() as temp_dir: + update_schemastore(Path(temp_dir).joinpath("schemastore")) + + +if __name__ == "__main__": + main()