
Commit 071a242

Apply ruff
1 parent bc31e19 commit 071a242

15 files changed (+86, -101 lines)

pandas/core/algorithms.py (+4, -2)

@@ -1689,8 +1689,10 @@ def map_array(
         indexer = mapper.index.get_indexer(arr)

         if na_action == "raise" and (indexer == -1).any():
-            raise ValueError("Provided mapping is not sufficient to cover"
-                             "all values in the input array!")
+            raise ValueError(
+                "Provided mapping is not sufficient to cover"
+                "all values in the input array!"
+            )

         new_values = take_nd(mapper._values, indexer)

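As a side note on the hunk above: both the old and the new form of this raise rely on Python's implicit concatenation of adjacent string literals, so the two fragments are joined without a separating space. A minimal standalone sketch of that behavior (only the message text is taken from the diff; the variable name is illustrative):

    # Adjacent string literals are merged at compile time; no space is inserted
    # between "cover" and "all".
    message = (
        "Provided mapping is not sufficient to cover"
        "all values in the input array!"
    )
    print(message)
    # Provided mapping is not sufficient to coverall values in the input array!
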
pandas/tests/frame/test_query_eval.py (+5, -15)

@@ -160,21 +160,13 @@ def test_query_empty_string(self):
             df.query("")

     def test_query_duplicate_column_name(self, engine, parser):
-        df = DataFrame(
-            {
-                "A": range(3),
-                "B": range(3),
-                "C": range(3)
-            }
-        ).rename(columns={"B": "A"})
+        df = DataFrame({"A": range(3), "B": range(3), "C": range(3)}).rename(
+            columns={"B": "A"}
+        )

         res = df.query("C == 1", engine=engine, parser=parser)

-        expect = DataFrame(
-            [[1, 1, 1]],
-            columns=["A", "A", "C"],
-            index=[1]
-        )
+        expect = DataFrame([[1, 1, 1]], columns=["A", "A", "C"], index=[1])

         tm.assert_frame_equal(res, expect)

@@ -1140,9 +1132,7 @@ def test_query_with_nested_special_character(self, parser, engine):
             [">=", operator.ge],
         ],
     )
-    def test_query_lex_compare_strings(
-        self, parser, engine, op, func
-    ):
+    def test_query_lex_compare_strings(self, parser, engine, op, func):
         a = Series(np.random.default_rng(2).choice(list("abcde"), 20))
         b = Series(np.arange(a.size))
         df = DataFrame({"X": a, "Y": b})

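For orientation, the reformatted test above exercises DataFrame.query on a frame whose rename call produces a duplicated column label. A rough standalone version of the same scenario, outside the test harness and with the default engine and parser rather than the fixtures (it assumes a pandas version in which this test passes), might look like:

    import pandas as pd

    # rename turns "B" into a second "A", so the frame has duplicate column labels
    df = pd.DataFrame({"A": range(3), "B": range(3), "C": range(3)}).rename(
        columns={"B": "A"}
    )

    # query still filters rows on the unique "C" column
    res = df.query("C == 1")
    print(res)
    #    A  A  C
    # 1  1  1  1
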
scripts/check_for_inconsistent_pandas_namespace.py (+1, -2)

@@ -30,8 +30,7 @@
 from typing import NamedTuple

 ERROR_MESSAGE = (
-    "{path}:{lineno}:{col_offset}: "
-    "Found both '{prefix}.{name}' and '{name}' in {path}"
+    "{path}:{lineno}:{col_offset}: Found both '{prefix}.{name}' and '{name}' in {path}"
 )

scripts/check_test_naming.py (+1)

@@ -8,6 +8,7 @@
 NOTE: if this finds a false positive, you can add the comment `# not a test` to the
 class or function definition. Though hopefully that shouldn't be necessary.
 """
+
 from __future__ import annotations

 import argparse

scripts/generate_pip_deps_from_conda.py (+1)

@@ -12,6 +12,7 @@
     generated with this script:
     $ python scripts/generate_pip_deps_from_conda.py --compare
 """
+
 import argparse
 import pathlib
 import re

scripts/pandas_errors_documented.py (+1)

@@ -6,6 +6,7 @@

     pre-commit run pandas-errors-documented --all-files
 """
+
 from __future__ import annotations

 import argparse

scripts/sort_whatsnew_note.py (+1)

@@ -23,6 +23,7 @@

     pre-commit run sort-whatsnew-items --all-files
 """
+
 from __future__ import annotations

 import argparse

scripts/tests/test_check_test_naming.py (+1, -4)

@@ -24,10 +24,7 @@
             0,
         ),
         (
-            "class Foo:  # not a test\n"
-            "    pass\n"
-            "def test_foo():\n"
-            "    Class.foo()\n",
+            "class Foo:  # not a test\n    pass\ndef test_foo():\n    Class.foo()\n",
             "",
             0,
         ),

scripts/tests/test_inconsistent_namespace_check.py (+2, -6)

@@ -5,14 +5,10 @@
 )

 BAD_FILE_0 = (
-    "from pandas import Categorical\n"
-    "cat_0 = Categorical()\n"
-    "cat_1 = pd.Categorical()"
+    "from pandas import Categorical\ncat_0 = Categorical()\ncat_1 = pd.Categorical()"
 )
 BAD_FILE_1 = (
-    "from pandas import Categorical\n"
-    "cat_0 = pd.Categorical()\n"
-    "cat_1 = Categorical()"
+    "from pandas import Categorical\ncat_0 = pd.Categorical()\ncat_1 = Categorical()"
 )
 BAD_FILE_2 = (
     "from pandas import Categorical\n"

scripts/tests/test_validate_docstrings.py (+9, -11)

@@ -34,8 +34,7 @@ def redundant_import(self, paramx=None, paramy=None) -> None:
         --------
         >>> import numpy as np
         >>> import pandas as pd
-        >>> df = pd.DataFrame(np.ones((3, 3)),
-        ...     columns=('a', 'b', 'c'))
+        >>> df = pd.DataFrame(np.ones((3, 3)), columns=("a", "b", "c"))
         >>> df.all(axis=1)
         0    True
         1    True
@@ -50,14 +49,14 @@ def unused_import(self) -> None:
         Examples
         --------
         >>> import pandas as pdf
-        >>> df = pd.DataFrame(np.ones((3, 3)), columns=('a', 'b', 'c'))
+        >>> df = pd.DataFrame(np.ones((3, 3)), columns=("a", "b", "c"))
         """

     def missing_whitespace_around_arithmetic_operator(self) -> None:
         """
         Examples
         --------
-        >>> 2+5
+        >>> 2 + 5
         7
         """
@@ -66,14 +65,14 @@ def indentation_is_not_a_multiple_of_four(self) -> None:
         Examples
         --------
         >>> if 2 + 5:
-        ...   pass
+        ...     pass
         """

     def missing_whitespace_after_comma(self) -> None:
         """
         Examples
         --------
-        >>> df = pd.DataFrame(np.ones((3,3)),columns=('a','b', 'c'))
+        >>> df = pd.DataFrame(np.ones((3, 3)), columns=("a", "b", "c"))
         """

     def write_array_like_with_hyphen_not_underscore(self) -> None:
@@ -227,13 +226,13 @@ def test_validate_all_ignore_errors(self, monkeypatch):
                 "errors": [
                     ("ER01", "err desc"),
                     ("ER02", "err desc"),
-                    ("ER03", "err desc")
+                    ("ER03", "err desc"),
                 ],
                 "warnings": [],
                 "examples_errors": "",
                 "deprecated": True,
                 "file": "file1",
-                "file_line": "file_line1"
+                "file_line": "file_line1",
             },
         )
         monkeypatch.setattr(
@@ -272,14 +271,13 @@ def test_validate_all_ignore_errors(self, monkeypatch):
                 None: {"ER03"},
                 "pandas.DataFrame.align": {"ER01"},
                 # ignoring an error that is not requested should be of no effect
-                "pandas.Index.all": {"ER03"}
-            }
+                "pandas.Index.all": {"ER03"},
+            },
         )
         # two functions * two not global ignored errors - one function ignored error
         assert exit_status == 2 * 2 - 1


-
 class TestApiItems:
     @property
     def api_doc(self):

scripts/validate_docstrings.py (+36, -33)

@@ -13,6 +13,7 @@
     $ ./validate_docstrings.py
     $ ./validate_docstrings.py pandas.DataFrame.head
 """
+
 from __future__ import annotations

 import argparse
@@ -69,8 +70,10 @@
 }
 ALL_ERRORS = set(NUMPYDOC_ERROR_MSGS).union(set(ERROR_MSGS))
 duplicated_errors = set(NUMPYDOC_ERROR_MSGS).intersection(set(ERROR_MSGS))
-assert not duplicated_errors, (f"Errors {duplicated_errors} exist in both pandas "
-                               "and numpydoc, should they be removed from pandas?")
+assert not duplicated_errors, (
+    f"Errors {duplicated_errors} exist in both pandas "
+    "and numpydoc, should they be removed from pandas?"
+)


 def pandas_error(code, **kwargs):
@@ -257,7 +260,7 @@ def pandas_validate(func_name: str):
             pandas_error(
                 "SA05",
                 reference_name=rel_name,
-                right_reference=rel_name[len("pandas."):],
+                right_reference=rel_name[len("pandas.") :],
             )
             for rel_name in doc.see_also
             if rel_name.startswith("pandas.")
@@ -365,17 +368,18 @@ def print_validate_all_results(
     for func_name, res in result.items():
         error_messages = dict(res["errors"])
         actual_failures = set(error_messages)
-        expected_failures = (ignore_errors.get(func_name, set())
-                             | ignore_errors.get(None, set()))
+        expected_failures = ignore_errors.get(func_name, set()) | ignore_errors.get(
+            None, set()
+        )
         for err_code in actual_failures - expected_failures:
             sys.stdout.write(
-                f'{prefix}{res["file"]}:{res["file_line"]}:'
-                f'{err_code}:{func_name}:{error_messages[err_code]}\n'
+                f"{prefix}{res['file']}:{res['file_line']}:"
+                f"{err_code}:{func_name}:{error_messages[err_code]}\n"
             )
             exit_status += 1
         for err_code in ignore_errors.get(func_name, set()) - actual_failures:
             sys.stdout.write(
-                f'{prefix}{res["file"]}:{res["file_line"]}:'
+                f"{prefix}{res['file']}:{res['file_line']}:"
                 f"{err_code}:{func_name}:"
                 "EXPECTED TO FAIL, BUT NOT FAILING\n"
             )
@@ -384,8 +388,9 @@
     return exit_status


-def print_validate_one_results(func_name: str,
-                               ignore_errors: dict[str, set[str]]) -> int:
+def print_validate_one_results(
+    func_name: str, ignore_errors: dict[str, set[str]]
+) -> int:
     def header(title, width=80, char="#") -> str:
         full_line = char * width
         side_len = (width - len(title) - 2) // 2
@@ -396,15 +401,18 @@ def header(title, width=80, char="#") -> str:

     result = pandas_validate(func_name)

-    result["errors"] = [(code, message) for code, message in result["errors"]
-                        if code not in ignore_errors.get(None, set())]
+    result["errors"] = [
+        (code, message)
+        for code, message in result["errors"]
+        if code not in ignore_errors.get(None, set())
+    ]

     sys.stderr.write(header(f"Docstring ({func_name})"))
     sys.stderr.write(f"{result['docstring']}\n")

     sys.stderr.write(header("Validation"))
     if result["errors"]:
-        sys.stderr.write(f'{len(result["errors"])} Errors found for `{func_name}`:\n')
+        sys.stderr.write(f"{len(result['errors'])} Errors found for `{func_name}`:\n")
         for err_code, err_desc in result["errors"]:
             sys.stderr.write(f"\t{err_code}\t{err_desc}\n")
     else:
@@ -431,14 +439,16 @@ def _format_ignore_errors(raw_ignore_errors):
                     raise ValueError(
                         f"Object `{obj_name}` is present in more than one "
                         "--ignore_errors argument. Please use it once and specify "
-                        "the errors separated by commas.")
+                        "the errors separated by commas."
+                    )
                 ignore_errors[obj_name] = set(error_codes.split(","))

                 unknown_errors = ignore_errors[obj_name] - ALL_ERRORS
                 if unknown_errors:
                     raise ValueError(
                         f"Object `{obj_name}` is ignoring errors {unknown_errors} "
-                        f"which are not known. Known errors are: {ALL_ERRORS}")
+                        f"which are not known. Known errors are: {ALL_ERRORS}"
+                    )

             # global errors "PR02,ES01"
             else:
@@ -448,27 +458,19 @@
         if unknown_errors:
             raise ValueError(
                 f"Unknown errors {unknown_errors} specified using --ignore_errors "
-                "Known errors are: {ALL_ERRORS}")
+                "Known errors are: {ALL_ERRORS}"
+            )

     return ignore_errors


-def main(
-    func_name,
-    output_format,
-    prefix,
-    ignore_deprecated,
-    ignore_errors
-):
+def main(func_name, output_format, prefix, ignore_deprecated, ignore_errors):
     """
     Main entry point. Call the validation for one or for all docstrings.
     """
     if func_name is None:
         return print_validate_all_results(
-            output_format,
-            prefix,
-            ignore_deprecated,
-            ignore_errors
+            output_format, prefix, ignore_deprecated, ignore_errors
         )
     else:
         return print_validate_one_results(func_name, ignore_errors)
@@ -524,10 +526,11 @@ def main(
     args = argparser.parse_args(sys.argv[1:])

     sys.exit(
-        main(args.function,
-             args.format,
-             args.prefix,
-             args.ignore_deprecated,
-             _format_ignore_errors(args.ignore_errors),
-             )
+        main(
+            args.function,
+            args.format,
+            args.prefix,
+            args.ignore_deprecated,
+            _format_ignore_errors(args.ignore_errors),
+        )
     )

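Several hunks in this file swap the f-string quoting so that the outer string uses double quotes and the dictionary keys inside the replacement fields use single quotes. Both spellings build identical strings; a small standalone sketch with made-up values:

    # Made-up result dict, only to illustrate that the quoting change is purely stylistic.
    res = {"file": "pandas/core/frame.py", "file_line": 123}
    prefix = ""

    old_style = f'{prefix}{res["file"]}:{res["file_line"]}:'
    new_style = f"{prefix}{res['file']}:{res['file_line']}:"
    assert old_style == new_style
    print(new_style)  # pandas/core/frame.py:123:
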
scripts/validate_exception_location.py (+1)

@@ -18,6 +18,7 @@
 As a pre-commit hook:
     pre-commit run validate-errors-locations --all-files
 """
+
 from __future__ import annotations

 import argparse

scripts/validate_min_versions_in_sync.py (+2, -1)

@@ -12,6 +12,7 @@

     pre-commit run validate-min-versions-in-sync --all-files
 """
+
 from __future__ import annotations

 import pathlib
@@ -105,7 +106,7 @@ def get_operator_from(dependency: str) -> str | None:


 def get_yaml_map_from(
-    yaml_dic: list[str | dict[str, list[str]]]
+    yaml_dic: list[str | dict[str, list[str]]],
 ) -> dict[str, list[str] | None]:
     yaml_map: dict[str, list[str] | None] = {}
     for dependency in yaml_dic:

scripts/validate_rst_title_capitalization.py (+3, -1)

@@ -11,6 +11,7 @@
 From the command-line:
     python scripts/validate_rst_title_capitalization.py <rst file>
 """
+
 from __future__ import annotations

 import argparse
@@ -266,7 +267,8 @@ def main(source_paths: list[str]) -> int:
             if title != correct_title_capitalization(title):
                 print(
                     f"""{filename}:{line_number}:{err_msg} "{title}" to "{
-                    correct_title_capitalization(title)}" """
+                        correct_title_capitalization(title)
+                    }" """
                 )
                 number_of_errors += 1

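The last hunk above moves the replacement field of a triple-quoted f-string onto its own lines. As an aside, that is ordinary f-string syntax; a self-contained sketch with hypothetical values and a stand-in for the real correct_title_capitalization helper:

    def correct_title_capitalization(title: str) -> str:
        # Stand-in for the real helper: just capitalizes the first word here.
        return title.capitalize()

    filename = "doc/source/example.rst"
    line_number = 3
    err_msg = "Heading capitalization formatted incorrectly"
    title = "bad Title"

    # The expression between the braces of a triple-quoted f-string may span lines.
    print(
        f"""{filename}:{line_number}:{err_msg} "{title}" to "{
            correct_title_capitalization(title)
        }" """
    )
    # doc/source/example.rst:3:Heading capitalization formatted incorrectly "bad Title" to "Bad title"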