diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx
index d112b44c51c07..68c09f83e1cdf 100644
--- a/pandas/_libs/algos.pyx
+++ b/pandas/_libs/algos.pyx
@@ -947,7 +947,7 @@ def rank_1d(
     N = len(values)
 
     if labels is not None:
-        # TODO Cython 3.0: cast won't be necessary (#2992)
+        # TODO(cython3): cast won't be necessary (#2992)
         assert <Py_ssize_t>len(labels) == N
     out = np.empty(N)
     grp_sizes = np.ones(N, dtype=np.int64)
@@ -1086,7 +1086,7 @@ cdef void rank_sorted_1d(
     # array that we sorted previously, which gives us the location of
     # that sorted value for retrieval back from the original
     # values / masked_vals arrays
-    # TODO: de-duplicate once cython supports conditional nogil
+    # TODO(cython3): de-duplicate once cython supports conditional nogil
    if iu_64_floating_obj_t is object:
        with gil:
            for i in range(N):
@@ -1413,7 +1413,7 @@ ctypedef fused out_t:
 @cython.boundscheck(False)
 @cython.wraparound(False)
 def diff_2d(
-    ndarray[diff_t, ndim=2] arr,  # TODO(cython 3) update to "const diff_t[:, :] arr"
+    ndarray[diff_t, ndim=2] arr,  # TODO(cython3) update to "const diff_t[:, :] arr"
     ndarray[out_t, ndim=2] out,
     Py_ssize_t periods,
     int axis,
@@ -1422,7 +1422,7 @@ def diff_2d(
     cdef:
         Py_ssize_t i, j, sx, sy, start, stop
         bint f_contig = arr.flags.f_contiguous
-        # bint f_contig = arr.is_f_contig()  # TODO(cython 3)
+        # bint f_contig = arr.is_f_contig()  # TODO(cython3)
         diff_t left, right
 
     # Disable for unsupported dtype combinations,
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx
index c229c67519a66..078cb8e02e824 100644
--- a/pandas/_libs/groupby.pyx
+++ b/pandas/_libs/groupby.pyx
@@ -963,7 +963,7 @@ def group_last(iu_64_floating_obj_t[:, ::1] out,
         ndarray[int64_t, ndim=2] nobs
         bint runtime_error = False
 
-    # TODO(cython 3.0):
+    # TODO(cython3):
     # Instead of `labels.shape[0]` use `len(labels)`
     if not len(values) == labels.shape[0]:
         raise AssertionError("len(index) != len(labels)")
@@ -978,7 +978,7 @@ def group_last(iu_64_floating_obj_t[:, ::1] out,
     N, K = (<object>values).shape
 
     if iu_64_floating_obj_t is object:
-        # TODO: De-duplicate once conditional-nogil is available
+        # TODO(cython3): De-duplicate once conditional-nogil is available
         for i in range(N):
             lab = labels[i]
             if lab < 0:
@@ -1057,7 +1057,7 @@ def group_nth(iu_64_floating_obj_t[:, ::1] out,
         ndarray[int64_t, ndim=2] nobs
         bint runtime_error = False
 
-    # TODO(cython 3.0):
+    # TODO(cython3):
     # Instead of `labels.shape[0]` use `len(labels)`
     if not len(values) == labels.shape[0]:
         raise AssertionError("len(index) != len(labels)")
@@ -1072,7 +1072,7 @@ def group_nth(iu_64_floating_obj_t[:, ::1] out,
     N, K = (<object>values).shape
 
     if iu_64_floating_obj_t is object:
-        # TODO: De-duplicate once conditional-nogil is available
+        # TODO(cython3): De-duplicate once conditional-nogil is available
         for i in range(N):
             lab = labels[i]
             if lab < 0:
@@ -1255,7 +1255,7 @@ cdef group_min_max(iu_64_floating_t[:, ::1] out,
         bint uses_mask = mask is not None
         bint isna_entry
 
-    # TODO(cython 3.0):
+    # TODO(cython3):
     # Instead of `labels.shape[0]` use `len(labels)`
     if not len(values) == labels.shape[0]:
         raise AssertionError("len(index) != len(labels)")
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx
index 23094bdb90483..2aebf75ba35d4 100644
--- a/pandas/_libs/tslibs/nattype.pyx
+++ b/pandas/_libs/tslibs/nattype.pyx
@@ -166,7 +166,6 @@ cdef class _NaT(datetime):
 
         elif util.is_integer_object(other):
             # For Period compat
-            # TODO: the integer behavior is deprecated, remove it
             return c_NaT
 
         elif util.is_array(other):
@@ -201,7 +200,6 @@ cdef class _NaT(datetime):
 
         elif util.is_integer_object(other):
             # For Period compat
-            # TODO: the integer behavior is deprecated, remove it
             return c_NaT
 
         elif util.is_array(other):
diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx
index 6cf1fdbfa4585..b8f957a4c2ea8 100644
--- a/pandas/_libs/tslibs/parsing.pyx
+++ b/pandas/_libs/tslibs/parsing.pyx
@@ -671,7 +671,7 @@ def try_parse_date_and_time(
         object[:] result
 
     n = len(dates)
-    # TODO(cython 3.0): Use len instead of `shape[0]`
+    # TODO(cython3): Use len instead of `shape[0]`
     if times.shape[0] != n:
         raise ValueError('Length of dates and times must be equal')
     result = np.empty(n, dtype='O')
@@ -709,7 +709,7 @@ def try_parse_year_month_day(
         object[:] result
 
     n = len(years)
-    # TODO(cython 3.0): Use len instead of `shape[0]`
+    # TODO(cython3): Use len instead of `shape[0]`
     if months.shape[0] != n or days.shape[0] != n:
         raise ValueError('Length of years/months/days must all be equal')
     result = np.empty(n, dtype='O')
@@ -735,7 +735,7 @@ def try_parse_datetime_components(object[:] years,
         double micros
 
     n = len(years)
-    # TODO(cython 3.0): Use len instead of `shape[0]`
+    # TODO(cython3): Use len instead of `shape[0]`
     if (
         months.shape[0] != n
         or days.shape[0] != n
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 39aa5da95cc29..274c78c30aec4 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -136,7 +136,7 @@ def __init__(
         self.window = window
         self.min_periods = min_periods
         self.center = center
-        # TODO: Change this back to self.win_type once deprecation is enforced
+        # TODO(2.0): Change this back to self.win_type once deprecation is enforced
         self._win_type = win_type
         self.axis = obj._get_axis_number(axis) if axis is not None else None
         self.method = method
@@ -262,7 +262,7 @@ def _gotitem(self, key, ndim, subset=None):
         # we need to make a shallow copy of ourselves
         # with the same groupby
         with warnings.catch_warnings():
-            # TODO: Remove once win_type deprecation is enforced
+            # TODO(2.0): Remove once win_type deprecation is enforced
             warnings.filterwarnings("ignore", "win_type", FutureWarning)
             kwargs = {attr: getattr(self, attr) for attr in self._attributes}
 
diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py
index afbe37b2ef44c..49c2b28207ed5 100644
--- a/pandas/io/parsers/readers.py
+++ b/pandas/io/parsers/readers.py
@@ -606,7 +606,7 @@ def read_csv(
     # Error Handling
     error_bad_lines=None,
     warn_bad_lines=None,
-    # TODO (2.0): set on_bad_lines to "error".
+    # TODO(2.0): set on_bad_lines to "error".
     # See _refine_defaults_read comment for why we do this.
     on_bad_lines=None,
     # Internal
@@ -704,7 +704,7 @@ def read_table(
     # Error Handling
     error_bad_lines=None,
     warn_bad_lines=None,
-    # TODO (2.0): set on_bad_lines to "error".
+    # TODO(2.0): set on_bad_lines to "error".
     # See _refine_defaults_read comment for why we do this.
     on_bad_lines=None,
     # Internal
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index ef34aa1e34596..34bcc6687e902 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -338,8 +338,8 @@ def convert_delta_safe(base, deltas, unit) -> Series:
             deltas = to_timedelta(deltas, unit=unit)
             return base + deltas
 
-    # TODO: If/when pandas supports more than datetime64[ns], this should be
-    # improved to use correct range, e.g. datetime[Y] for yearly
+    # TODO(non-nano): If/when pandas supports more than datetime64[ns], this
+    # should be improved to use correct range, e.g. datetime[Y] for yearly
     bad_locs = np.isnan(dates)
     has_bad_values = False
     if bad_locs.any():
diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py
index 3a3103b095e45..07ae7511bb333 100644
--- a/pandas/tests/arrays/sparse/test_array.py
+++ b/pandas/tests/arrays/sparse/test_array.py
@@ -1203,7 +1203,7 @@ def test_from_coo(self):
         row = [0, 3, 1, 0]
         col = [0, 3, 1, 2]
         data = [4, 5, 7, 9]
-        # TODO: Remove dtype when scipy is fixed
+        # TODO(scipy#13585): Remove dtype when scipy is fixed
         # https://github.com/scipy/scipy/issues/13585
         sp_array = scipy.sparse.coo_matrix((data, (row, col)), dtype="int")
         result = pd.Series.sparse.from_coo(sp_array)
diff --git a/pandas/tests/frame/methods/test_join.py b/pandas/tests/frame/methods/test_join.py
index 989a9be181a3f..30118d20f67a9 100644
--- a/pandas/tests/frame/methods/test_join.py
+++ b/pandas/tests/frame/methods/test_join.py
@@ -143,15 +143,14 @@ def test_join_index_more(float_frame):
 
 def test_join_index_series(float_frame):
     df = float_frame.copy()
-    s = df.pop(float_frame.columns[-1])
-    joined = df.join(s)
+    ser = df.pop(float_frame.columns[-1])
+    joined = df.join(ser)
 
-    # TODO should this check_names ?
-    tm.assert_frame_equal(joined, float_frame, check_names=False)
+    tm.assert_frame_equal(joined, float_frame)
 
-    s.name = None
+    ser.name = None
     with pytest.raises(ValueError, match="must have a name"):
-        df.join(s)
+        df.join(ser)
 
 
 def test_join_overlap(float_frame):
@@ -241,8 +240,7 @@ def test_join(self, multiindex_dataframe_random_data):
 
         assert not np.isnan(joined.values).all()
 
-        # TODO what should join do with names ?
-        tm.assert_frame_equal(joined, expected, check_names=False)
+        tm.assert_frame_equal(joined, expected)
 
     def test_join_segfault(self):
         # GH#1532
diff --git a/pandas/tests/frame/methods/test_reset_index.py b/pandas/tests/frame/methods/test_reset_index.py
index 76d259707787d..43af48cf4a654 100644
--- a/pandas/tests/frame/methods/test_reset_index.py
+++ b/pandas/tests/frame/methods/test_reset_index.py
@@ -144,32 +144,31 @@ def test_reset_index(self, float_frame):
         df = float_frame.reset_index().set_index(["index", "A", "B"])
         rs = df.reset_index(["A", "B"])
 
-        # TODO should reset_index check_names ?
-        tm.assert_frame_equal(rs, float_frame, check_names=False)
+        tm.assert_frame_equal(rs, float_frame)
 
         rs = df.reset_index(["index", "A", "B"])
-        tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False)
+        tm.assert_frame_equal(rs, float_frame.reset_index())
 
         rs = df.reset_index(["index", "A", "B"])
-        tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False)
+        tm.assert_frame_equal(rs, float_frame.reset_index())
 
         rs = df.reset_index("A")
         xp = float_frame.reset_index().set_index(["index", "B"])
-        tm.assert_frame_equal(rs, xp, check_names=False)
+        tm.assert_frame_equal(rs, xp)
 
         # test resetting in place
         df = float_frame.copy()
         reset = float_frame.reset_index()
         return_value = df.reset_index(inplace=True)
         assert return_value is None
-        tm.assert_frame_equal(df, reset, check_names=False)
+        tm.assert_frame_equal(df, reset)
 
         df = float_frame.reset_index().set_index(["index", "A", "B"])
         rs = df.reset_index("A", drop=True)
         xp = float_frame.copy()
         del xp["A"]
         xp = xp.set_index(["B"], append=True)
-        tm.assert_frame_equal(rs, xp, check_names=False)
+        tm.assert_frame_equal(rs, xp)
 
     def test_reset_index_name(self):
         df = DataFrame(
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 02ff93bf67a4f..704af61ee2390 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -2528,7 +2528,7 @@ def check_views():
         else:
             # TODO: we can call check_views if we stop consolidating
             #  in setitem_with_indexer
-            # FIXME: enable after GH#35417
+            # FIXME(GH#35417): enable after GH#35417
             # assert b[0] == 0
             assert df.iloc[0, 2] == 0
 
diff --git a/pandas/tests/groupby/test_allowlist.py b/pandas/tests/groupby/test_allowlist.py
index 8be721c13eea8..59ccd93e44af7 100644
--- a/pandas/tests/groupby/test_allowlist.py
+++ b/pandas/tests/groupby/test_allowlist.py
@@ -8,8 +8,6 @@
 import numpy as np
 import pytest
 
-import pandas.util._test_decorators as td
-
 from pandas import (
     DataFrame,
     Index,
@@ -359,8 +357,7 @@ def test_groupby_function_rename(mframe):
     "cummax",
     "cummin",
     "cumprod",
-    # TODO(ArrayManager) quantile
-    pytest.param("describe", marks=td.skip_array_manager_not_yet_implemented),
+    "describe",
     "rank",
     "quantile",
     "diff",
diff --git a/pandas/tests/indexes/test_indexing.py b/pandas/tests/indexes/test_indexing.py
index 0a001008c2f1b..f7cffe48d1722 100644
--- a/pandas/tests/indexes/test_indexing.py
+++ b/pandas/tests/indexes/test_indexing.py
@@ -172,7 +172,7 @@ class TestGetValue:
         "index", ["string", "int", "datetime", "timedelta"], indirect=True
     )
     def test_get_value(self, index):
-        # TODO: Remove function? GH#19728
+        # TODO(2.0): can remove once get_value deprecation is enforced GH#19728
         values = np.random.randn(100)
         value = index[67]
 
diff --git a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py
index ba8a9ed070236..97ebb3a0d39ba 100644
--- a/pandas/tests/io/test_feather.py
+++ b/pandas/tests/io/test_feather.py
@@ -89,9 +89,7 @@ def test_basic(self):
         )
         df["periods"] = pd.period_range("2013", freq="M", periods=3)
         df["timedeltas"] = pd.timedelta_range("1 day", periods=3)
-        # TODO temporary disable due to regression in pyarrow 0.17.1
-        # https://github.com/pandas-dev/pandas/issues/34255
-        # df["intervals"] = pd.interval_range(0, 3, 3)
+        df["intervals"] = pd.interval_range(0, 3, 3)
 
         assert df.dttz.dtype.tz.zone == "US/Eastern"
         self.check_round_trip(df)
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index eb3097618e158..386f11b3dd794 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -2042,7 +2042,7 @@ def test_to_sql_with_negative_npinf(self, input, request):
         # GH 36465
         # The input {"foo": [-np.inf], "infe0": ["bar"]} does not raise any error
         # for pymysql version >= 0.10
-        # TODO: remove this version check after GH 36465 is fixed
+        # TODO(GH#36465): remove this version check after GH 36465 is fixed
         import pymysql
 
         if pymysql.VERSION[0:3] >= (0, 10, 0) and "infe0" in df.columns: