Skip to content

Commit bc6ab05 — CLN: Some code cleanups (#31792)
1 parent: c2f3ce3

File tree

5 files changed: +17 −32 lines changed

pandas/core/internals/blocks.py

+5-14
Original file line numberDiff line numberDiff line change
@@ -85,8 +85,6 @@
 import pandas.core.missing as missing
 from pandas.core.nanops import nanpercentile
 
-from pandas.io.formats.printing import pprint_thing
-
 
 class Block(PandasObject):
     """
@@ -159,7 +157,8 @@ def _check_ndim(self, values, ndim):
 
     @property
     def _holder(self):
-        """The array-like that can hold the underlying values.
+        """
+        The array-like that can hold the underlying values.
 
         None for 'Block', overridden by subclasses that don't
         use an ndarray.
@@ -284,16 +283,11 @@ def __repr__(self) -> str:
         # don't want to print out all of the items here
         name = type(self).__name__
         if self._is_single_block:
-
             result = f"{name}: {len(self)} dtype: {self.dtype}"
-
         else:
 
-            shape = " x ".join(pprint_thing(s) for s in self.shape)
-            result = (
-                f"{name}: {pprint_thing(self.mgr_locs.indexer)}, "
-                f"{shape}, dtype: {self.dtype}"
-            )
+            shape = " x ".join(str(s) for s in self.shape)
+            result = f"{name}: {self.mgr_locs.indexer}, {shape}, dtype: {self.dtype}"
 
         return result
 
@@ -319,10 +313,7 @@ def getitem_block(self, slicer, new_mgr_locs=None):
         As of now, only supports slices that preserve dimensionality.
         """
         if new_mgr_locs is None:
-            if isinstance(slicer, tuple):
-                axis0_slicer = slicer[0]
-            else:
-                axis0_slicer = slicer
+            axis0_slicer = slicer[0] if isinstance(slicer, tuple) else slicer
             new_mgr_locs = self.mgr_locs[axis0_slicer]
 
         new_values = self._slice(slicer)

pandas/core/internals/concat.py

+4-5
Original file line numberDiff line numberDiff line change
@@ -204,10 +204,9 @@ def get_reindexed_values(self, empty_dtype, upcasted_na):
                 missing_arr.fill(fill_value)
                 return missing_arr
 
-        if not self.indexers:
-            if not self.block._can_consolidate:
-                # preserve these for validation in concat_compat
-                return self.block.values
+        if (not self.indexers) and (not self.block._can_consolidate):
+            # preserve these for validation in concat_compat
+            return self.block.values
 
         if self.block.is_bool and not self.block.is_categorical:
             # External code requested filling/upcasting, bool values must
@@ -372,7 +371,7 @@ def _get_empty_dtype_and_na(join_units):
         raise AssertionError(msg)
 
 
-def is_uniform_join_units(join_units):
+def is_uniform_join_units(join_units) -> bool:
     """
     Check if the join units consist of blocks of uniform type that can
     be concatenated using Block.concat_same_type instead of the generic

pandas/core/internals/managers.py

+2-5
Original file line numberDiff line numberDiff line change
@@ -589,7 +589,7 @@ def comp(s, regex=False):
             )
             return _compare_or_regex_search(values, s, regex)
 
-        masks = [comp(s, regex) for i, s in enumerate(src_list)]
+        masks = [comp(s, regex) for s in src_list]
 
         result_blocks = []
         src_len = len(src_list) - 1
@@ -755,10 +755,7 @@ def copy(self, deep=True):
             # hit in e.g. tests.io.json.test_pandas
 
             def copy_func(ax):
-                if deep == "all":
-                    return ax.copy(deep=True)
-                else:
-                    return ax.view()
+                return ax.copy(deep=True) if deep == "all" else ax.view()
 
             new_axes = [copy_func(ax) for ax in self.axes]
         else:

pandas/io/parsers.py

+3-4
Original file line numberDiff line numberDiff line change
@@ -1493,11 +1493,10 @@ def extract(r):
             # level, then our header was too long.
             for n in range(len(columns[0])):
                 if all(ensure_str(col[n]) in self.unnamed_cols for col in columns):
+                    header = ",".join(str(x) for x in self.header)
                     raise ParserError(
-                        "Passed header=[{header}] are too many rows for this "
-                        "multi_index of columns".format(
-                            header=",".join(str(x) for x in self.header)
-                        )
+                        f"Passed header=[{header}] are too many rows "
+                        "for this multi_index of columns"
                     )
 
             # Clean the column names (if we have an index_col).

pandas/io/pytables.py

+3-4
Original file line numberDiff line numberDiff line change
@@ -3095,9 +3095,8 @@ def write(self, obj, **kwargs):
 
         self.attrs.ndim = data.ndim
         for i, ax in enumerate(data.axes):
-            if i == 0:
-                if not ax.is_unique:
-                    raise ValueError("Columns index has to be unique for fixed format")
+            if i == 0 and (not ax.is_unique):
+                raise ValueError("Columns index has to be unique for fixed format")
             self.write_index(f"axis{i}", ax)
 
         # Supporting mixed-type DataFrame objects...nontrivial
@@ -4230,7 +4229,7 @@ def write_data(self, chunksize: Optional[int], dropna: bool = False):
             chunksize = 100000
 
         rows = np.empty(min(chunksize, nrows), dtype=self.dtype)
-        chunks = int(nrows / chunksize) + 1
+        chunks = nrows // chunksize + 1
         for i in range(chunks):
             start_i = i * chunksize
             end_i = min((i + 1) * chunksize, nrows)

Comments (0)