Skip to content

Fix exception causes all over the code #32322

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Feb 28, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions doc/make.py
Original file line number Diff line number Diff line change
Expand Up @@ -83,8 +83,8 @@ def _process_single_doc(self, single_doc):
obj = pandas # noqa: F821
for name in single_doc.split("."):
obj = getattr(obj, name)
except AttributeError:
raise ImportError(f"Could not import {single_doc}")
except AttributeError as err:
raise ImportError(f"Could not import {single_doc}") from err
else:
return single_doc[len("pandas.") :]
else:
Expand Down
12 changes: 7 additions & 5 deletions pandas/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@
f"C extension: {module} not built. If you want to import "
"pandas from the source directory, you may need to run "
"'python setup.py build_ext --inplace --force' to build the C extensions first."
)
) from e

from pandas._config import (
get_option,
Expand Down Expand Up @@ -290,8 +290,8 @@ def __getattr__(self, item):

try:
return getattr(self.np, item)
except AttributeError:
raise AttributeError(f"module numpy has no attribute {item}")
except AttributeError as err:
raise AttributeError(f"module numpy has no attribute {item}") from err

np = __numpy()

Expand All @@ -306,8 +306,10 @@ def __getattr__(cls, item):

try:
return getattr(cls.datetime, item)
except AttributeError:
raise AttributeError(f"module datetime has no attribute {item}")
except AttributeError as err:
raise AttributeError(
f"module datetime has no attribute {item}"
) from err

def __instancecheck__(cls, other):
return isinstance(other, cls.datetime)
Expand Down
4 changes: 2 additions & 2 deletions pandas/_config/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -213,8 +213,8 @@ def __getattr__(self, key: str):
prefix += key
try:
v = object.__getattribute__(self, "d")[key]
except KeyError:
raise OptionError("No such option")
except KeyError as err:
raise OptionError("No such option") from err
if isinstance(v, dict):
return DictWrapper(v, prefix)
else:
Expand Down
4 changes: 2 additions & 2 deletions pandas/core/algorithms.py
Original file line number Diff line number Diff line change
Expand Up @@ -686,8 +686,8 @@ def value_counts(
values = Series(values)
try:
ii = cut(values, bins, include_lowest=True)
except TypeError:
raise TypeError("bins argument only works with numeric data.")
except TypeError as err:
raise TypeError("bins argument only works with numeric data.") from err

    # count, remove nulls (from the index), and add back the bins
result = ii.value_counts(dropna=dropna)
Expand Down
4 changes: 2 additions & 2 deletions pandas/core/arrays/_ranges.py
Original file line number Diff line number Diff line change
Expand Up @@ -121,8 +121,8 @@ def _generate_range_overflow_safe(
# we cannot salvage the operation by recursing, so raise
try:
addend = np.uint64(periods) * np.uint64(np.abs(stride))
except FloatingPointError:
raise OutOfBoundsDatetime(msg)
except FloatingPointError as err:
raise OutOfBoundsDatetime(msg) from err

if np.abs(addend) <= i64max:
# relatively easy case without casting concerns
Expand Down
8 changes: 4 additions & 4 deletions pandas/core/arrays/categorical.py
Original file line number Diff line number Diff line change
Expand Up @@ -350,7 +350,7 @@ def __init__(
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
except TypeError as err:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
Expand All @@ -359,13 +359,13 @@ def __init__(
"'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument."
)
except ValueError:
) from err
except ValueError as err:

# FIXME
raise NotImplementedError(
"> 1 ndim Categorical are not supported at this time"
)
) from err

# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
Expand Down
4 changes: 2 additions & 2 deletions pandas/core/arrays/datetimes.py
Original file line number Diff line number Diff line change
Expand Up @@ -2080,11 +2080,11 @@ def _infer_tz_from_endpoints(start, end, tz):
"""
try:
inferred_tz = timezones.infer_tzinfo(start, end)
except AssertionError:
except AssertionError as err:
# infer_tzinfo raises AssertionError if passed mismatched timezones
raise TypeError(
"Start and end cannot both be tz-aware with different timezones"
)
) from err

inferred_tz = timezones.maybe_get_tz(inferred_tz)
tz = timezones.maybe_get_tz(tz)
Expand Down
8 changes: 4 additions & 4 deletions pandas/core/arrays/integer.py
Original file line number Diff line number Diff line change
Expand Up @@ -154,15 +154,15 @@ def safe_cast(values, dtype, copy: bool):
"""
try:
return values.astype(dtype, casting="safe", copy=copy)
except TypeError:
except TypeError as err:

casted = values.astype(dtype, copy=copy)
if (casted == values).all():
return casted

raise TypeError(
f"cannot safely cast non-equivalent {values.dtype} to {np.dtype(dtype)}"
)
) from err


def coerce_to_array(
Expand Down Expand Up @@ -199,8 +199,8 @@ def coerce_to_array(
if not issubclass(type(dtype), _IntegerDtype):
try:
dtype = _dtypes[str(np.dtype(dtype))]
except KeyError:
raise ValueError(f"invalid dtype specified {dtype}")
except KeyError as err:
raise ValueError(f"invalid dtype specified {dtype}") from err

if isinstance(values, IntegerArray):
values, mask = values._data, values._mask
Expand Down
28 changes: 14 additions & 14 deletions pandas/core/arrays/interval.py
Original file line number Diff line number Diff line change
Expand Up @@ -448,12 +448,12 @@ def from_tuples(cls, data, closed="right", copy=False, dtype=None):
try:
# need list of length 2 tuples, e.g. [(0, 1), (1, 2), ...]
lhs, rhs = d
except ValueError:
except ValueError as err:
msg = f"{name}.from_tuples requires tuples of length 2, got {d}"
raise ValueError(msg)
except TypeError:
raise ValueError(msg) from err
except TypeError as err:
msg = f"{name}.from_tuples received an invalid item, {d}"
raise TypeError(msg)
raise TypeError(msg) from err
left.append(lhs)
right.append(rhs)

Expand Down Expand Up @@ -538,10 +538,10 @@ def __setitem__(self, key, value):
try:
array = IntervalArray(value)
value_left, value_right = array.left, array.right
except TypeError:
except TypeError as err:
# wrong type: not interval or NA
msg = f"'value' should be an interval type, got {type(value)} instead."
raise TypeError(msg)
raise TypeError(msg) from err

key = check_array_indexer(self, key)
# Need to ensure that left and right are updated atomically, so we're
Expand Down Expand Up @@ -688,20 +688,20 @@ def astype(self, dtype, copy=True):
try:
new_left = self.left.astype(dtype.subtype)
new_right = self.right.astype(dtype.subtype)
except TypeError:
except TypeError as err:
msg = (
f"Cannot convert {self.dtype} to {dtype}; subtypes are incompatible"
)
raise TypeError(msg)
raise TypeError(msg) from err
return self._shallow_copy(new_left, new_right)
elif is_categorical_dtype(dtype):
return Categorical(np.asarray(self))
# TODO: This try/except will be repeated.
try:
return np.asarray(self).astype(dtype, copy=copy)
except (TypeError, ValueError):
except (TypeError, ValueError) as err:
msg = f"Cannot cast {type(self).__name__} to dtype {dtype}"
raise TypeError(msg)
raise TypeError(msg) from err

@classmethod
def _concat_same_type(cls, to_concat):
Expand Down Expand Up @@ -1020,13 +1020,13 @@ def length(self):
"""
try:
return self.right - self.left
except TypeError:
except TypeError as err:
# length not defined for some types, e.g. string
msg = (
"IntervalArray contains Intervals without defined length, "
"e.g. Intervals with string endpoints"
)
raise TypeError(msg)
raise TypeError(msg) from err

@property
def mid(self):
Expand Down Expand Up @@ -1100,11 +1100,11 @@ def __arrow_array__(self, type=None):

try:
subtype = pyarrow.from_numpy_dtype(self.dtype.subtype)
except TypeError:
except TypeError as err:
raise TypeError(
f"Conversion to arrow with subtype '{self.dtype.subtype}' "
"is not supported"
)
) from err
interval_type = ArrowIntervalType(subtype, self.closed)
storage_array = pyarrow.StructArray.from_arrays(
[
Expand Down
4 changes: 2 additions & 2 deletions pandas/core/arrays/sparse/dtype.py
Original file line number Diff line number Diff line change
Expand Up @@ -217,8 +217,8 @@ def construct_from_string(cls, string: str) -> "SparseDtype":
if string.startswith("Sparse"):
try:
sub_type, has_fill_value = cls._parse_subtype(string)
except ValueError:
raise TypeError(msg)
except ValueError as err:
raise TypeError(msg) from err
else:
result = SparseDtype(sub_type)
msg = (
Expand Down
6 changes: 4 additions & 2 deletions pandas/core/arrays/sparse/scipy_sparse.py
Original file line number Diff line number Diff line change
Expand Up @@ -134,8 +134,10 @@ def _coo_to_sparse_series(A, dense_index: bool = False):

try:
s = Series(A.data, MultiIndex.from_arrays((A.row, A.col)))
except AttributeError:
raise TypeError(f"Expected coo_matrix. Got {type(A).__name__} instead.")
except AttributeError as err:
raise TypeError(
f"Expected coo_matrix. Got {type(A).__name__} instead."
) from err
s = s.sort_index()
s = s.astype(SparseDtype(s.dtype))
if dense_index:
Expand Down
4 changes: 2 additions & 2 deletions pandas/core/arrays/timedeltas.py
Original file line number Diff line number Diff line change
Expand Up @@ -451,10 +451,10 @@ def _addsub_object_array(self, other, op):
# subclasses. Incompatible classes will raise AttributeError,
# which we re-raise as TypeError
return super()._addsub_object_array(other, op)
except AttributeError:
except AttributeError as err:
raise TypeError(
f"Cannot add/subtract non-tick DateOffset to {type(self).__name__}"
)
) from err

def __mul__(self, other):
other = lib.item_from_zerodim(other)
Expand Down
10 changes: 6 additions & 4 deletions pandas/core/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -458,7 +458,7 @@ def is_any_frame() -> bool:
# return a MI Series
try:
result = concat(result)
except TypeError:
except TypeError as err:
# we want to give a nice error here if
# we have non-same sized objects, so
# we don't automatically broadcast
Expand All @@ -467,7 +467,7 @@ def is_any_frame() -> bool:
"cannot perform both aggregation "
"and transformation operations "
"simultaneously"
)
) from err

return result, True

Expand Down Expand Up @@ -553,7 +553,7 @@ def _aggregate_multiple_funcs(self, arg, _axis):

try:
return concat(results, keys=keys, axis=1, sort=False)
except TypeError:
except TypeError as err:

# we are concatting non-NDFrame objects,
# e.g. a list of scalars
Expand All @@ -562,7 +562,9 @@ def _aggregate_multiple_funcs(self, arg, _axis):

result = Series(results, index=keys, name=self.name)
if is_nested_object(result):
raise ValueError("cannot combine transform and aggregation operations")
raise ValueError(
"cannot combine transform and aggregation operations"
) from err
return result

def _get_cython_func(self, arg: str) -> Optional[str]:
Expand Down
8 changes: 4 additions & 4 deletions pandas/core/computation/eval.py
Original file line number Diff line number Diff line change
Expand Up @@ -362,8 +362,8 @@ def eval(
if not inplace and first_expr:
try:
target = env.target.copy()
except AttributeError:
raise ValueError("Cannot return a copy of the target")
except AttributeError as err:
raise ValueError("Cannot return a copy of the target") from err
else:
target = env.target

Expand All @@ -375,8 +375,8 @@ def eval(
with warnings.catch_warnings(record=True):
# TODO: Filter the warnings we actually care about here.
target[assigner] = ret
except (TypeError, IndexError):
raise ValueError("Cannot assign expression output to target")
except (TypeError, IndexError) as err:
raise ValueError("Cannot assign expression output to target") from err

if not resolvers:
resolvers = ({assigner: ret},)
Expand Down
8 changes: 4 additions & 4 deletions pandas/core/computation/ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -372,12 +372,12 @@ def __init__(self, op: str, lhs, rhs):

try:
self.func = _binary_ops_dict[op]
except KeyError:
except KeyError as err:
# has to be made a list for python3
keys = list(_binary_ops_dict.keys())
raise ValueError(
f"Invalid binary operator {repr(op)}, valid operators are {keys}"
)
) from err

def __call__(self, env):
"""
Expand Down Expand Up @@ -550,11 +550,11 @@ def __init__(self, op: str, operand):

try:
self.func = _unary_ops_dict[op]
except KeyError:
except KeyError as err:
raise ValueError(
f"Invalid unary operator {repr(op)}, "
f"valid operators are {_unary_ops_syms}"
)
) from err

def __call__(self, env):
operand = self.operand(env)
Expand Down
4 changes: 2 additions & 2 deletions pandas/core/computation/parsing.py
Original file line number Diff line number Diff line change
Expand Up @@ -185,7 +185,7 @@ def tokenize_string(source: str) -> Iterator[Tuple[int, str]]:
yield tokenize_backtick_quoted_string(
token_generator, source, string_start=start[1] + 1
)
except Exception:
raise SyntaxError(f"Failed to parse backticks in '{source}'.")
except Exception as err:
raise SyntaxError(f"Failed to parse backticks in '{source}'.") from err
else:
yield toknum, tokval
Loading