diff --git a/doc/make.py b/doc/make.py
index 024a748cd28ca..db729853e5834 100755
--- a/doc/make.py
+++ b/doc/make.py
@@ -83,8 +83,8 @@ def _process_single_doc(self, single_doc):
                 obj = pandas  # noqa: F821
                 for name in single_doc.split("."):
                     obj = getattr(obj, name)
-            except AttributeError:
-                raise ImportError(f"Could not import {single_doc}")
+            except AttributeError as err:
+                raise ImportError(f"Could not import {single_doc}") from err
             else:
                 return single_doc[len("pandas.") :]
         else:
diff --git a/pandas/__init__.py b/pandas/__init__.py
index 2d3d3f7d92a9c..2b9a461e0e95d 100644
--- a/pandas/__init__.py
+++ b/pandas/__init__.py
@@ -37,7 +37,7 @@
         f"C extension: {module} not built. If you want to import "
         "pandas from the source directory, you may need to run "
         "'python setup.py build_ext --inplace --force' to build the C extensions first."
-    )
+    ) from e

 from pandas._config import (
     get_option,
@@ -290,8 +290,8 @@ def __getattr__(self, item):
             try:
                 return getattr(self.np, item)
-            except AttributeError:
-                raise AttributeError(f"module numpy has no attribute {item}")
+            except AttributeError as err:
+                raise AttributeError(f"module numpy has no attribute {item}") from err

     np = __numpy()
@@ -306,8 +306,10 @@ def __getattr__(cls, item):
             try:
                 return getattr(cls.datetime, item)
-            except AttributeError:
-                raise AttributeError(f"module datetime has no attribute {item}")
+            except AttributeError as err:
+                raise AttributeError(
+                    f"module datetime has no attribute {item}"
+                ) from err

         def __instancecheck__(cls, other):
             return isinstance(other, cls.datetime)
diff --git a/pandas/_config/config.py b/pandas/_config/config.py
index f1959cd70ed3a..df706bf25097e 100644
--- a/pandas/_config/config.py
+++ b/pandas/_config/config.py
@@ -213,8 +213,8 @@ def __getattr__(self, key: str):
         prefix += key
         try:
             v = object.__getattribute__(self, "d")[key]
-        except KeyError:
-            raise OptionError("No such option")
+        except KeyError as err:
+            raise OptionError("No such option") from err
         if isinstance(v, dict):
             return DictWrapper(v, prefix)
         else:
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 02a979aea6c6b..7201629cb086e 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -686,8 +686,8 @@ def value_counts(
             values = Series(values)
         try:
             ii = cut(values, bins, include_lowest=True)
-        except TypeError:
-            raise TypeError("bins argument only works with numeric data.")
+        except TypeError as err:
+            raise TypeError("bins argument only works with numeric data.") from err

         # count, remove nulls (from the index), and but the bins
         result = ii.value_counts(dropna=dropna)
diff --git a/pandas/core/arrays/_ranges.py b/pandas/core/arrays/_ranges.py
index 20e4cf70eddcf..471bfa736d4b9 100644
--- a/pandas/core/arrays/_ranges.py
+++ b/pandas/core/arrays/_ranges.py
@@ -121,8 +121,8 @@ def _generate_range_overflow_safe(
     # we cannot salvage the operation by recursing, so raise
     try:
         addend = np.uint64(periods) * np.uint64(np.abs(stride))
-    except FloatingPointError:
-        raise OutOfBoundsDatetime(msg)
+    except FloatingPointError as err:
+        raise OutOfBoundsDatetime(msg) from err

     if np.abs(addend) <= i64max:
         # relatively easy case without casting concerns
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index a5048e3aae899..4167c75eb5782 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -350,7 +350,7 @@ def __init__(
         if dtype.categories is None:
             try:
                 codes, categories = factorize(values, sort=True)
-            except TypeError:
+            except TypeError as err:
                 codes, categories = factorize(values, sort=False)
                 if dtype.ordered:
                     # raise, as we don't have a sortable data structure and so
@@ -359,13 +359,13 @@ def __init__(
                         "'values' is not ordered, please "
                         "explicitly specify the categories order "
                         "by passing in a categories argument."
-                    )
-            except ValueError:
+                    ) from err
+            except ValueError as err:

                 # FIXME
                 raise NotImplementedError(
                     "> 1 ndim Categorical are not supported at this time"
-                )
+                ) from err

             # we're inferring from values
             dtype = CategoricalDtype(categories, dtype.ordered)
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index a75536e46e60d..56939cda6d21c 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -2080,11 +2080,11 @@ def _infer_tz_from_endpoints(start, end, tz):
     """
     try:
         inferred_tz = timezones.infer_tzinfo(start, end)
-    except AssertionError:
+    except AssertionError as err:
         # infer_tzinfo raises AssertionError if passed mismatched timezones
         raise TypeError(
             "Start and end cannot both be tz-aware with different timezones"
-        )
+        ) from err

     inferred_tz = timezones.maybe_get_tz(inferred_tz)
     tz = timezones.maybe_get_tz(tz)
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index f1e0882def13b..e2b66b1a006e4 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -154,7 +154,7 @@ def safe_cast(values, dtype, copy: bool):
     """
     try:
         return values.astype(dtype, casting="safe", copy=copy)
-    except TypeError:
+    except TypeError as err:

         casted = values.astype(dtype, copy=copy)
         if (casted == values).all():
@@ -162,7 +162,7 @@ def safe_cast(values, dtype, copy: bool):

         raise TypeError(
             f"cannot safely cast non-equivalent {values.dtype} to {np.dtype(dtype)}"
-        )
+        ) from err


 def coerce_to_array(
@@ -199,8 +199,8 @@ def coerce_to_array(
     if not issubclass(type(dtype), _IntegerDtype):
         try:
             dtype = _dtypes[str(np.dtype(dtype))]
-        except KeyError:
-            raise ValueError(f"invalid dtype specified {dtype}")
+        except KeyError as err:
+            raise ValueError(f"invalid dtype specified {dtype}") from err

     if isinstance(values, IntegerArray):
         values, mask = values._data, values._mask
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index f5167f470b056..51c94d5059f8b 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -448,12 +448,12 @@ def from_tuples(cls, data, closed="right", copy=False, dtype=None):
             try:
                 # need list of length 2 tuples, e.g. [(0, 1), (1, 2), ...]
                 lhs, rhs = d
-            except ValueError:
+            except ValueError as err:
                 msg = f"{name}.from_tuples requires tuples of length 2, got {d}"
-                raise ValueError(msg)
-            except TypeError:
+                raise ValueError(msg) from err
+            except TypeError as err:
                 msg = f"{name}.from_tuples received an invalid item, {d}"
-                raise TypeError(msg)
+                raise TypeError(msg) from err

             left.append(lhs)
             right.append(rhs)
@@ -538,10 +538,10 @@ def __setitem__(self, key, value):
         try:
             array = IntervalArray(value)
             value_left, value_right = array.left, array.right
-        except TypeError:
+        except TypeError as err:
             # wrong type: not interval or NA
             msg = f"'value' should be an interval type, got {type(value)} instead."
-            raise TypeError(msg)
+            raise TypeError(msg) from err

         key = check_array_indexer(self, key)
         # Need to ensure that left and right are updated atomically, so we're
@@ -688,20 +688,20 @@ def astype(self, dtype, copy=True):
             try:
                 new_left = self.left.astype(dtype.subtype)
                 new_right = self.right.astype(dtype.subtype)
-            except TypeError:
+            except TypeError as err:
                 msg = (
                     f"Cannot convert {self.dtype} to {dtype}; subtypes are incompatible"
                 )
-                raise TypeError(msg)
+                raise TypeError(msg) from err
             return self._shallow_copy(new_left, new_right)
         elif is_categorical_dtype(dtype):
             return Categorical(np.asarray(self))
         # TODO: This try/except will be repeated.
         try:
             return np.asarray(self).astype(dtype, copy=copy)
-        except (TypeError, ValueError):
+        except (TypeError, ValueError) as err:
             msg = f"Cannot cast {type(self).__name__} to dtype {dtype}"
-            raise TypeError(msg)
+            raise TypeError(msg) from err

     @classmethod
     def _concat_same_type(cls, to_concat):
@@ -1020,13 +1020,13 @@ def length(self):
         """
         try:
             return self.right - self.left
-        except TypeError:
+        except TypeError as err:
             # length not defined for some types, e.g. string
             msg = (
                 "IntervalArray contains Intervals without defined length, "
                 "e.g. Intervals with string endpoints"
             )
-            raise TypeError(msg)
+            raise TypeError(msg) from err

     @property
     def mid(self):
@@ -1100,11 +1100,11 @@ def __arrow_array__(self, type=None):

         try:
             subtype = pyarrow.from_numpy_dtype(self.dtype.subtype)
-        except TypeError:
+        except TypeError as err:
             raise TypeError(
                 f"Conversion to arrow with subtype '{self.dtype.subtype}' "
                 "is not supported"
-            )
+            ) from err
         interval_type = ArrowIntervalType(subtype, self.closed)
         storage_array = pyarrow.StructArray.from_arrays(
             [
diff --git a/pandas/core/arrays/sparse/dtype.py b/pandas/core/arrays/sparse/dtype.py
index 86869f50aab8e..135514e334920 100644
--- a/pandas/core/arrays/sparse/dtype.py
+++ b/pandas/core/arrays/sparse/dtype.py
@@ -217,8 +217,8 @@ def construct_from_string(cls, string: str) -> "SparseDtype":
         if string.startswith("Sparse"):
             try:
                 sub_type, has_fill_value = cls._parse_subtype(string)
-            except ValueError:
-                raise TypeError(msg)
+            except ValueError as err:
+                raise TypeError(msg) from err
             else:
                 result = SparseDtype(sub_type)
                 msg = (
diff --git a/pandas/core/arrays/sparse/scipy_sparse.py b/pandas/core/arrays/sparse/scipy_sparse.py
index e77256a5aaadd..eafd782dc9b9c 100644
--- a/pandas/core/arrays/sparse/scipy_sparse.py
+++ b/pandas/core/arrays/sparse/scipy_sparse.py
@@ -134,8 +134,10 @@ def _coo_to_sparse_series(A, dense_index: bool = False):
     try:
         s = Series(A.data, MultiIndex.from_arrays((A.row, A.col)))
-    except AttributeError:
-        raise TypeError(f"Expected coo_matrix. Got {type(A).__name__} instead.")
+    except AttributeError as err:
+        raise TypeError(
+            f"Expected coo_matrix. Got {type(A).__name__} instead."
+        ) from err
     s = s.sort_index()
     s = s.astype(SparseDtype(s.dtype))
     if dense_index:
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index a7b16fd86468e..81fc934748d3e 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -451,10 +451,10 @@ def _addsub_object_array(self, other, op):
             # subclasses. Incompatible classes will raise AttributeError,
             # which we re-raise as TypeError
             return super()._addsub_object_array(other, op)
-        except AttributeError:
+        except AttributeError as err:
             raise TypeError(
                 f"Cannot add/subtract non-tick DateOffset to {type(self).__name__}"
-            )
+            ) from err

     def __mul__(self, other):
         other = lib.item_from_zerodim(other)
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 85424e35fa0e0..3c6f24dbe363a 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -458,7 +458,7 @@ def is_any_frame() -> bool:
                 # return a MI Series
                 try:
                     result = concat(result)
-                except TypeError:
+                except TypeError as err:
                     # we want to give a nice error here if
                     # we have non-same sized objects, so
                     # we don't automatically broadcast
@@ -467,7 +467,7 @@ def is_any_frame() -> bool:
                         "cannot perform both aggregation "
                         "and transformation operations "
                         "simultaneously"
-                    )
+                    ) from err

                 return result, True

@@ -553,7 +553,7 @@ def _aggregate_multiple_funcs(self, arg, _axis):

         try:
             return concat(results, keys=keys, axis=1, sort=False)
-        except TypeError:
+        except TypeError as err:

             # we are concatting non-NDFrame objects,
             # e.g. a list of scalars
@@ -562,7 +562,9 @@ def _aggregate_multiple_funcs(self, arg, _axis):

             result = Series(results, index=keys, name=self.name)
             if is_nested_object(result):
-                raise ValueError("cannot combine transform and aggregation operations")
+                raise ValueError(
+                    "cannot combine transform and aggregation operations"
+                ) from err
             return result

     def _get_cython_func(self, arg: str) -> Optional[str]:
diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py
index f6947d5ec6233..fe3d3f49f16a7 100644
--- a/pandas/core/computation/eval.py
+++ b/pandas/core/computation/eval.py
@@ -362,8 +362,8 @@ def eval(
             if not inplace and first_expr:
                 try:
                     target = env.target.copy()
-                except AttributeError:
-                    raise ValueError("Cannot return a copy of the target")
+                except AttributeError as err:
+                    raise ValueError("Cannot return a copy of the target") from err
             else:
                 target = env.target
@@ -375,8 +375,8 @@ def eval(
                 with warnings.catch_warnings(record=True):
                     # TODO: Filter the warnings we actually care about here.
                     target[assigner] = ret
-            except (TypeError, IndexError):
-                raise ValueError("Cannot assign expression output to target")
+            except (TypeError, IndexError) as err:
+                raise ValueError("Cannot assign expression output to target") from err

             if not resolvers:
                 resolvers = ({assigner: ret},)
diff --git a/pandas/core/computation/ops.py b/pandas/core/computation/ops.py
index 7ed089b283903..bc9ff7c44b689 100644
--- a/pandas/core/computation/ops.py
+++ b/pandas/core/computation/ops.py
@@ -372,12 +372,12 @@ def __init__(self, op: str, lhs, rhs):

         try:
             self.func = _binary_ops_dict[op]
-        except KeyError:
+        except KeyError as err:
             # has to be made a list for python3
             keys = list(_binary_ops_dict.keys())
             raise ValueError(
                 f"Invalid binary operator {repr(op)}, valid operators are {keys}"
-            )
+            ) from err

     def __call__(self, env):
         """
@@ -550,11 +550,11 @@ def __init__(self, op: str, operand):

         try:
             self.func = _unary_ops_dict[op]
-        except KeyError:
+        except KeyError as err:
             raise ValueError(
                 f"Invalid unary operator {repr(op)}, "
                 f"valid operators are {_unary_ops_syms}"
-            )
+            ) from err

     def __call__(self, env):
         operand = self.operand(env)
diff --git a/pandas/core/computation/parsing.py b/pandas/core/computation/parsing.py
index 92a2c20cd2a9e..418fc7d38d08f 100644
--- a/pandas/core/computation/parsing.py
+++ b/pandas/core/computation/parsing.py
@@ -185,7 +185,7 @@ def tokenize_string(source: str) -> Iterator[Tuple[int, str]]:
                 yield tokenize_backtick_quoted_string(
                     token_generator, source, string_start=start[1] + 1
                 )
-            except Exception:
-                raise SyntaxError(f"Failed to parse backticks in '{source}'.")
+            except Exception as err:
+                raise SyntaxError(f"Failed to parse backticks in '{source}'.") from err
         else:
             yield toknum, tokval
diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py
index 828ec11c2bd38..653d014775386 100644
--- a/pandas/core/computation/pytables.py
+++ b/pandas/core/computation/pytables.py
@@ -424,8 +424,10 @@ def visit_Subscript(self, node, **kwargs):

         try:
             return self.const_type(value[slobj], self.env)
-        except TypeError:
-            raise ValueError(f"cannot subscript {repr(value)} with {repr(slobj)}")
+        except TypeError as err:
+            raise ValueError(
+                f"cannot subscript {repr(value)} with {repr(slobj)}"
+            ) from err

     def visit_Attribute(self, node, **kwargs):
         attr = node.attr
@@ -575,18 +577,18 @@ def evaluate(self):
         """ create and return the numexpr condition and filter """
         try:
             self.condition = self.terms.prune(ConditionBinOp)
-        except AttributeError:
+        except AttributeError as err:
             raise ValueError(
                 f"cannot process expression [{self.expr}], [{self}] "
                 "is not a valid condition"
-            )
+            ) from err
         try:
             self.filter = self.terms.prune(FilterBinOp)
-        except AttributeError:
+        except AttributeError as err:
             raise ValueError(
                 f"cannot process expression [{self.expr}], [{self}] "
                 "is not a valid filter"
-            )
+            ) from err

         return self.condition, self.filter
diff --git a/pandas/core/computation/scope.py b/pandas/core/computation/scope.py
index 937c81fdeb8d6..83bf92ad737e4 100644
--- a/pandas/core/computation/scope.py
+++ b/pandas/core/computation/scope.py
@@ -197,11 +197,11 @@ def resolve(self, key: str, is_local: bool):
                 # these are created when parsing indexing expressions
                 # e.g., df[df > 0]
                 return self.temps[key]
-            except KeyError:
+            except KeyError as err:
                 # runtime import because ops imports from scope
                 from pandas.core.computation.ops import UndefinedVariableError

-                raise UndefinedVariableError(key, is_local)
+                raise UndefinedVariableError(key, is_local) from err

     def swapkey(self, old_key: str, new_key: str, new_value=None):
         """
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index c2b600b5d8c5b..c06bd8a1d6e36 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -1573,11 +1573,11 @@ def maybe_cast_to_integer_array(arr, dtype, copy: bool = False):
             casted = np.array(arr, dtype=dtype, copy=copy)
         else:
             casted = arr.astype(dtype, copy=copy)
-    except OverflowError:
+    except OverflowError as err:
         raise OverflowError(
             "The elements provided in the data cannot all be "
             f"casted to the dtype {dtype}"
-        )
+        ) from err

     if np.array_equal(arr, casted):
         return casted
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index c0420244f671e..df5bac1071985 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -198,8 +198,8 @@ def ensure_python_int(value: Union[int, np.integer]) -> int:
     try:
         new_value = int(value)
         assert new_value == value
-    except (TypeError, ValueError, AssertionError):
-        raise TypeError(f"Wrong type {type(value)} for value {value}")
+    except (TypeError, ValueError, AssertionError) as err:
+        raise TypeError(f"Wrong type {type(value)} for value {value}") from err
     return new_value


@@ -1801,7 +1801,7 @@ def _validate_date_like_dtype(dtype) -> None:
     try:
         typ = np.datetime_data(dtype)[0]
     except ValueError as e:
-        raise TypeError(e)
+        raise TypeError(e) from e
     if typ != "generic" and typ != "ns":
         raise ValueError(
             f"{repr(dtype.name)} is too specific of a frequency, "
@@ -1840,9 +1840,9 @@ def pandas_dtype(dtype) -> DtypeObj:
     # raise a consistent TypeError if failed
     try:
         npdtype = np.dtype(dtype)
-    except SyntaxError:
+    except SyntaxError as err:
         # np.dtype uses `eval` which can raise SyntaxError
-        raise TypeError(f"data type '{dtype}' not understood")
+        raise TypeError(f"data type '{dtype}' not understood") from err

     # Any invalid dtype (such as pd.Timestamp) should raise an error.
     # np.dtype(invalid_type).kind = 0 for such objects. However, this will
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 0730de934b56c..33daf6627721f 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -1040,8 +1040,8 @@ def __new__(cls, subtype=None):

             try:
                 subtype = pandas_dtype(subtype)
-            except TypeError:
-                raise TypeError("could not construct IntervalDtype")
+            except TypeError as err:
+                raise TypeError("could not construct IntervalDtype") from err

         if is_categorical_dtype(subtype) or is_string_dtype(subtype):
             # GH 19016
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 3fc10444ee064..990822913aecf 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2771,11 +2771,11 @@ def _ensure_valid_index(self, value):
         if not len(self.index) and is_list_like(value) and len(value):
             try:
                 value = Series(value)
-            except (ValueError, NotImplementedError, TypeError):
+            except (ValueError, NotImplementedError, TypeError) as err:
                 raise ValueError(
                     "Cannot set a frame with no defined index "
                     "and a value that cannot be converted to a Series"
-                )
+                ) from err

             self._data = self._data.reindex_axis(
                 value.index.copy(), axis=1, fill_value=np.nan
@@ -3338,7 +3338,7 @@ def reindexer(value):
                     # other
                     raise TypeError(
                         "incompatible index of inserted column with frame index"
-                    )
+                    ) from err
             return value

         if isinstance(value, Series):
@@ -4059,8 +4059,10 @@ def set_index(
                 # everything else gets tried as a key; see GH 24969
                 try:
                     found = col in self.columns
-                except TypeError:
-                    raise TypeError(f"{err_msg}. Received column of type {type(col)}")
+                except TypeError as err:
+                    raise TypeError(
+                        f"{err_msg}. Received column of type {type(col)}"
+                    ) from err
                 else:
                     if not found:
                         missing.append(col)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index ff7c481d550d4..25770c2c6470c 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -335,9 +335,11 @@ def _construct_axes_from_arguments(
             if a not in kwargs:
                 try:
                     kwargs[a] = args.pop(0)
-                except IndexError:
+                except IndexError as err:
                     if require_all:
-                        raise TypeError("not enough/duplicate arguments specified!")
+                        raise TypeError(
+                            "not enough/duplicate arguments specified!"
+                        ) from err

         axes = {a: kwargs.pop(a, sentinel) for a in cls._AXIS_ORDERS}
         return axes, kwargs
@@ -4792,10 +4794,10 @@ def sample(
             if axis == 0:
                 try:
                     weights = self[weights]
-                except KeyError:
+                except KeyError as err:
                     raise KeyError(
                         "String passed to weights not a valid column"
-                    )
+                    ) from err
             else:
                 raise ValueError(
                     "Strings can only be passed to "
@@ -7521,8 +7523,8 @@ def at_time(
         index = self._get_axis(axis)
         try:
             indexer = index.indexer_at_time(time, asof=asof)
-        except AttributeError:
-            raise TypeError("Index must be DatetimeIndex")
+        except AttributeError as err:
+            raise TypeError("Index must be DatetimeIndex") from err

         return self._take_with_is_copy(indexer, axis=axis)

@@ -7609,8 +7611,8 @@ def between_time(
                 include_start=include_start,
                 include_end=include_end,
             )
-        except AttributeError:
-            raise TypeError("Index must be DatetimeIndex")
+        except AttributeError as err:
+            raise TypeError("Index must be DatetimeIndex") from err

         return self._take_with_is_copy(indexer, axis=axis)
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 1bb512aee39e2..fb935c9065b83 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -572,8 +572,8 @@ def true_and_notna(x, *args, **kwargs) -> bool:
             indices = [
                 self._get_index(name) for name, group in self if true_and_notna(group)
             ]
-        except (ValueError, TypeError):
-            raise TypeError("the filter must return a boolean result")
+        except (ValueError, TypeError) as err:
+            raise TypeError("the filter must return a boolean result") from err

         filtered = self._apply_filter(indices, dropna)
         return filtered
@@ -1371,9 +1371,9 @@ def _transform_general(self, func, *args, **kwargs):
                     path, res = self._choose_path(fast_path, slow_path, group)
                 except TypeError:
                     return self._transform_item_by_item(obj, fast_path)
-                except ValueError:
+                except ValueError as err:
                     msg = "transform must return a scalar value for each group"
-                    raise ValueError(msg)
+                    raise ValueError(msg) from err
             else:
                 res = path(group)
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index f946f0e63a583..48c00140461b5 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -482,13 +482,13 @@ def get_converter(s):
             try:
                 # If the original grouper was a tuple
                 return [self.indices[name] for name in names]
-            except KeyError:
+            except KeyError as err:
                 # turns out it wasn't a tuple
                 msg = (
                     "must supply a same-length tuple to get_group "
                     "with multiple grouping keys"
                 )
-                raise ValueError(msg)
+                raise ValueError(msg) from err

         converters = [get_converter(s) for s in index_sample]
         names = (tuple(f(n) for f, n in zip(converters, name)) for name in names)
diff --git a/pandas/core/indexers.py b/pandas/core/indexers.py
index 5e53b061dd1c8..3858e750326b4 100644
--- a/pandas/core/indexers.py
+++ b/pandas/core/indexers.py
@@ -437,10 +437,10 @@ def check_array_indexer(array: AnyArrayLike, indexer: Any) -> Any:
     elif is_integer_dtype(dtype):
         try:
             indexer = np.asarray(indexer, dtype=np.intp)
-        except ValueError:
+        except ValueError as err:
             raise ValueError(
                 "Cannot index with an integer indexer containing NA values"
-            )
+            ) from err
     else:
         raise IndexError("arrays used as indices must be of integer or boolean type")
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index c215fdb475ed8..935339c62e218 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -670,8 +670,10 @@ def astype(self, dtype, copy=True):

         try:
             casted = self.values.astype(dtype, copy=copy)
-        except (TypeError, ValueError):
-            raise TypeError(f"Cannot cast {type(self).__name__} to dtype {dtype}")
+        except (TypeError, ValueError) as err:
+            raise TypeError(
+                f"Cannot cast {type(self).__name__} to dtype {dtype}"
+            ) from err
         return Index(casted, name=self.name, dtype=dtype)

     _index_shared_docs[
@@ -2856,8 +2858,8 @@ def get_loc(self, key, method=None, tolerance=None):
             casted_key = self._maybe_cast_indexer(key)
             try:
                 return self._engine.get_loc(casted_key)
-            except KeyError:
-                raise KeyError(key)
+            except KeyError as err:
+                raise KeyError(key) from err

         if tolerance is not None:
             tolerance = self._convert_tolerance(tolerance, np.asarray(key))
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 72a2aba2d8a88..b86d409d1f59b 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -967,11 +967,11 @@ def insert(self, loc, item):
             )
             arr = type(self._data)._simple_new(new_i8s, dtype=self.dtype, freq=freq)
             return type(self)._simple_new(arr, name=self.name)
-        except (AttributeError, TypeError):
+        except (AttributeError, TypeError) as err:

             # fall back to object index
             if isinstance(item, str):
                 return self.astype(object).insert(loc, item)
             raise TypeError(
                 f"cannot insert {type(self).__name__} with incompatible label"
-            )
+            ) from err
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index e303e487b1a7d..c9fefd46e55c7 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -552,8 +552,8 @@ def get_loc(self, key, method=None, tolerance=None):

             try:
                 key = self._maybe_cast_for_get_loc(key)
-            except ValueError:
-                raise KeyError(key)
+            except ValueError as err:
+                raise KeyError(key) from err

         elif isinstance(key, timedelta):
             # GH#20464
@@ -574,8 +574,8 @@ def get_loc(self, key, method=None, tolerance=None):

         try:
             return Index.get_loc(self, key, method, tolerance)
-        except KeyError:
-            raise KeyError(orig_key)
+        except KeyError as err:
+            raise KeyError(orig_key) from err

     def _maybe_cast_for_get_loc(self, key) -> Timestamp:
         # needed to localize naive datetimes
@@ -1040,9 +1040,9 @@ def bdate_range(
         try:
             weekmask = weekmask or "Mon Tue Wed Thu Fri"
             freq = prefix_mapping[freq](holidays=holidays, weekmask=weekmask)
-        except (KeyError, TypeError):
+        except (KeyError, TypeError) as err:
             msg = f"invalid custom frequency string: {freq}"
-            raise ValueError(msg)
+            raise ValueError(msg) from err
     elif holidays or weekmask:
         msg = (
             "a custom frequency string is required when holidays or "
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index a7bb4237eab69..d396d1c76f357 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -724,9 +724,9 @@ def get_loc(
         op_right = le if self.closed_right else lt
         try:
             mask = op_left(self.left, key) & op_right(key, self.right)
-        except TypeError:
+        except TypeError as err:
             # scalar is not comparable to II subtype --> invalid label
-            raise KeyError(key)
+            raise KeyError(key) from err

         matches = mask.sum()
         if matches == 0:
@@ -805,9 +805,9 @@ def get_indexer(
                 loc = self.get_loc(key)
             except KeyError:
                 loc = -1
-            except InvalidIndexError:
+            except InvalidIndexError as err:
                 # i.e. non-scalar key
-                raise TypeError(key)
+                raise TypeError(key) from err
             indexer.append(loc)

         return ensure_platform_int(indexer)
@@ -1279,10 +1279,10 @@ def interval_range(
     if freq is not None and not is_number(freq):
         try:
             freq = to_offset(freq)
-        except ValueError:
+        except ValueError as err:
             raise ValueError(
                 f"freq must be numeric or convertible to DateOffset, got {freq}"
-            )
+            ) from err

     # verify type compatibility
     if not all(
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 4bd462e83a5bc..f70975e19b9a4 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -1373,9 +1373,9 @@ def _get_level_number(self, level) -> int:
             )
         try:
             level = self.names.index(level)
-        except ValueError:
+        except ValueError as err:
             if not is_integer(level):
-                raise KeyError(f"Level {level} not found")
+                raise KeyError(f"Level {level} not found") from err
             elif level < 0:
                 level += self.nlevels
                 if level < 0:
@@ -1383,13 +1383,13 @@ def _get_level_number(self, level) -> int:
                     raise IndexError(
                         f"Too many levels: Index has only {self.nlevels} levels, "
                         f"{orig_level} is not a valid level number"
-                    )
+                    ) from err
             # Note: levels are zero-based
             elif level >= self.nlevels:
                 raise IndexError(
                     f"Too many levels: Index has only {self.nlevels} levels, "
                     f"not {level + 1}"
-                )
+                ) from err

         return level

     @property
@@ -3370,8 +3370,8 @@ def _convert_can_do_setop(self, other):
             msg = "other must be a MultiIndex or a list of tuples"
             try:
                 other = MultiIndex.from_tuples(other)
-            except TypeError:
-                raise TypeError(msg)
+            except TypeError as err:
+                raise TypeError(msg) from err
         else:
             result_names = self.names if self.names == other.names else None
         return other, result_names
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index 35a5d99abf4e6..9eeb41f735015 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -515,9 +515,9 @@ def get_loc(self, key, method=None, tolerance=None):

            try:
                asdt, reso = parse_time_string(key, self.freq)
-            except DateParseError:
+            except DateParseError as err:
                # A string with invalid format
-                raise KeyError(f"Cannot interpret '{key}' as period")
+                raise KeyError(f"Cannot interpret '{key}' as period") from err

            grp = resolution.Resolution.get_freq_group(reso)
            freqn = resolution.get_freq_group(self.freq)
@@ -540,14 +540,14 @@ def get_loc(self, key, method=None, tolerance=None):

        try:
            key = Period(key, freq=self.freq)
-        except ValueError:
+        except ValueError as err:
            # we cannot construct the Period
-            raise KeyError(orig_key)
+            raise KeyError(orig_key) from err

        try:
            return Index.get_loc(self, key, method, tolerance)
-        except KeyError:
-            raise KeyError(orig_key)
+        except KeyError as err:
+            raise KeyError(orig_key) from err

    def _maybe_cast_slice_bound(self, label, side: str, kind: str):
        """
@@ -578,10 +578,10 @@ def _maybe_cast_slice_bound(self, label, side: str, kind: str):
                parsed, reso = parse_time_string(label, self.freq)
                bounds = self._parsed_string_to_bounds(reso, parsed)
                return bounds[0 if side == "left" else 1]
-            except ValueError:
+            except ValueError as err:
                # string cannot be parsed as datetime-like
                # TODO: we need tests for this case
-                raise KeyError(label)
+                raise KeyError(label) from err
        elif is_integer(label) or is_float(label):
            self._invalid_indexer("slice", label)

@@ -611,8 +611,8 @@ def _get_string_slice(self, key: str, use_lhs: bool = True, use_rhs: bool = True

        try:
            return self._partial_date_slice(reso, parsed, use_lhs, use_rhs)
-        except KeyError:
-            raise KeyError(key)
+        except KeyError as err:
+            raise KeyError(key) from err

    def insert(self, loc, item):
        if not isinstance(item, Period) or self.freq != item.freq:
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 71cc62e6a110b..f621a3c153adf 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -349,8 +349,8 @@ def get_loc(self, key, method=None, tolerance=None):
                new_key = int(key)
                try:
                    return self._range.index(new_key)
-                except ValueError:
-                    raise KeyError(key)
+                except ValueError as err:
+                    raise KeyError(key) from err
            raise KeyError(key)
        return super().get_loc(key, method=method, tolerance=tolerance)

@@ -695,10 +695,10 @@ def __getitem__(self, key):
            new_key = int(key)
            try:
                return self._range[new_key]
-            except IndexError:
+            except IndexError as err:
                raise IndexError(
                    f"index {key} is out of bounds for axis 0 with size {len(self)}"
-                )
+                ) from err
        elif is_scalar(key):
            raise IndexError(
                "only integers, slices (`:`), "
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index b3b2bc46f6659..5e4a8e83bd95b 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -232,8 +232,8 @@ def get_loc(self, key, method=None, tolerance=None):
        elif isinstance(key, str):
            try:
                key = Timedelta(key)
-            except ValueError:
-                raise KeyError(key)
+            except ValueError as err:
+                raise KeyError(key) from err

        elif isinstance(key, self._data._recognized_scalars) or key is NaT:
            key = Timedelta(key)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 5adc65b488399..29cb62a4c591f 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -610,7 +610,7 @@ def _get_setitem_indexer(self, key):
            # invalid indexer type vs 'other' indexing errors
            if "cannot do" in str(e):
                raise
-            raise IndexingError(key)
+            raise IndexingError(key) from e

    def __setitem__(self, key, value):
        if isinstance(key, tuple):
@@ -654,11 +654,11 @@ def _has_valid_tuple(self, key: Tuple):
                raise IndexingError("Too many indexers")
            try:
                self._validate_key(k, i)
-            except ValueError:
+            except ValueError as err:
                raise ValueError(
                    "Location based indexing can only have "
                    f"[{self._valid_types}] types"
-                )
+                ) from err

    def _is_nested_tuple_indexer(self, tup: Tuple) -> bool:
        """
@@ -1455,9 +1455,9 @@ def _get_list_axis(self, key, axis: int):
        """
        try:
            return self.obj._take_with_is_copy(key, axis=axis)
-        except IndexError:
+        except IndexError as err:
            # re-raise with different error message
-            raise IndexError("positional indexers are out-of-bounds")
+            raise IndexError("positional indexers are out-of-bounds") from err

    def _getitem_axis(self, key, axis: int):
        if isinstance(key, slice):
diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index b30a7a24f3495..c46aed999f45a 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -343,10 +343,10 @@ def _interpolate_scipy_wrapper(
    if method == "pchip":
        try:
            alt_methods["pchip"] = interpolate.pchip_interpolate
-        except AttributeError:
+        except AttributeError as err:
            raise ImportError(
                "Your version of Scipy does not support PCHIP interpolation."
-            )
+            ) from err
    elif method == "akima":
        alt_methods["akima"] = _akima_interpolate
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index a5c609473760d..4398a1569ac56 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -73,7 +73,7 @@ def _f(*args, **kwargs):
                # e.g. this is normally a disallowed function on
                # object arrays that contain strings
                if is_object_dtype(args[0]):
-                    raise TypeError(e)
+                    raise TypeError(e) from e
                raise

        return _f
@@ -607,9 +607,9 @@ def get_median(x):
    if not is_float_dtype(values.dtype):
        try:
            values = values.astype("f8")
-        except ValueError:
+        except ValueError as err:
            # e.g. "could not convert string to float: 'a'"
-            raise TypeError
+            raise TypeError from err
    if mask is not None:
        values[mask] = np.nan
@@ -1361,9 +1361,9 @@ def _ensure_numeric(x):
        except (TypeError, ValueError):
            try:
                x = x.astype(np.float64)
-            except ValueError:
+            except ValueError as err:
                # GH#29941 we get here with object arrays containing strs
-                raise TypeError(f"Could not convert {x} to numeric")
+                raise TypeError(f"Could not convert {x} to numeric") from err
            else:
                if not np.any(np.imag(x)):
                    x = x.real
@@ -1374,9 +1374,9 @@ def _ensure_numeric(x):
            # e.g. "1+1j" or "foo"
            try:
                x = complex(x)
-            except ValueError:
+            except ValueError as err:
                # e.g. "foo"
-                raise TypeError(f"Could not convert {x} to numeric")
+                raise TypeError(f"Could not convert {x} to numeric") from err
    return x
diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py
index b216a927f65b3..2c9105c52cf9b 100644
--- a/pandas/core/ops/array_ops.py
+++ b/pandas/core/ops/array_ops.py
@@ -295,12 +295,12 @@ def na_logical_op(x: np.ndarray, y, op):
                AttributeError,
                OverflowError,
                NotImplementedError,
-            ):
+            ) as err:
                typ = type(y).__name__
                raise TypeError(
                    f"Cannot perform '{op.__name__}' with a dtyped [{x.dtype}] array "
                    f"and scalar of type [{typ}]"
-                )
+                ) from err

    return result.reshape(x.shape)
diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py
index d9f21f0b274ac..06a180d4a096e 100644
--- a/pandas/core/reshape/concat.py
+++ b/pandas/core/reshape/concat.py
@@ -620,8 +620,8 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None) -> MultiInde
            for key, index in zip(hlevel, indexes):
                try:
                    i = level.get_loc(key)
-                except KeyError:
-                    raise ValueError(f"Key {key} not in level {level}")
+                except KeyError as err:
+                    raise ValueError(f"Key {key} not in level {level}") from err

                to_concat.append(np.repeat(i, len(index)))
            codes_list.append(np.concatenate(to_concat))
diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py
index b04e4e1ac4d48..61aa34f724307 100644
--- a/pandas/core/reshape/pivot.py
+++ b/pandas/core/reshape/pivot.py
@@ -631,8 +631,8 @@ def _normalize(table, normalize, margins: bool, margins_name="All"):
        axis_subs = {0: "index", 1: "columns"}
        try:
            normalize = axis_subs[normalize]
-        except KeyError:
-            raise ValueError("Not a valid normalize argument")
+        except KeyError as err:
+            raise ValueError("Not a valid normalize argument") from err

    if margins is False:

@@ -647,8 +647,8 @@ def _normalize(table, normalize, margins: bool, margins_name="All"):

        try:
            f = normalizers[normalize]
-        except KeyError:
-            raise ValueError("Not a valid normalize argument")
+        except KeyError as err:
+            raise ValueError("Not a valid normalize argument") from err

        table = f(table)
        table = table.fillna(0)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 3ded02598963c..d984225f8fd89 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -992,11 +992,11 @@ def __setitem__(self, key, value):
        except TypeError as e:
            if isinstance(key, tuple) and not isinstance(self.index, MultiIndex):
-                raise ValueError("Can only tuple-index with a MultiIndex")
+                raise ValueError("Can only tuple-index with a MultiIndex") from e

            # python 3 type errors should be raised
            if _is_unorderable_exception(e):
-                raise IndexError(key)
+                raise IndexError(key) from e

            if com.is_bool_indexer(key):
                key = check_bool_indexer(self.index, key)
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 4b0fc3e47356c..b0c5d6a48d99a 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -2425,12 +2425,12 @@ def cat(self, others=None, sep=None, na_rep=None, join="left"):
        try:
            # turn anything in "others" into lists of Series
            others = self._get_series_list(others)
-        except ValueError:  # do not catch TypeError raised by _get_series_list
+        except ValueError as err:  # do not catch TypeError raised by _get_series_list
            raise ValueError(
                "If `others` contains arrays or lists (or other "
                "list-likes without an index), these must all be "
                "of the same length as the calling Series/Index."
-            )
+            ) from err

        # align if required
        if any(not data.index.equals(x.index) for x in others):
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index b10b736b9134e..5580146b37d25 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -391,8 +391,10 @@ def _convert_listlike_datetimes(
                # datetime64[ns]
                orig_arg = ensure_object(orig_arg)
                result = _attempt_YYYYMMDD(orig_arg, errors=errors)
-            except (ValueError, TypeError, tslibs.OutOfBoundsDatetime):
-                raise ValueError("cannot convert the input to '%Y%m%d' date format")
+            except (ValueError, TypeError, tslibs.OutOfBoundsDatetime) as err:
+                raise ValueError(
+                    "cannot convert the input to '%Y%m%d' date format"
+                ) from err

        # fallback
        if result is None:
@@ -484,8 +486,10 @@ def _adjust_to_origin(arg, origin, unit):
            raise ValueError("unit must be 'D' for origin='julian'")
        try:
            arg = arg - j0
-        except TypeError:
-            raise ValueError("incompatible 'arg' type for given 'origin'='julian'")
+        except TypeError as err:
+            raise ValueError(
+                "incompatible 'arg' type for given 'origin'='julian'"
+            ) from err

        # preemptively check this for a nice range
        j_max = Timestamp.max.to_julian_date() - j0
@@ -508,10 +512,14 @@ def _adjust_to_origin(arg, origin, unit):
        # we are going to offset back to unix / epoch time
        try:
            offset = Timestamp(origin)
-        except tslibs.OutOfBoundsDatetime:
-            raise tslibs.OutOfBoundsDatetime(f"origin {origin} is Out of Bounds")
-        except ValueError:
-            raise ValueError(f"origin {origin} cannot be converted to a Timestamp")
+        except tslibs.OutOfBoundsDatetime as err:
+            raise tslibs.OutOfBoundsDatetime(
+                f"origin {origin} is Out of Bounds"
+            ) from err
+        except ValueError as err:
+            raise ValueError(
+                f"origin {origin} cannot be converted to a Timestamp"
+            ) from err

        if offset.tz is not None:
            raise ValueError(f"origin offset {offset} must be tz-naive")
@@ -861,7 +869,7 @@ def coerce(values):
        try:
            values = to_datetime(values, format="%Y%m%d", errors=errors, utc=tz)
        except (TypeError, ValueError) as err:
-            raise ValueError(f"cannot assemble the datetimes: {err}")
+            raise ValueError(f"cannot assemble the datetimes: {err}") from err

    for u in ["h", "m", "s", "ms", "us", "ns"]:
        value = unit_rev.get(u)
@@ -869,7 +877,9 @@ def coerce(values):
            try:
                values += to_timedelta(coerce(arg[value]), unit=u, errors=errors)
            except (TypeError, ValueError) as err:
-                raise ValueError(f"cannot assemble the datetimes [{value}]: {err}")
+                raise ValueError(
+                    f"cannot assemble the datetimes [{value}]: {err}"
+                ) from err

    return values
@@ -1001,13 +1011,13 @@ def _convert_listlike(arg, format):
            for element in arg:
                try:
                    times.append(datetime.strptime(element, format).time())
-                except (ValueError, TypeError):
+                except (ValueError, TypeError) as err:
                    if errors == "raise":
                        msg = (
                            f"Cannot convert {element} to a time with given "
                            f"format {format}"
                        )
-                        raise ValueError(msg)
+                        raise ValueError(msg) from err
                    elif errors == "ignore":
                        return arg
                    else:
diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py
index e045d1c2211d7..c6096c24ecbc9 100644
--- a/pandas/core/window/ewm.py
+++ b/pandas/core/window/ewm.py
@@ -219,13 +219,13 @@ def _apply(self, func, **kwargs):

            try:
                values = self._prep_values(b.values)
-            except (TypeError, NotImplementedError):
+            except (TypeError, NotImplementedError) as err:
                if isinstance(obj, ABCDataFrame):
                    exclude.extend(b.columns)
                    del block_list[i]
                    continue
                else:
-                    raise DataError("No numeric types to aggregate")
+                    raise DataError("No numeric types to aggregate") from err

            if values.size == 0:
                results.append(values.copy())
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 65ac064a1322e..3784989de10ab 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -267,8 +267,8 @@ def _prep_values(self, values: Optional[np.ndarray] = None) -> np.ndarray:
        else:
            try:
                values = ensure_float64(values)
-            except (ValueError, TypeError):
-                raise TypeError(f"cannot handle this type -> {values.dtype}")
+            except (ValueError, TypeError) as err:
+                raise TypeError(f"cannot handle this type -> {values.dtype}") from err

        # Convert inf to nan for C funcs
        inf = np.isinf(values)
@@ -449,13 +449,13 @@ def _apply(

            try:
                values = self._prep_values(b.values)
-            except (TypeError, NotImplementedError):
+            except (TypeError, NotImplementedError) as err:
                if isinstance(obj, ABCDataFrame):
                    exclude.extend(b.columns)
                    del block_list[i]
                    continue
                else:
-                    raise DataError("No numeric types to aggregate")
+                    raise DataError("No numeric types to aggregate") from err

            if values.size == 0:
                results.append(values.copy())
@@ -1875,11 +1875,11 @@ def _validate_freq(self):

        try:
            return to_offset(self.window)
-        except (TypeError, ValueError):
+        except (TypeError, ValueError) as err:
            raise ValueError(
                f"passed window {self.window} is not "
                "compatible with a datetimelike index"
-            )
+            ) from err

    _agg_see_also_doc = dedent(
        """
diff --git a/pandas/io/common.py b/pandas/io/common.py
index c52583eed27ec..0fce8f5382686 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -265,8 +265,8 @@ def get_compression_method(
        compression_args = dict(compression)
        try:
            compression = compression_args.pop("method")
-        except KeyError:
-            raise ValueError("If mapping, compression must have key 'method'")
+        except KeyError as err:
+            raise ValueError("If mapping, compression must have key 'method'") from err
    else:
        compression_args = {}
    return compression, compression_args
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index 97959bd125113..d2f9dd285582f 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -628,8 +628,8 @@ def __new__(cls, path, engine=None, **kwargs):
                engine = config.get_option(f"io.excel.{ext}.writer")
                if engine == "auto":
                    engine = _get_default_writer(ext)
-            except KeyError:
-                raise ValueError(f"No engine for filetype: '{ext}'")
+            except KeyError as err:
+                raise ValueError(f"No engine for filetype: '{ext}'") from err
            cls = get_writer(engine)

        return object.__new__(cls)
diff --git a/pandas/io/excel/_util.py b/pandas/io/excel/_util.py
index c8d40d7141fc8..7c8e1abb497bc 100644
--- a/pandas/io/excel/_util.py
+++ b/pandas/io/excel/_util.py
@@ -47,8 +47,8 @@ def _get_default_writer(ext):

 def get_writer(engine_name):
    try:
        return _writers[engine_name]
-    except KeyError:
-        raise ValueError(f"No Excel writer '{engine_name}'")
+    except KeyError as err:
+        raise ValueError(f"No Excel writer '{engine_name}'") from err


 def _excel2num(x):
diff --git a/pandas/io/html.py b/pandas/io/html.py
index 561570f466b68..9efdacadce83e 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -904,7 +904,7 @@ def _parse(flavor, io, match, attrs, encoding, displayed_only, **kwargs):
                        "Since you passed a non-rewindable file "
                        "object, we can't rewind it to try "
                        "another parser. Try read_html() with a different flavor."
-                    )
+                    ) from caught

                retained = caught
        else:
diff --git a/pandas/io/json/_normalize.py b/pandas/io/json/_normalize.py
index f158ad6cd89e3..4b153d3cb69bf 100644
--- a/pandas/io/json/_normalize.py
+++ b/pandas/io/json/_normalize.py
@@ -315,7 +315,7 @@ def _recursive_extract(data, path, seen_meta, level=0):
                        raise KeyError(
                            "Try running with errors='ignore' as key "
                            f"{e} is not always present"
-                        )
+                        ) from e
                meta_vals[key].append(meta_val)
            records.extend(recs)
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 8a3ad6cb45b57..9d1687e20a949 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -814,8 +814,10 @@ def __init__(self, f, engine=None, **kwds):
            ):
                try:
                    dialect_val = getattr(dialect, param)
-                except AttributeError:
-                    raise ValueError(f"Invalid dialect {kwds['dialect']} provided")
+                except AttributeError as err:
+                    raise ValueError(
+                        f"Invalid dialect {kwds['dialect']} provided"
+                    ) from err
                parser_default = _parser_defaults[param]
                provided = kwds.get(param, parser_default)
@@ -1816,19 +1818,19 @@ def _cast_types(self, values, cast_type, column):
            array_type = cast_type.construct_array_type()
            try:
                return array_type._from_sequence_of_strings(values, dtype=cast_type)
-            except NotImplementedError:
+            except NotImplementedError as err:
                raise NotImplementedError(
                    f"Extension Array: {array_type} must implement "
                    "_from_sequence_of_strings in order to be used in parser methods"
-                )
+                ) from err
        else:
            try:
                values = astype_nansafe(values, cast_type, copy=True, skipna=True)
-            except ValueError:
+            except ValueError as err:
                raise ValueError(
                    f"Unable to convert column {column} to type {cast_type}"
-                )
+                ) from err
        return values

    def _do_date_conversions(self, names, data):
@@ -2552,12 +2554,12 @@ def _infer_columns(self):

                    while self.line_pos <= hr:
                        line = self._next_line()

-            except StopIteration:
+            except StopIteration as err:
                if self.line_pos < hr:
                    raise ValueError(
                        f"Passed header={hr} but only {self.line_pos + 1} lines in "
                        "file"
-                    )
+                    ) from err

                # We have an empty file, so check
                # if columns are provided. That will
@@ -2569,7 +2571,7 @@ def _infer_columns(self):
                        return columns, num_original_columns, unnamed_cols

                    if not self.names:
-                        raise EmptyDataError("No columns to parse from file")
+                        raise EmptyDataError("No columns to parse from file") from err

                    line = self.names[:]
@@ -2650,9 +2652,9 @@ def _infer_columns(self):

            try:
                line = self._buffered_line()
-            except StopIteration:
+            except StopIteration as err:
                if not names:
-                    raise EmptyDataError("No columns to parse from file")
+                    raise EmptyDataError("No columns to parse from file") from err

                line = names[:]
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 048aa8b1915d1..168666ea21f45 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -682,7 +682,7 @@ def open(self, mode: str = "a", **kwargs):
            # trying to read from a non-existent file causes an error which
            # is not part of IOError, make it one
            if self._mode == "r" and "Unable to open/create file" in str(err):
-                raise IOError(str(err))
+                raise IOError(str(err)) from err
            raise

    def close(self):
@@ -1069,14 +1069,14 @@ def remove(self, key: str, where=None, start=None, stop=None):
        except AssertionError:
            # surface any assertion errors for e.g. debugging
            raise
-        except Exception:
+        except Exception as err:
            # In tests we get here with ClosedFileError, TypeError, and
            # _table_mod.NoSuchNodeError.  TODO: Catch only these?

            if where is not None:
                raise ValueError(
                    "trying to remove a node with a non-None where clause!"
-                )
+                ) from err

            # we are actually trying to remove a node (with children)
            node = self.get_node(key)
@@ -1521,8 +1521,8 @@ def _validate_format(self, format: str) -> str:
        # validate
        try:
            format = _FORMAT_MAP[format.lower()]
-        except KeyError:
-            raise TypeError(f"invalid HDFStore format specified [{format}]")
+        except KeyError as err:
+            raise TypeError(f"invalid HDFStore format specified [{format}]") from err

        return format

@@ -1579,8 +1579,8 @@ def error(t):
            _STORER_MAP = {"series": SeriesFixed, "frame": FrameFixed}
            try:
                cls = _STORER_MAP[pt]
-            except KeyError:
-                raise error("_STORER_MAP")
+            except KeyError as err:
+                raise error("_STORER_MAP") from err
            return cls(self, group, encoding=encoding, errors=errors)

        # existing node (and must be a table)
@@ -1614,8 +1614,8 @@ def error(t):
        }
        try:
            cls = _TABLE_MAP[tt]
-        except KeyError:
-            raise error("_TABLE_MAP")
+        except KeyError as err:
+            raise error("_TABLE_MAP") from err

        return cls(self, group, encoding=encoding, errors=errors)

@@ -3233,10 +3233,10 @@ def validate_multiindex(self, obj):
        ]
        try:
            return obj.reset_index(), levels
-        except ValueError:
+        except ValueError as err:
            raise ValueError(
                "duplicate names/columns in the multi-index when storing as a table"
-            )
+            ) from err

    @property
    def nrows_expected(self) -> int:
@@ -3784,11 +3784,11 @@ def get_blk_items(mgr, blocks):
            if table_exists and validate:
                try:
                    existing_col = self.values_axes[i]
-                except (IndexError, KeyError):
+                except (IndexError, KeyError) as err:
                    raise ValueError(
                        f"Incompatible appended table [{blocks}]"
                        f"with existing table [{self.values_axes}]"
-                    )
+                    ) from err
            else:
                existing_col = None

@@ -3899,12 +3899,12 @@ def get_blk_items(mgr, blocks):
                    b, b_items = by_items.pop(items)
                    new_blocks.append(b)
                    new_blk_items.append(b_items)
-                except (IndexError, KeyError):
+                except (IndexError, KeyError) as err:
                    jitems = ",".join(pprint_thing(item) for item in items)
                    raise ValueError(
                        f"cannot match existing table structure for [{jitems}] "
                        "on appending data"
-                    )
+                    ) from err
            blocks = new_blocks
            blk_items = new_blk_items

@@ -5061,7 +5061,7 @@ def generate(self, where):
        q = self.table.queryables()
        try:
            return PyTablesExpr(where, queryables=q, encoding=self.table.encoding)
-        except NameError:
+        except NameError as err:
            # raise a nice message, suggesting that the user should use
            # data_columns
            qkeys = ",".join(q.keys())
@@ -5073,7 +5073,7 @@ def generate(self, where):
                "            an axis (e.g. 'index' or 'columns'), or a "
                "data_column\n"
                f"            The currently defined references are: {qkeys}\n"
-            )
+            ) from err

    def select(self):
        """
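Every hunk in this patch applies the same idiom: PEP 3134 explicit exception chaining. Re-raising inside an `except` block with `raise NewError(...) from err` stores the original exception on the new one's `__cause__` and renders the traceback with "The above exception was the direct cause of the following exception" instead of the misleading implicit-chaining message "During handling of the above exception, another exception occurred". Below is a minimal, self-contained sketch of the pattern; the `OptionError` class and `get_option` function are local stand-ins written for illustration, not the pandas implementations:

```python
class OptionError(AttributeError, KeyError):
    """Local stand-in for pandas' OptionError, for illustration only."""


def get_option(options: dict, key: str):
    try:
        return options[key]
    except KeyError as err:
        # `from err` records the KeyError on the new exception's __cause__
        # and marks it as the direct cause in the rendered traceback.
        raise OptionError("No such option") from err


try:
    get_option({"display.width": 80}, "display.heigth")  # deliberate typo
except OptionError as exc:
    assert isinstance(exc.__cause__, KeyError)  # the chain is preserved
    print(f"caught {exc!r}, caused by {exc.__cause__!r}")
```

A bare `raise NewError(...)` inside an `except` block still chains implicitly via `__context__`, but it presents the new error as if it happened accidentally while handling the first; `from err` (or `raise ... from None` to suppress chaining altogether) states the causal relationship explicitly, which is what this diff does at each re-raise site.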