Fix typos, via a Levenshtein-style corrector #30341

Merged 1 commit on Dec 19, 2019
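
The PR title refers to a Levenshtein-style corrector; the script itself is not part of this diff. Below is a minimal, hypothetical sketch of the idea, assuming a plain word list: flag tokens that sit exactly one edit away from a known word. The function names (`levenshtein`, `suggest_typos`) and the vocabulary are illustrative only, not the tool actually used for this PR.

```python
# Hypothetical sketch only; not the corrector actually used for this PR.
import re
from typing import Iterable, List, Tuple


def levenshtein(a: str, b: str) -> int:
    """Classic dynamic-programming edit distance between two strings."""
    if len(a) < len(b):
        a, b = b, a
    previous = list(range(len(b) + 1))
    for i, ca in enumerate(a, start=1):
        current = [i]
        for j, cb in enumerate(b, start=1):
            current.append(
                min(
                    previous[j] + 1,               # deletion
                    current[j - 1] + 1,            # insertion
                    previous[j - 1] + (ca != cb),  # substitution (or match)
                )
            )
        previous = current
    return previous[-1]


def suggest_typos(text: str, vocabulary: Iterable[str]) -> List[Tuple[str, str]]:
    """Flag words that are exactly one edit away from a vocabulary word."""
    vocab = set(vocabulary)
    suggestions = []
    for word in re.findall(r"[A-Za-z]+", text):
        lower = word.lower()
        if lower in vocab or len(lower) < 5:
            continue  # known word, or too short to judge reliably
        for known in vocab:
            if abs(len(known) - len(lower)) <= 1 and levenshtein(lower, known) == 1:
                suggestions.append((word, known))
                break
    return suggestions


# The first typo fixed in this diff:
print(suggest_typos("# holds deprecated option metdata",
                    {"holds", "deprecated", "option", "metadata"}))
# [('metdata', 'metadata')]
```

Applied to comments, docstrings, and error strings with a large English word list plus a whitelist of project identifiers, a check like this surfaces exactly the kind of fixes shown in the hunks below.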
4 changes: 2 additions & 2 deletions pandas/_config/config.py
@@ -57,10 +57,10 @@
DeprecatedOption = namedtuple("DeprecatedOption", "key msg rkey removal_ver")
RegisteredOption = namedtuple("RegisteredOption", "key defval doc validator cb")

-# holds deprecated option metdata
+# holds deprecated option metadata
_deprecated_options: Dict[str, DeprecatedOption] = {}

-# holds registered option metdata
+# holds registered option metadata
_registered_options: Dict[str, RegisteredOption] = {}

# holds the current values for registered options
2 changes: 1 addition & 1 deletion pandas/_libs/groupby.pyx
@@ -791,7 +791,7 @@ def group_quantile(ndarray[float64_t] out,
out[i] = NaN
else:
# Calculate where to retrieve the desired value
-# Casting to int will intentionaly truncate result
+# Casting to int will intentionally truncate result
idx = grp_start + <int64_t>(q * <float64_t>(non_na_sz - 1))

val = values[sort_arr[idx]]
2 changes: 1 addition & 1 deletion pandas/_libs/index.pyx
@@ -288,7 +288,7 @@ cdef class IndexEngine:

def get_indexer_non_unique(self, targets):
"""
-Return an indexer suitable for takng from a non unique index
+Return an indexer suitable for taking from a non unique index
return the labels in the same order ast the target
and a missing indexer into the targets (which correspond
to the -1 indices in the results
2 changes: 1 addition & 1 deletion pandas/_libs/lib.pyx
@@ -510,7 +510,7 @@ def maybe_booleans_to_slice(ndarray[uint8_t] mask):
@cython.boundscheck(False)
def array_equivalent_object(left: object[:], right: object[:]) -> bool:
"""
-Perform an element by element comparion on 1-d object arrays
+Perform an element by element comparison on 1-d object arrays
taking into account nan positions.
"""
cdef:
2 changes: 1 addition & 1 deletion pandas/_libs/src/klib/khash.h
@@ -498,7 +498,7 @@ PANDAS_INLINE khint_t __ac_Wang_hash(khint_t key)
*/
#define kh_n_buckets(h) ((h)->n_buckets)

-/* More conenient interfaces */
+/* More convenient interfaces */

/*! @function
@abstract Instantiate a hash set containing integer keys
6 changes: 3 additions & 3 deletions pandas/_libs/src/ujson/lib/ultrajsondec.c
@@ -150,7 +150,7 @@ FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_numeric(struct DecoderState *ds) {
case '7':
case '8':
case '9': {
-// FIXME: Check for arithemtic overflow here
+// FIXME: Check for arithmetic overflow here
// PERF: Don't do 64-bit arithmetic here unless we know we have
// to
intValue = intValue * 10ULL + (JSLONG)(chr - 48);
@@ -235,7 +235,7 @@ FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_numeric(struct DecoderState *ds) {
}

BREAK_FRC_LOOP:
-// FIXME: Check for arithemtic overflow here
+// FIXME: Check for arithmetic overflow here
ds->lastType = JT_DOUBLE;
ds->start = offset;
return ds->dec->newDouble(
@@ -282,7 +282,7 @@ FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_numeric(struct DecoderState *ds) {
}

BREAK_EXP_LOOP:
-// FIXME: Check for arithemtic overflow here
+// FIXME: Check for arithmetic overflow here
ds->lastType = JT_DOUBLE;
ds->start = offset;
return ds->dec->newDouble(
2 changes: 1 addition & 1 deletion pandas/_libs/src/ujson/python/objToJSON.c
@@ -1632,7 +1632,7 @@ char **NpyArr_encodeLabels(PyArrayObject *labels, PyObjectEncoder *enc,
sprintf(buf, "%" NPY_INT64_FMT, value);
len = strlen(cLabel);
}
-} else { // Fallack to string representation
+} else { // Fallback to string representation
PyObject *str = PyObject_Str(item);
if (str == NULL) {
Py_DECREF(item);
2 changes: 1 addition & 1 deletion pandas/_libs/tslibs/timestamps.pyx
@@ -324,7 +324,7 @@ class Timestamp(_Timestamp):

Function is not implemented. Use pd.to_datetime().
"""
-raise NotImplementedError("Timestamp.strptime() is not implmented."
+raise NotImplementedError("Timestamp.strptime() is not implemented."
"Use to_datetime() to parse date strings.")

@classmethod
2 changes: 1 addition & 1 deletion pandas/core/arrays/sparse/dtype.py
@@ -290,7 +290,7 @@ def update_dtype(self, dtype):
Returns
-------
SparseDtype
-A new SparseDtype with the corret `dtype` and fill value
+A new SparseDtype with the correct `dtype` and fill value
for that `dtype`.

Raises
4 changes: 2 additions & 2 deletions pandas/core/arrays/string_.py
@@ -86,7 +86,7 @@ def __from_arrow__(self, array):

results = []
for arr in chunks:
-# using _from_sequence to ensure None is convered to NA
+# using _from_sequence to ensure None is converted to NA
str_arr = StringArray._from_sequence(np.array(arr))
results.append(str_arr)

@@ -153,7 +153,7 @@ class StringArray(PandasArray):
...
ValueError: StringArray requires an object-dtype ndarray of strings.

-For comparision methods, this returns a :class:`pandas.BooleanArray`
+For comparison methods, this returns a :class:`pandas.BooleanArray`

>>> pd.array(["a", None, "c"], dtype="string") == "a"
<BooleanArray>
2 changes: 1 addition & 1 deletion pandas/core/groupby/base.py
@@ -1,5 +1,5 @@
"""
-Provide basic components for groupby. These defintiions
+Provide basic components for groupby. These definitions
hold the whitelist of methods that are exposed on the
SeriesGroupBy and the DataFrameGroupBy objects.
"""
2 changes: 1 addition & 1 deletion pandas/core/ops/mask_ops.py
@@ -1,5 +1,5 @@
"""
-Ops for masked ararys.
+Ops for masked arrays.
"""
from typing import Optional, Union

2 changes: 1 addition & 1 deletion pandas/io/formats/format.py
@@ -1640,7 +1640,7 @@ def _get_format_datetime64_from_values(
""" given values and a date_format, return a string format """

if isinstance(values, np.ndarray) and values.ndim > 1:
-# We don't actaully care about the order of values, and DatetimeIndex
+# We don't actually care about the order of values, and DatetimeIndex
# only accepts 1D values
values = values.ravel()

2 changes: 1 addition & 1 deletion pandas/io/gbq.py
@@ -65,7 +65,7 @@ def read_gbq(

*New in version 0.2.0 of pandas-gbq*.
dialect : str, default 'legacy'
-Note: The default value is changing to 'standard' in a future verion.
+Note: The default value is changing to 'standard' in a future version.

SQL syntax dialect to use. Value can be one of:

2 changes: 1 addition & 1 deletion pandas/io/json/_json.py
@@ -314,7 +314,7 @@ def __init__(
timedeltas = obj.select_dtypes(include=["timedelta"]).columns
if len(timedeltas):
obj[timedeltas] = obj[timedeltas].applymap(lambda x: x.isoformat())
-# Convert PeriodIndex to datetimes before serialzing
+# Convert PeriodIndex to datetimes before serializing
if is_period_dtype(obj.index):
obj.index = obj.index.to_timestamp()

2 changes: 1 addition & 1 deletion pandas/io/parsers.py
@@ -3492,7 +3492,7 @@ def _get_empty_meta(columns, index_col, index_names, dtype=None):
# 2) index_names (column names)
#
# Both must be non-null to ensure a successful construction. Otherwise,
-# we have to create a generic emtpy Index.
+# we have to create a generic empty Index.
if (index_col is None or index_col is False) or index_names is None:
index = Index([])
else:
2 changes: 1 addition & 1 deletion pandas/tests/extension/json/array.py
@@ -183,7 +183,7 @@ def _values_for_factorize(self):

def _values_for_argsort(self):
# Disable NumPy's shape inference by including an empty tuple...
-# If all the elemnts of self are the same size P, NumPy will
+# If all the elements of self are the same size P, NumPy will
# cast them to an (N, P) array, instead of an (N,) array of tuples.
frozen = [()] + [tuple(x.items()) for x in self]
return np.array(frozen, dtype=object)[1:]
4 changes: 2 additions & 2 deletions pandas/tests/indexes/datetimes/test_date_range.py
@@ -798,7 +798,7 @@ def test_daterange_bug_456(self):
# GH #456
rng1 = bdate_range("12/5/2011", "12/5/2011")
rng2 = bdate_range("12/2/2011", "12/5/2011")
-rng2._data.freq = BDay() # TODO: shouldnt this already be set?
+rng2._data.freq = BDay() # TODO: shouldn't this already be set?

result = rng1.union(rng2)
assert isinstance(result, DatetimeIndex)
@@ -855,7 +855,7 @@ def test_daterange_bug_456(self):
# GH #456
rng1 = bdate_range("12/5/2011", "12/5/2011", freq="C")
rng2 = bdate_range("12/2/2011", "12/5/2011", freq="C")
-rng2._data.freq = CDay() # TODO: shouldnt this already be set?
+rng2._data.freq = CDay() # TODO: shouldn't this already be set?

result = rng1.union(rng2)
assert isinstance(result, DatetimeIndex)
2 changes: 1 addition & 1 deletion pandas/tests/indexes/datetimes/test_tools.py
@@ -1061,7 +1061,7 @@ class TestToDatetimeUnit:
@pytest.mark.parametrize("cache", [True, False])
def test_unit(self, cache):
# GH 11758
-# test proper behavior with erros
+# test proper behavior with errors

with pytest.raises(ValueError):
to_datetime([1], unit="D", format="%Y%m%d", cache=cache)
4 changes: 2 additions & 2 deletions pandas/tests/indexes/timedeltas/test_indexing.py
@@ -228,7 +228,7 @@ def test_insert(self):
def test_delete(self):
idx = timedelta_range(start="1 Days", periods=5, freq="D", name="idx")

-# prserve freq
+# preserve freq
expected_0 = timedelta_range(start="2 Days", periods=4, freq="D", name="idx")
expected_4 = timedelta_range(start="1 Days", periods=4, freq="D", name="idx")

@@ -257,7 +257,7 @@ def test_delete(self):
def test_delete_slice(self):
idx = timedelta_range(start="1 days", periods=10, freq="D", name="idx")

-# prserve freq
+# preserve freq
expected_0_2 = timedelta_range(start="4 days", periods=7, freq="D", name="idx")
expected_7_9 = timedelta_range(start="1 days", periods=7, freq="D", name="idx")

2 changes: 1 addition & 1 deletion pandas/tests/plotting/test_converter.py
@@ -84,7 +84,7 @@ def test_matplotlib_formatters(self):
units = pytest.importorskip("matplotlib.units")

# Can't make any assertion about the start state.
-# We we check that toggling converters off remvoes it, and toggling it
+# We we check that toggling converters off removes it, and toggling it
# on restores it.

with cf.option_context("plotting.matplotlib.register_converters", True):