update minimum versions and associated code cleanup #2204

Merged

merged 17 commits into from Jul 8, 2018
Changes from 1 commit
4 changes: 1 addition & 3 deletions .travis.yml
@@ -14,8 +14,6 @@ matrix:
env: CONDA_ENV=py27-min
- python: 2.7
env: CONDA_ENV=py27-cdat+iris+pynio
- python: 3.4
env: CONDA_ENV=py34
- python: 3.5
env: CONDA_ENV=py35
- python: 3.6
@@ -102,7 +100,7 @@ install:

script:
# TODO: restore this check once the upstream pandas issue is fixed:
# https://github.com/pandas-dev/pandas/issues/21071
# https://github.com/pandas-dev/pandas/issues/21071
# - python -OO -c "import xarray"
- if [[ "$CONDA_ENV" == "docs" ]]; then
conda install -c conda-forge sphinx sphinx_rtd_theme sphinx-gallery numpydoc;
4 changes: 2 additions & 2 deletions ci/requirements-py27-min.yml
@@ -4,8 +4,8 @@ dependencies:
- pytest
- flake8
- mock
- numpy==1.11
- pandas==0.18.0
- numpy==1.12
- pandas==0.19.0
- pip:
- coveralls
- pytest-cov
10 changes: 0 additions & 10 deletions ci/requirements-py34.yml

This file was deleted.

8 changes: 4 additions & 4 deletions doc/installing.rst
@@ -6,9 +6,9 @@ Installation
Required dependencies
---------------------

- Python 2.7 [1]_, 3.4, 3.5, or 3.6
- `numpy <http://www.numpy.org/>`__ (1.11 or later)
- `pandas <http://pandas.pydata.org/>`__ (0.18.0 or later)
- Python 2.7 [1]_, 3.5, or 3.6
- `numpy <http://www.numpy.org/>`__ (1.12 or later)
- `pandas <http://pandas.pydata.org/>`__ (0.19 or later)

Optional dependencies
---------------------
@@ -41,7 +41,7 @@ For accelerating xarray
For parallel computing
~~~~~~~~~~~~~~~~~~~~~~

- `dask.array <http://dask.pydata.org>`__ (0.9.0 or later): required for
- `dask.array <http://dask.pydata.org>`__ (0.XX or later): required for
Collaborator

was this intentional?

Member Author

It is the subject of #2203. I just wanted to make sure it was flagged for an update as part of this PR.

:ref:`dask`.

For plotting
4 changes: 2 additions & 2 deletions xarray/core/duck_array_ops.py
@@ -363,9 +363,9 @@ def f(values, axis=None, skipna=None, **kwargs):
median = _create_nan_agg_method('median', numeric_only=True)
prod = _create_nan_agg_method('prod', numeric_only=True, no_bottleneck=True)
cumprod_1d = _create_nan_agg_method(
'cumprod', numeric_only=True, np_compat=True, no_bottleneck=True)
'cumprod', numeric_only=True, np_compat=False, no_bottleneck=True)
cumsum_1d = _create_nan_agg_method(
'cumsum', numeric_only=True, np_compat=True, no_bottleneck=True)
'cumsum', numeric_only=True, np_compat=False, no_bottleneck=True)


def _nd_cum_func(cum_func, array, axis, **kwargs):
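With numpy 1.12 as the new floor, the np_compat shims for the cumulative aggregations are no longer needed. A minimal standalone sketch of the NaN-aware behavior these methods now rely on directly (illustration only, not xarray code):

```python
import numpy as np

a = np.array([[1.0, 2.0], [3.0, np.nan]])

# numpy >= 1.12 ships nancumsum/nancumprod: NaNs are treated as 0 (sum)
# or 1 (prod), so running totals are unaffected by missing values.
print(np.nancumsum(a, axis=0))   # [[1., 2.], [4., 2.]]
print(np.nancumprod(a, axis=1))  # [[1., 2.], [3., 3.]]
```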
5 changes: 2 additions & 3 deletions xarray/core/missing.py
@@ -8,7 +8,6 @@

from . import rolling
from .computation import apply_ufunc
from .npcompat import flip
from .pycompat import iteritems
from .utils import is_scalar

@@ -242,13 +241,13 @@ def _bfill(arr, n=None, axis=-1):
'''inverse of ffill'''
import bottleneck as bn

arr = flip(arr, axis=axis)
arr = np.flip(arr, axis=axis)

# fill
arr = bn.push(arr, axis=axis, n=n)

# reverse back to original
return flip(arr, axis=axis)
return np.flip(arr, axis=axis)


def ffill(arr, dim=None, limit=None):
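For context, a rough sketch of the flip-based backfill the diff above now expresses with np.flip (assumes bottleneck is installed; illustrative, not the exact xarray helper):

```python
import numpy as np
import bottleneck as bn

arr = np.array([np.nan, 1.0, np.nan, 3.0])

# Backfill = reverse along the axis, forward-fill, then reverse back.
reversed_arr = np.flip(arr, axis=-1)     # [3., nan, 1., nan]
pushed = bn.push(reversed_arr, axis=-1)  # [3., 3., 1., 1.]
backfilled = np.flip(pushed, axis=-1)    # [1., 1., 3., 3.]
print(backfilled)
```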
254 changes: 1 addition & 253 deletions xarray/core/npcompat.py
@@ -1,260 +1,8 @@
from __future__ import absolute_import, division, print_function

from distutils.version import LooseVersion

import numpy as np

if LooseVersion(np.__version__) >= LooseVersion('1.12'):
as_strided = np.lib.stride_tricks.as_strided
else:
def as_strided(x, shape=None, strides=None, subok=False, writeable=True):
array = np.lib.stride_tricks.as_strided(x, shape, strides, subok)
array.setflags(write=writeable)
return array


try:
from numpy import nancumsum, nancumprod, flip
except ImportError: # pragma: no cover
# Code copied from newer versions of NumPy (v1.12).
# Used under the terms of NumPy's license, see licenses/NUMPY_LICENSE.

def _replace_nan(a, val):
"""
If `a` is of inexact type, make a copy of `a`, replace NaNs with
the `val` value, and return the copy together with a boolean mask
marking the locations where NaNs were present. If `a` is not of
inexact type, do nothing and return `a` together with a mask of None.

Note that scalars will end up as array scalars, which is important
for using the result as the value of the out argument in some
operations.

Parameters
----------
a : array-like
Input array.
val : float
NaN values are set to val before doing the operation.

Returns
-------
y : ndarray
If `a` is of inexact type, return a copy of `a` with the NaNs
replaced by the fill value, otherwise return `a`.
mask: {bool, None}
If `a` is of inexact type, return a boolean mask marking locations
of NaNs, otherwise return None.

"""
is_new = not isinstance(a, np.ndarray)
if is_new:
a = np.array(a)
if not issubclass(a.dtype.type, np.inexact):
return a, None
if not is_new:
# need copy
a = np.array(a, subok=True)

mask = np.isnan(a)
np.copyto(a, val, where=mask)
return a, mask

def nancumsum(a, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of array elements over a given axis treating
Not a Numbers (NaNs) as zero. The cumulative sum does not change when
NaNs are encountered and leading NaNs are replaced by zeros.

Zeros are returned for slices that are all-NaN or empty.

.. versionadded:: 1.12.0

Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative sum is computed. The default
(None) is to compute the cumsum over the flattened array.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a
precision less than that of the default platform integer. In
that case, the default platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary. See `doc.ufuncs`
(Section "Output arguments") for more details.

Returns
-------
nancumsum : ndarray.
A new array holding the result is returned unless `out` is
specified, in which it is returned. The result has the same
size as `a`, and the same shape as `a` if `axis` is not None
or `a` is a 1-d array.

See Also
--------
numpy.cumsum : Cumulative sum across array propagating NaNs.
isnan : Show which elements are NaN.

Examples
--------
>>> np.nancumsum(1)
array([1])
>>> np.nancumsum([1])
array([1])
>>> np.nancumsum([1, np.nan])
array([ 1., 1.])
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nancumsum(a)
array([ 1., 3., 6., 6.])
>>> np.nancumsum(a, axis=0)
array([[ 1., 2.],
[ 4., 2.]])
>>> np.nancumsum(a, axis=1)
array([[ 1., 3.],
[ 3., 3.]])

"""
a, mask = _replace_nan(a, 0)
return np.cumsum(a, axis=axis, dtype=dtype, out=out)

def nancumprod(a, axis=None, dtype=None, out=None):
"""
Return the cumulative product of array elements over a given axis
treating Not a Numbers (NaNs) as one. The cumulative product does not
change when NaNs are encountered and leading NaNs are replaced by ones.

Ones are returned for slices that are all-NaN or empty.

.. versionadded:: 1.12.0

Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative product is computed. By default
the input is flattened.
dtype : dtype, optional
Type of the returned array, as well as of the accumulator in which
the elements are multiplied. If *dtype* is not specified, it
defaults to the dtype of `a`, unless `a` has an integer dtype with
a precision less than that of the default platform integer. In
that case, the default platform integer is used instead.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type of the resulting values will be cast if necessary.

Returns
-------
nancumprod : ndarray
A new array holding the result is returned unless `out` is
specified, in which case it is returned.

See Also
--------
numpy.cumprod : Cumulative product across array propagating NaNs.
isnan : Show which elements are NaN.

Examples
--------
>>> np.nancumprod(1)
array([1])
>>> np.nancumprod([1])
array([1])
>>> np.nancumprod([1, np.nan])
array([ 1., 1.])
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nancumprod(a)
array([ 1., 2., 6., 6.])
>>> np.nancumprod(a, axis=0)
array([[ 1., 2.],
[ 3., 2.]])
>>> np.nancumprod(a, axis=1)
array([[ 1., 2.],
[ 3., 3.]])

"""
a, mask = _replace_nan(a, 1)
return np.cumprod(a, axis=axis, dtype=dtype, out=out)

def flip(m, axis):
"""
Reverse the order of elements in an array along the given axis.

The shape of the array is preserved, but the elements are reordered.

.. versionadded:: 1.12.0

Parameters
----------
m : array_like
Input array.
axis : integer
Axis in array, which entries are reversed.


Returns
-------
out : array_like
A view of `m` with the entries of axis reversed. Since a view is
returned, this operation is done in constant time.

See Also
--------
flipud : Flip an array vertically (axis=0).
fliplr : Flip an array horizontally (axis=1).

Notes
-----
flip(m, 0) is equivalent to flipud(m).
flip(m, 1) is equivalent to fliplr(m).
flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at index n.

Examples
--------
>>> A = np.arange(8).reshape((2,2,2))
>>> A
array([[[0, 1],
[2, 3]],

[[4, 5],
[6, 7]]])

>>> flip(A, 0)
array([[[4, 5],
[6, 7]],

[[0, 1],
[2, 3]]])

>>> flip(A, 1)
array([[[2, 3],
[0, 1]],

[[6, 7],
[4, 5]]])

>>> A = np.random.randn(3,4,5)
>>> np.all(flip(A,2) == A[:,:,::-1,...])
True
"""
if not hasattr(m, 'ndim'):
m = np.asarray(m)
indexer = [slice(None)] * m.ndim
try:
indexer[axis] = slice(None, None, -1)
except IndexError:
raise ValueError("axis=%i is invalid for the %i-dimensional "
"input array" % (axis, m.ndim))
return m[tuple(indexer)]
as_strided = np.lib.stride_tricks.as_strided
Collaborator

We could push this down into the caller, rather than maintain a layer here (but also fine as-is!)

try:
from numpy import isin
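The shim that remains in npcompat.py keeps the same try/except import pattern for numpy.isin (added in numpy 1.13). A hedged sketch of that pattern, with a hypothetical fallback body that may differ from the actual backport:

```python
import numpy as np

try:
    from numpy import isin  # available from numpy 1.13
except ImportError:  # pragma: no cover
    # Hypothetical fallback for older numpy; the real npcompat backport
    # copied from numpy may differ in detail.
    def isin(element, test_elements, assume_unique=False, invert=False):
        element = np.asarray(element)
        return np.in1d(element, test_elements, assume_unique=assume_unique,
                       invert=invert).reshape(element.shape)
```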
6 changes: 2 additions & 4 deletions xarray/core/nputils.py
@@ -5,8 +5,6 @@
import numpy as np
import pandas as pd

from . import npcompat


def _validate_axis(data, axis):
ndim = data.ndim
@@ -194,6 +192,6 @@ def _rolling_window(a, window, axis=-1):

shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
rolling = npcompat.as_strided(a, shape=shape, strides=strides,
writeable=False)
rolling = np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides,
writeable=False)
return np.swapaxes(rolling, -2, axis)
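A minimal sketch, assuming a 1-D float array, of the copy-free rolling-window view that this as_strided call constructs:

```python
import numpy as np

a = np.arange(6.0)
window = 3

# Same shape/stride construction as _rolling_window above; each row is a
# length-3 view into `a`, so no data is copied and the result is read-only.
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
rolling = np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides,
                                          writeable=False)
print(rolling)
# [[0. 1. 2.]
#  [1. 2. 3.]
#  [2. 3. 4.]
#  [3. 4. 5.]]
```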
2 changes: 1 addition & 1 deletion xarray/tests/__init__.py
@@ -74,7 +74,7 @@ def _importorskip(modname, minversion=None):
has_rasterio, requires_rasterio = _importorskip('rasterio')
has_pathlib, requires_pathlib = _importorskip('pathlib')
has_zarr, requires_zarr = _importorskip('zarr', minversion='2.2')
has_np112, requires_np112 = _importorskip('numpy', minversion='1.12.0')
has_np113, requires_np113 = _importorskip('numpy', minversion='1.12.0')
Collaborator

Should this be minversion='1.13.0'?


# some special cases
has_scipy_or_netCDF4 = has_scipy or has_netCDF4
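For reference on the flag being renamed here, a rough sketch of what a helper like _importorskip typically does (not the exact xarray implementation):

```python
import importlib
from distutils.version import LooseVersion

import pytest


def _importorskip(modname, minversion=None):
    # Returns (has_module, skip_decorator); tests wrapped with the second
    # value are skipped when the module, or a new-enough version, is missing.
    try:
        mod = importlib.import_module(modname)
        has = True
        if minversion is not None:
            if LooseVersion(mod.__version__) < LooseVersion(minversion):
                raise ImportError('Minimum version not satisfied')
    except ImportError:
        has = False
    func = pytest.mark.skipif(not has, reason='requires {}'.format(modname))
    return has, func
```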