diff --git a/.travis.yml b/.travis.yml
index d797e9844bc..760d04480f6 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -91,7 +91,7 @@ install:
 script:
   - python -OO -c "import xarray"
   - py.test xarray --cov=xarray --cov-config ci/.coveragerc --cov-report term-missing --verbose $EXTRA_FLAGS
-  - git diff upstream/master **/*py | flake8 --diff --exit-zero || true
+  - flake8 -j auto xarray

 after_success:
   - coveralls
diff --git a/setup.cfg b/setup.cfg
index d2f336aa1d0..ce91b93fec9 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -5,4 +5,25 @@ universal = 1
 python_files=test_*.py

 [flake8]
-max-line-length=79
+# References:
+# https://flake8.readthedocs.io/en/latest/user/configuration.html
+# https://flake8.readthedocs.io/en/latest/user/error-codes.html
+
+# Note: there cannot be spaces after commas here
+exclude = __init__.py
+ignore =
+    # Extra space in brackets
+    E20,
+    # Multiple spaces around ","
+    E231,E241,
+    # Comments
+    E26,
+    # Import formatting
+    E4,
+    # Comparing types instead of isinstance
+    E721,
+    # Assigning lambda expression
+    E731,
+    # Ambiguous variable names
+    E741
+max-line-length = 120
diff --git a/xarray/backends/api.py b/xarray/backends/api.py
index 36c686e7a91..36d7a4e9899 100644
--- a/xarray/backends/api.py
+++ b/xarray/backends/api.py
@@ -27,18 +27,18 @@ def _get_default_engine(path, allow_remote=False):
             engine = 'netcdf4'
         except ImportError:
             try:
-                import pydap
+                import pydap  # flake8: noqa
                 engine = 'pydap'
             except ImportError:
                 raise ValueError('netCDF4 or pydap is required for accessing '
                                  'remote datasets via OPeNDAP')
     else:
         try:
-            import netCDF4
+            import netCDF4  # flake8: noqa
             engine = 'netcdf4'
         except ImportError:  # pragma: no cover
             try:
-                import scipy.io.netcdf
+                import scipy.io.netcdf  # flake8: noqa
                 engine = 'scipy'
             except ImportError:
                 raise ValueError('cannot read or write netCDF files without '
diff --git a/xarray/backends/h5netcdf_.py b/xarray/backends/h5netcdf_.py
index b4d2dc7e689..28c329bb820 100644
--- a/xarray/backends/h5netcdf_.py
+++ b/xarray/backends/h5netcdf_.py
@@ -55,6 +55,7 @@ def _open_h5netcdf_group(filename, mode, group):
 class H5NetCDFStore(WritableCFDataStore, DataStorePickleMixin):
     """Store for reading and writing data via h5netcdf
     """
+
     def __init__(self, filename, mode='r', format=None, group=None,
                  writer=None, autoclose=False):
         if format not in [None, 'NETCDF4']:
diff --git a/xarray/backends/memory.py b/xarray/backends/memory.py
index f79e92439fe..4cecf1e7771 100644
--- a/xarray/backends/memory.py
+++ b/xarray/backends/memory.py
@@ -18,6 +18,7 @@ class InMemoryDataStore(AbstractWritableDataStore):

     This store exists purely for internal testing purposes.
     """
+
     def __init__(self, variables=None, attributes=None, writer=None):
         self._variables = OrderedDict() if variables is None else variables
         self._attributes = OrderedDict() if attributes is None else attributes
diff --git a/xarray/backends/netCDF4_.py b/xarray/backends/netCDF4_.py
index 59e195b1c9a..a19ae6c3c3c 100644
--- a/xarray/backends/netCDF4_.py
+++ b/xarray/backends/netCDF4_.py
@@ -218,6 +218,7 @@ class NetCDF4DataStore(WritableCFDataStore, DataStorePickleMixin):

     This store supports NetCDF3, NetCDF4 and OpenDAP datasets.
     """
+
     def __init__(self, netcdf4_dataset, mode='r', writer=None, opener=None,
                  autoclose=False):

diff --git a/xarray/backends/netcdf3.py b/xarray/backends/netcdf3.py
index 7194e06186f..7aa054bc119 100644
--- a/xarray/backends/netcdf3.py
+++ b/xarray/backends/netcdf3.py
@@ -6,7 +6,6 @@
 import numpy as np

 from .. import conventions, Variable
-from ..core import duck_array_ops
 from ..core.pycompat import basestring, unicode_type, OrderedDict
diff --git a/xarray/backends/pydap_.py b/xarray/backends/pydap_.py
index 044dcc21e5f..8aefa5f6be2 100644
--- a/xarray/backends/pydap_.py
+++ b/xarray/backends/pydap_.py
@@ -57,6 +57,7 @@ class PydapDataStore(AbstractDataStore):
     This store provides an alternative way to access OpenDAP datasets that
     may be useful if the netCDF4 library is not available.
     """
+
     def __init__(self, ds):
         """
         Parameters
diff --git a/xarray/backends/pynio_.py b/xarray/backends/pynio_.py
index ffa936c0466..37f1db1f6a7 100644
--- a/xarray/backends/pynio_.py
+++ b/xarray/backends/pynio_.py
@@ -40,6 +40,7 @@ def __getitem__(self, key):
 class NioDataStore(AbstractDataStore, DataStorePickleMixin):
     """Store for accessing datasets via PyNIO
     """
+
     def __init__(self, filename, mode='r', autoclose=False):
         import Nio
         opener = functools.partial(Nio.open_file, filename, mode=mode)
diff --git a/xarray/backends/rasterio_.py b/xarray/backends/rasterio_.py
index 13f9c0a3de6..46707c52598 100644
--- a/xarray/backends/rasterio_.py
+++ b/xarray/backends/rasterio_.py
@@ -20,6 +20,7 @@

 class RasterioArrayWrapper(BackendArray):
     """A wrapper around rasterio dataset objects"""
+
     def __init__(self, rasterio_ds):
         self.rasterio_ds = rasterio_ds
         self._shape = (rasterio_ds.count, rasterio_ds.height,
@@ -63,9 +64,9 @@ def __getitem__(self, key):
             elif is_scalar(k):
                 # windowed operations will always return an array
                 # we will have to squeeze it later
-                squeeze_axis.append(i+1)
+                squeeze_axis.append(i + 1)
                 start = k
-                stop = k+1
+                stop = k + 1
             else:
                 k = np.asarray(k)
                 start = k[0]
@@ -134,10 +135,10 @@ def open_rasterio(filename, chunks=None, cache=None, lock=None):
         dx, dy = riods.res[0], -riods.res[1]
         x0 = riods.bounds.right if dx < 0 else riods.bounds.left
         y0 = riods.bounds.top if dy < 0 else riods.bounds.bottom
-        coords['y'] = np.linspace(start=y0 + dy/2, num=ny,
-                                  stop=(y0 + (ny - 1) * dy) + dy/2)
-        coords['x'] = np.linspace(start=x0 + dx/2, num=nx,
-                                  stop=(x0 + (nx - 1) * dx) + dx/2)
+        coords['y'] = np.linspace(start=y0 + dy / 2, num=ny,
+                                  stop=(y0 + (ny - 1) * dy) + dy / 2)
+        coords['x'] = np.linspace(start=x0 + dx / 2, num=nx,
+                                  stop=(x0 + (nx - 1) * dx) + dx / 2)

     # Attributes
     attrs = {}
diff --git a/xarray/conventions.py b/xarray/conventions.py
index 5b951ff694b..5add8e0b529 100644
--- a/xarray/conventions.py
+++ b/xarray/conventions.py
@@ -358,6 +358,7 @@ class MaskedAndScaledArray(indexing.ExplicitlyIndexedNDArrayMixin):
     ----------
     http://www.unidata.ucar.edu/software/netcdf/docs/BestPractices.html
     """
+
     def __init__(self, array, fill_value=None, scale_factor=None,
                  add_offset=None, dtype=float):
         """
@@ -400,6 +401,7 @@ class DecodedCFDatetimeArray(indexing.ExplicitlyIndexedNDArrayMixin):
     values, when accessed, are automatically converted into datetime objects
     using decode_cf_datetime.
     """
+
     def __init__(self, array, units, calendar=None):
         self.array = indexing.as_indexable(array)
         self.units = units
@@ -440,6 +442,7 @@ class DecodedCFTimedeltaArray(indexing.ExplicitlyIndexedNDArrayMixin):
     values, when accessed, are automatically converted into timedelta objects
     using decode_cf_timedelta.
""" + def __init__(self, array, units): self.array = indexing.as_indexable(array) self.units = units @@ -460,6 +463,7 @@ class StackedBytesArray(indexing.ExplicitlyIndexedNDArrayMixin): array('abc', dtype='|S3') """ + def __init__(self, array): """ Parameters @@ -505,6 +509,7 @@ class BytesToStringArray(indexing.ExplicitlyIndexedNDArrayMixin): array(['abc'], dtype=object) """ + def __init__(self, array, encoding='utf-8'): """ Parameters @@ -555,6 +560,7 @@ class NativeEndiannessArray(indexing.ExplicitlyIndexedNDArrayMixin): >>> NativeEndianArray(x)[:].dtype dtype('int16') """ + def __init__(self, array): self.array = indexing.as_indexable(array) @@ -583,6 +589,7 @@ class BoolTypeArray(indexing.ExplicitlyIndexedNDArrayMixin): >>> BoolTypeArray(x)[:].dtype dtype('bool') """ + def __init__(self, array): self.array = indexing.as_indexable(array) @@ -610,6 +617,7 @@ class UnsignedIntTypeArray(indexing.ExplicitlyIndexedNDArrayMixin): >>> UnsignedIntTypeArray(sb)[:] array([ 0, 1, 127, 128, 255], dtype=uint8) """ + def __init__(self, array): self.array = indexing.as_indexable(array) self.unsigned_dtype = np.dtype('u%s' % array.dtype.itemsize) diff --git a/xarray/core/accessors.py b/xarray/core/accessors.py index 59aca6b67f0..5052b555c73 100644 --- a/xarray/core/accessors.py +++ b/xarray/core/accessors.py @@ -80,6 +80,7 @@ class DatetimeAccessor(object): `dayofyear` may not be accurate. """ + def __init__(self, xarray_obj): if not is_datetime_like(xarray_obj.dtype): raise TypeError("'dt' accessor only available for " diff --git a/xarray/core/alignment.py b/xarray/core/alignment.py index 24b7da88c6b..876245322fa 100644 --- a/xarray/core/alignment.py +++ b/xarray/core/alignment.py @@ -363,7 +363,7 @@ def var_indexers(var, indexers): "Indexer has dimensions {0:s} that are different " "from that to be indexed along {1:s}. " "This will behave differently in the future.".format( - str(indexer.dims), dim), + str(indexer.dims), dim), FutureWarning, stacklevel=3) if dim in variables: diff --git a/xarray/core/combine.py b/xarray/core/combine.py index 97c36c85a3c..ff0c1edb3df 100644 --- a/xarray/core/combine.py +++ b/xarray/core/combine.py @@ -275,7 +275,6 @@ def insert_result_variable(k, v): raise ValueError( 'variable %s not equal across datasets' % k) - # we've already verified everything is consistent; now, calculate # shared dimension sizes so we can expand the necessary variables dim_lengths = [ds.dims.get(dim, 1) for ds in datasets] diff --git a/xarray/core/common.py b/xarray/core/common.py index 298630121ec..75272e12e0e 100644 --- a/xarray/core/common.py +++ b/xarray/core/common.py @@ -91,7 +91,7 @@ def __complex__(self): return complex(self.values) def __long__(self): - return long(self.values) + return long(self.values) # flake8: noqa def __array__(self, dtype=None): return np.asarray(self.values, dtype=dtype) @@ -609,7 +609,7 @@ def _resample_immediately(self, freq, dim, how, skipna, "calculations. Instead of passing 'dim' and " "'how=\"{how}\", instead consider using " ".resample({dim}=\"{freq}\").{how}() ".format( - dim=dim, freq=freq, how=how + dim=dim, freq=freq, how=how ), DeprecationWarning, stacklevel=3) if isinstance(dim, basestring): diff --git a/xarray/core/computation.py b/xarray/core/computation.py index e58b072f752..6b0ed34b5e1 100644 --- a/xarray/core/computation.py +++ b/xarray/core/computation.py @@ -2,11 +2,9 @@ NOT PUBLIC API. 
""" -import collections import functools import itertools import operator -import re import numpy as np @@ -36,6 +34,7 @@ class _UFuncSignature(object): output_core_dims : tuple[tuple] Core dimension names on each output variable. """ + def __init__(self, input_core_dims, output_core_dims=((),)): self.input_core_dims = tuple(tuple(a) for a in input_core_dims) self.output_core_dims = tuple(tuple(a) for a in output_core_dims) diff --git a/xarray/core/coordinates.py b/xarray/core/coordinates.py index 473611806b5..8322df88cef 100644 --- a/xarray/core/coordinates.py +++ b/xarray/core/coordinates.py @@ -151,6 +151,7 @@ class DatasetCoordinates(AbstractCoordinates): dimensions and the values given by the corresponding xarray.Coordinate objects. """ + def __init__(self, dataset): self._data = dataset @@ -209,6 +210,7 @@ class DataArrayCoordinates(AbstractCoordinates): Essentially an OrderedDict with keys given by the array's dimensions and the values given by corresponding DataArray objects. """ + def __init__(self, dataarray): self._data = dataarray @@ -255,6 +257,7 @@ class LevelCoordinatesSource(object): Used for attribute style lookup with AttrAccessMixin. Not returned directly by any public methods. """ + def __init__(self, data_object): self._data = data_object @@ -269,6 +272,7 @@ def __iter__(self): class Indexes(Mapping, formatting.ReprMixin): """Ordered Mapping[str, pandas.Index] for xarray objects. """ + def __init__(self, variables, sizes): """Not for public consumption. diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py index 9c99213dc23..b7e547a9643 100644 --- a/xarray/core/dataset.py +++ b/xarray/core/dataset.py @@ -1217,7 +1217,7 @@ def chunk(self, chunks=None, name_prefix='xarray-', token=None, try: from dask.base import tokenize except ImportError: - import dask # raise the usual error if dask is entirely missing + import dask # raise the usual error if dask is entirely missing # flake8: noqa raise ImportError('xarray requires dask version 0.6 or newer') if isinstance(chunks, Number): diff --git a/xarray/core/extensions.py b/xarray/core/extensions.py index affb55b3298..0daa9ef7556 100644 --- a/xarray/core/extensions.py +++ b/xarray/core/extensions.py @@ -15,6 +15,7 @@ class AccessorRegistrationWarning(Warning): class _CachedAccessor(object): """Custom property-like object (descriptor) for caching accessors.""" + def __init__(self, name, accessor): self._name = name self._accessor = accessor diff --git a/xarray/core/formatting.py b/xarray/core/formatting.py index 6f91f73738c..83f8e2719d6 100644 --- a/xarray/core/formatting.py +++ b/xarray/core/formatting.py @@ -21,7 +21,6 @@ from .options import OPTIONS from .pycompat import PY2, unicode_type, bytes_type, dask_array_type -from .indexing import BasicIndexer def pretty_print(x, numchars): @@ -60,6 +59,7 @@ def ensure_valid_repr(string): class ReprMixin(object): """Mixin that defines __repr__ for a class that already has __unicode__.""" + def __repr__(self): return ensure_valid_repr(self.__unicode__()) diff --git a/xarray/core/groupby.py b/xarray/core/groupby.py index b299380e6c6..ed3a2d5197d 100644 --- a/xarray/core/groupby.py +++ b/xarray/core/groupby.py @@ -80,9 +80,9 @@ def _consolidate_slices(slices): for slice_ in slices: if not isinstance(slice_, slice): raise ValueError('list element is not a slice: %r' % slice_) - if (result and last_slice.stop == slice_.start - and _is_one_or_none(last_slice.step) - and _is_one_or_none(slice_.step)): + if (result and last_slice.stop == slice_.start and + 
+                _is_one_or_none(last_slice.step) and
+                _is_one_or_none(slice_.step)):
             last_slice = slice(last_slice.start, slice_.stop, slice_.step)
             result[-1] = last_slice
         else:
@@ -172,6 +172,7 @@ class GroupBy(object):
     Dataset.groupby
     DataArray.groupby
     """
+
     def __init__(self, obj, group, squeeze=False, grouper=None, bins=None,
                  cut_kwargs={}):
         """Create a GroupBy object

@@ -441,6 +442,7 @@ def _maybe_reorder(xarray_obj, dim, positions):
 class DataArrayGroupBy(GroupBy, ImplementsArrayReduce):
     """GroupBy object specialized to grouping DataArray objects
     """
+
     def _iter_grouped_shortcut(self):
         """Fast version of `_iter_grouped` that yields Variables without
         metadata
@@ -573,6 +575,7 @@ def reduce_array(ar):
             return ar.reduce(func, dim, axis, keep_attrs=keep_attrs, **kwargs)
         return self.apply(reduce_array, shortcut=shortcut)

+
 ops.inject_reduce_methods(DataArrayGroupBy)
 ops.inject_binary_ops(DataArrayGroupBy)

@@ -663,5 +666,6 @@ def assign(self, **kwargs):
         """
         return self.apply(lambda ds: ds.assign(**kwargs))

+
 ops.inject_reduce_methods(DatasetGroupBy)
 ops.inject_binary_ops(DatasetGroupBy)
diff --git a/xarray/core/indexing.py b/xarray/core/indexing.py
index 3aea8ca6b8a..7e566367e4a 100644
--- a/xarray/core/indexing.py
+++ b/xarray/core/indexing.py
@@ -74,9 +74,9 @@ def _asarray_tuplesafe(values):


 def _is_nested_tuple(possible_tuple):
-    return (isinstance(possible_tuple, tuple)
-            and any(isinstance(value, (tuple, list, slice))
-                    for value in possible_tuple))
+    return (isinstance(possible_tuple, tuple) and
+            any(isinstance(value, (tuple, list, slice))
+                for value in possible_tuple))


 def _index_method_kwargs(method, tolerance):
@@ -143,7 +143,7 @@ def convert_label_indexer(index, label, index_name='', method=None,
                     raise ValueError('Vectorized selection is not '
                                      'available along level variable: ' + k)
             indexer, new_index = index.get_loc_level(
-                    tuple(label.values()), level=tuple(label.keys()))
+                tuple(label.values()), level=tuple(label.keys()))

     elif isinstance(label, tuple) and isinstance(index, pd.MultiIndex):
         if _is_nested_tuple(label):
@@ -288,6 +288,7 @@ class ExplicitIndexer(object):
     Do not instantiate BaseIndexer objects directly: instead, use one of
     the sub-classes BasicIndexer, OuterIndexer or VectorizedIndexer.
     """
+
     def __init__(self, key):
         if type(self) is ExplicitIndexer:
             raise TypeError('cannot instantiate base ExplicitIndexer objects')
@@ -319,6 +320,7 @@ class BasicIndexer(ExplicitIndexer):
     rules for basic indexing: each axis is independently sliced and axes
     indexed with an integer are dropped from the result.
     """
+
     def __init__(self, key):
         if not isinstance(key, tuple):
             raise TypeError('key must be a tuple: {!r}'.format(key))
@@ -345,6 +347,7 @@ class OuterIndexer(ExplicitIndexer):
     axes indexed with an integer are dropped from the result. This type of
     indexing works like MATLAB/Fortran.
     """
+
     def __init__(self, key):
         if not isinstance(key, tuple):
             raise TypeError('key must be a tuple: {!r}'.format(key))
@@ -381,6 +384,7 @@ class VectorizedIndexer(ExplicitIndexer):
     sliced axes are always moved to the end:
     https://github.com/numpy/numpy/pull/6256
     """
+
     def __init__(self, key):
         if not isinstance(key, tuple):
             raise TypeError('key must be a tuple: {!r}'.format(key))
@@ -448,6 +452,7 @@ def __getitem__(self, key):

 class LazilyIndexedArray(ExplicitlyIndexedNDArrayMixin):
     """Wrap an array to make basic and orthogonal indexing lazy.
""" + def __init__(self, array, key=None): """ Parameters diff --git a/xarray/core/nputils.py b/xarray/core/nputils.py index a721425b839..8ac04752e85 100644 --- a/xarray/core/nputils.py +++ b/xarray/core/nputils.py @@ -120,6 +120,7 @@ class NumpyVIndexAdapter(object): This is a pure Python implementation of (some of) the logic in this NumPy proposal: https://github.com/numpy/numpy/pull/6256 """ + def __init__(self, array): self._array = array diff --git a/xarray/core/ops.py b/xarray/core/ops.py index 2ed3f81d185..d02b8fa3108 100644 --- a/xarray/core/ops.py +++ b/xarray/core/ops.py @@ -262,7 +262,7 @@ def inject_reduce_methods(cls): def inject_cum_methods(cls): methods = ([(name, getattr(duck_array_ops, name), True) - for name in NAN_CUM_METHODS]) + for name in NAN_CUM_METHODS]) for name, f, include_skipna in methods: numeric_only = getattr(f, 'numeric_only', False) func = cls._reduce_method(f, include_skipna, numeric_only) diff --git a/xarray/core/options.py b/xarray/core/options.py index abae7427f9a..9f06f8dbbae 100644 --- a/xarray/core/options.py +++ b/xarray/core/options.py @@ -35,6 +35,7 @@ class set_options(object): >>> xr.set_options(display_width=80) """ + def __init__(self, **kwargs): invalid_options = {k for k in kwargs if k not in OPTIONS} if invalid_options: diff --git a/xarray/core/pycompat.py b/xarray/core/pycompat.py index a73a27f9643..4b83df9e14f 100644 --- a/xarray/core/pycompat.py +++ b/xarray/core/pycompat.py @@ -1,3 +1,5 @@ +# flake8: noqa + from __future__ import absolute_import from __future__ import division from __future__ import print_function @@ -126,6 +128,7 @@ class ExitStack(object): # in the list raise an exception """ + def __init__(self): self._exit_callbacks = deque() @@ -161,7 +164,7 @@ def push(self, exit): self._exit_callbacks.append(exit) else: self._push_cm_exit(exit, exit_method) - return exit # Allow use as a decorator + return exit # Allow use as a decorator def callback(self, callback, *args, **kwds): """Registers an arbitrary callback and arguments. @@ -174,7 +177,7 @@ def _exit_wrapper(exc_type, exc, tb): # setting __wrapped__ may still help with introspection _exit_wrapper.__wrapped__ = callback self.push(_exit_wrapper) - return callback # Allow use as a decorator + return callback # Allow use as a decorator def enter_context(self, cm): """Enters the supplied context manager diff --git a/xarray/core/rolling.py b/xarray/core/rolling.py index 6ac668f9349..e398780447e 100644 --- a/xarray/core/rolling.py +++ b/xarray/core/rolling.py @@ -122,6 +122,7 @@ class DataArrayRolling(Rolling): + rolling.DataArrayRolling + ops.inject_bottleneck_rolling_methods """ + def __init__(self, obj, min_periods=None, center=False, **windows): super(DataArrayRolling, self).__init__(obj, min_periods=min_periods, center=center, **windows) @@ -228,6 +229,7 @@ def _reduce_method(cls, func): Methods to return a wrapped function for any function `func` for numpy methods. """ + def wrapped_func(self, **kwargs): return self.reduce(func, **kwargs) return wrapped_func @@ -238,6 +240,7 @@ def _bottleneck_reduce(cls, func): Methods to return a wrapped function for any function `func` for bottoleneck method, except for `median`. """ + def wrapped_func(self, **kwargs): from .dataarray import DataArray @@ -285,6 +288,7 @@ class DatasetRolling(Rolling): Dataset.rolling DataArray.rolling """ + def __init__(self, obj, min_periods=None, center=False, **windows): """ Moving window object for Dataset. 
@@ -355,6 +359,7 @@ def _reduce_method(cls, func):
         Return a wrapped function for injecting numpy and bottoleneck methods.
         see ops.inject_datasetrolling_methods
         """
+
         def wrapped_func(self, **kwargs):
             from .dataset import Dataset
             reduced = OrderedDict()
@@ -367,5 +372,6 @@ def wrapped_func(self, **kwargs):
             return Dataset(reduced, coords=self.obj.coords)
         return wrapped_func

+
 inject_bottleneck_rolling_methods(DataArrayRolling)
 inject_datasetrolling_methods(DatasetRolling)
diff --git a/xarray/core/utils.py b/xarray/core/utils.py
index ac70d7f7aea..923e7d1c806 100644
--- a/xarray/core/utils.py
+++ b/xarray/core/utils.py
@@ -271,6 +271,7 @@ class SingleSlotPickleMixin(object):
    """Mixin class to add the ability to pickle objects whose state is defined
    by a single __slots__ attribute. Only necessary under Python 2.
    """
+
    def __getstate__(self):
        return getattr(self, self.__slots__[0])

@@ -386,6 +387,7 @@ class OrderedSet(MutableSet):
    The API matches the builtin set, but it preserves insertion order of
    elements, like an OrderedDict.
    """
+
    def __init__(self, values=None):
        self._ordered_dict = OrderedDict()
        if values is not None:
diff --git a/xarray/plot/plot.py b/xarray/plot/plot.py
index a908de65362..8fccce988ab 100644
--- a/xarray/plot/plot.py
+++ b/xarray/plot/plot.py
@@ -52,11 +52,12 @@ def _ensure_plottable(*args):
     other_types = [datetime]

     for x in args:
-        if not (_valid_numpy_subdtype(np.array(x), numpy_types)
-                or _valid_other_type(np.array(x), other_types)):
+        if not (_valid_numpy_subdtype(np.array(x), numpy_types) or
+                _valid_other_type(np.array(x), other_types)):
             raise TypeError('Plotting requires coordinates to be numeric '
                             'or dates.')

+
 def _easy_facetgrid(darray, plotfunc, x, y, row=None, col=None, col_wrap=None,
                     sharex=True, sharey=True, aspect=None,
                     size=None, subplot_kws=None, **kwargs):
diff --git a/xarray/plot/utils.py b/xarray/plot/utils.py
index 169aa121e9e..569a5bd6b20 100644
--- a/xarray/plot/utils.py
+++ b/xarray/plot/utils.py
@@ -234,7 +234,7 @@ def _determine_cmap_params(plot_data, vmin=None, vmax=None, cmap=None,
             levels = np.linspace(vmin, vmax, levels)
         else:
             # N in MaxNLocator refers to bins, not ticks
-            ticker = mpl.ticker.MaxNLocator(levels-1)
+            ticker = mpl.ticker.MaxNLocator(levels - 1)
             levels = ticker.tick_values(vmin, vmax)
         vmin, vmax = levels[0], levels[-1]

diff --git a/xarray/tests/test_backends.py b/xarray/tests/test_backends.py
index 017e290558a..457a7db4e0c 100644
--- a/xarray/tests/test_backends.py
+++ b/xarray/tests/test_backends.py
@@ -2,7 +2,6 @@
 from __future__ import division
 from __future__ import print_function
 from io import BytesIO
-from threading import Lock
 import contextlib
 import itertools
 import os.path
@@ -29,11 +28,9 @@
     requires_scipy_or_netCDF4, requires_dask, requires_h5netcdf,
     requires_pynio, requires_pathlib, has_netCDF4, has_scipy, assert_allclose,
     flaky, network, requires_rasterio,
-    assert_identical, raises_regex)
+    assert_identical, raises_regex, mock)
 from .test_dataset import create_test_data

-from xarray.tests import mock, assert_identical
-
 try:
     import netCDF4 as nc4
 except ImportError:
@@ -455,7 +452,7 @@ def find_and_validate_array(obj):
                 assert isinstance(obj, indexing.PandasIndexAdapter)
             else:
                 raise TypeError('{} is wrapped by {}'.format(
-                        type(obj.array), type(obj)))
+                    type(obj.array), type(obj)))

         for k, v in ds.variables.items():
             find_and_validate_array(v._data)
@@ -639,7 +636,7 @@ def test_invalid_dataarray_names_raise(self):
         for name, e in zip([0, (4, 5), True, ''], [te, te, te, ve]):
             ds = Dataset({name: da})
             with raises_regex(*e):
-                with self.roundtrip(ds) as actual:
+                with self.roundtrip(ds):
                     pass

     def test_encoding_kwarg(self):
@@ -1147,7 +1144,7 @@ def create_store(self):
     def test_array_attrs(self):
         ds = Dataset(attrs={'foo': [[1, 2], [3, 4]]})
         with raises_regex(ValueError, 'must be 1-dimensional'):
-            with self.roundtrip(ds) as roundtripped:
+            with self.roundtrip(ds):
                 pass

     def test_roundtrip_example_1_netcdf_gz(self):
@@ -1353,13 +1350,13 @@ def validate_open_mfdataset_autoclose(self, engine, nfiles=10):
                        else 'netcdf4')

         # split into multiple sets of temp files
         for ii in original.x.values:
-            subds = original.isel(x=slice(ii, ii+1))
+            subds = original.isel(x=slice(ii, ii + 1))
             subds.to_netcdf(tmpfiles[ii], engine=writeengine)

         # check that calculation on opened datasets works properly
         ds = open_mfdataset(tmpfiles, engine=readengine, autoclose=True)
-        self.assertAllClose(ds.x.sum().values, (nfiles*(nfiles-1))/2)
+        self.assertAllClose(ds.x.sum().values, (nfiles * (nfiles - 1)) / 2)
         self.assertAllClose(ds.foo.sum().values, np.sum(randdata))
         self.assertAllClose(ds.sum().foo.values, np.sum(randdata))
         ds.close()
@@ -1458,14 +1455,14 @@ def gen_datasets_with_common_coord_and_time(self):
                       coords={
                           't': (['t', ], t1),
                           'x': (['x', ], x)
-                          })
+                      })

         ds2 = Dataset(data_vars={self.var_name: (['t', 'x'], v2),
                                  self.coord_name: ('x', 2 * x)},
                       coords={
                           't': (['t', ], t2),
                           'x': (['x', ], x)
-                          })
+                      })

         return ds1, ds2

@@ -1619,7 +1616,7 @@ def test_attrs_mfdataset(self):
             self.assertEqual(actual.test1, ds1.test1)
             # attributes from ds2 are not retained, e.g.,
             with raises_regex(AttributeError,
-                                   'no attribute'):
+                              'no attribute'):
                 actual.test2

     def test_preprocess_mfdataset(self):
@@ -1737,7 +1734,7 @@ def test_dataarray_compute(self):
         # Test DataArray.compute() on dask backend.
         # The test for Dataset.compute() is already in DatasetIOTestCases;
         # however dask is the only tested backend which supports DataArrays
-        actual = DataArray([1,2]).chunk()
+        actual = DataArray([1, 2]).chunk()
         computed = actual.compute()
         self.assertFalse(actual._in_memory)
         self.assertTrue(computed._in_memory)
@@ -1857,7 +1854,7 @@ def test_serialization(self):
         with create_tmp_file(suffix='.tif') as tmp_file:
             # data
             nx, ny, nz = 4, 3, 3
-            data = np.arange(nx*ny*nz,
+            data = np.arange(nx * ny * nz,
                              dtype=rasterio.float32).reshape(nz, ny, nx)
             transform = from_origin(5000, 80000, 1000, 2000.)
             with rasterio.open(
@@ -1884,7 +1881,7 @@ def test_utm(self):
         with create_tmp_file(suffix='.tif') as tmp_file:
             # data
             nx, ny, nz = 4, 3, 3
-            data = np.arange(nx*ny*nz,
+            data = np.arange(nx * ny * nz,
                              dtype=rasterio.float32).reshape(nz, ny, nx)
             transform = from_origin(5000, 80000, 1000, 2000.)
             with rasterio.open(
@@ -1901,9 +1898,9 @@ def test_utm(self):
             expected = DataArray(data, dims=('band', 'y', 'x'),
                                  coords={
                                      'band': [1, 2, 3],
-                                     'y': -np.arange(ny) * 2000 + 80000 + dy/2,
-                                     'x': np.arange(nx) * 1000 + 5000 + dx/2,
-                                     })
+                                     'y': -np.arange(ny) * 2000 + 80000 + dy / 2,
+                                     'x': np.arange(nx) * 1000 + 5000 + dx / 2,
+                                 })
             with xr.open_rasterio(tmp_file) as rioda:
                 assert_allclose(rioda, expected)
                 assert 'crs' in rioda.attrs
@@ -1939,8 +1936,8 @@ def test_platecarree(self):
             expected = DataArray(data[np.newaxis, ...],
                                  dims=('band', 'y', 'x'),
                                  coords={'band': [1],
-                                         'y': -np.arange(ny)*2 + 2 + dy/2,
-                                         'x': np.arange(nx)*0.5 + 1 + dx/2,
+                                         'y': -np.arange(ny) * 2 + 2 + dy / 2,
+                                         'x': np.arange(nx) * 0.5 + 1 + dx / 2,
                                          })
             with xr.open_rasterio(tmp_file) as rioda:
                 assert_allclose(rioda, expected)
@@ -1962,7 +1959,7 @@ def test_indexing(self):
         with create_tmp_file(suffix='.tif') as tmp_file:
             # data
             nx, ny, nz = 8, 10, 3
-            data = np.arange(nx*ny*nz,
+            data = np.arange(nx * ny * nz,
                              dtype=rasterio.float32).reshape(nz, ny, nx)
             transform = from_origin(1, 2, 0.5, 2.)
             with rasterio.open(
@@ -1976,8 +1973,8 @@ def test_indexing(self):

             # ref
             expected = DataArray(data, dims=('band', 'y', 'x'),
-                                 coords={'x': (np.arange(nx)*0.5 + 1) + dx/2,
-                                         'y': (-np.arange(ny)*2 + 2) + dy/2,
+                                 coords={'x': (np.arange(nx) * 0.5 + 1) + dx / 2,
+                                         'y': (-np.arange(ny) * 2 + 2) + dy / 2,
                                          'band': [1, 2, 3]})

             with xr.open_rasterio(tmp_file, cache=False) as actual:
@@ -2054,7 +2051,7 @@ def test_caching(self):
         with create_tmp_file(suffix='.tif') as tmp_file:
             # data
             nx, ny, nz = 8, 10, 3
-            data = np.arange(nx*ny*nz,
+            data = np.arange(nx * ny * nz,
                              dtype=rasterio.float32).reshape(nz, ny, nx)
             transform = from_origin(1, 2, 0.5, 2.)
             with rasterio.open(
@@ -2068,8 +2065,8 @@ def test_caching(self):

             # ref
             expected = DataArray(data, dims=('band', 'y', 'x'),
-                                 coords={'x': (np.arange(nx)*0.5 + 1) + dx/2,
-                                         'y': (-np.arange(ny)*2 + 2) + dy/2,
+                                 coords={'x': (np.arange(nx) * 0.5 + 1) + dx / 2,
+                                         'y': (-np.arange(ny) * 2 + 2) + dy / 2,
                                          'band': [1, 2, 3]})

             # Cache is the default
@@ -2098,7 +2095,7 @@ def test_chunks(self):
         with create_tmp_file(suffix='.tif') as tmp_file:
             # data
             nx, ny, nz = 8, 10, 3
-            data = np.arange(nx*ny*nz,
+            data = np.arange(nx * ny * nz,
                              dtype=rasterio.float32).reshape(nz, ny, nx)
             transform = from_origin(1, 2, 0.5, 2.)
             with rasterio.open(
@@ -2119,8 +2116,8 @@ def test_chunks(self):

             # ref
             expected = DataArray(data, dims=('band', 'y', 'x'),
-                                 coords={'x': np.arange(nx)*0.5 + 1 + dx/2,
-                                         'y': -np.arange(ny)*2 + 2 + dy/2,
+                                 coords={'x': np.arange(nx) * 0.5 + 1 + dx / 2,
+                                         'y': -np.arange(ny) * 2 + 2 + dy / 2,
                                          'band': [1, 2, 3]})

             # do some arithmetic
diff --git a/xarray/tests/test_computation.py b/xarray/tests/test_computation.py
index 430a1a027cb..3b6b6555ff2 100644
--- a/xarray/tests/test_computation.py
+++ b/xarray/tests/test_computation.py
@@ -488,10 +488,10 @@ def add(a, b, keep_attrs):
     actual = add(a, b, keep_attrs=True)
     assert_identical(actual.attrs, a.attrs)

-    a = xr.Dataset({'x': ('x', [1, 2]), 'x': [0, 1]})
+    a = xr.Dataset({'x': ('y', [1, 2]), 'x': [0, 1]})
     a.attrs['attr'] = 'ds'
     a.x.attrs['attr'] = 'da'
-    b = xr.Dataset({'x': ('x', [1, 1]), 'x': [0, 1]})
+    b = xr.Dataset({'x': ('y', [1, 1]), 'x': [0, 1]})

     actual = add(a, b, keep_attrs=False)
     assert not actual.attrs
@@ -666,8 +666,8 @@ def test_apply_dask_multiple_inputs():
     import dask.array as da

     def covariance(x, y):
-        return ((x - x.mean(axis=-1, keepdims=True))
-                * (y - y.mean(axis=-1, keepdims=True))).mean(axis=-1)
+        return ((x - x.mean(axis=-1, keepdims=True)) *
+                (y - y.mean(axis=-1, keepdims=True))).mean(axis=-1)

     rs = np.random.RandomState(42)
     array1 = da.from_array(rs.randn(4, 4), chunks=(2, 4))
diff --git a/xarray/tests/test_conventions.py b/xarray/tests/test_conventions.py
index ca88ea661c7..d9b778282e5 100644
--- a/xarray/tests/test_conventions.py
+++ b/xarray/tests/test_conventions.py
@@ -8,8 +8,6 @@
 import pytest
 import warnings

-import pytest
-
 from xarray import conventions, Variable, Dataset, open_dataset
 from xarray.core import utils, indexing
 from . import TestCase, requires_netCDF4, unittest, raises_regex, IndexerMaker
@@ -229,7 +227,7 @@ def test_cf_datetime(self):
                 ([0.5, 1.5], 'hours since 1900-01-01T00:00:00'),
                 (0, 'milliseconds since 2000-01-01T00:00:00'),
                 (0, 'microseconds since 2000-01-01T00:00:00'),
-                ]:
+        ]:
             for calendar in ['standard', 'gregorian', 'proleptic_gregorian']:
                 expected = _ensure_naive_tz(nc4.num2date(num_dates, units, calendar))
                 print(num_dates, units, calendar)
@@ -473,7 +471,7 @@ def test_cf_datetime_nan(self):
                  ['NaT', '2000-01-01T00:00:00Z']),
                 ([np.nan, 0, 1], 'days since 2000-01-01',
                  ['NaT', '2000-01-01T00:00:00Z', '2000-01-02T00:00:00Z']),
-                ]:
+        ]:
             with warnings.catch_warnings():
                 warnings.filterwarnings('ignore', 'All-NaN')
                 actual = conventions.decode_cf_datetime(num_dates, units)
@@ -604,7 +602,7 @@ def test_incompatible_attributes(self):
             Variable(['t'], pd.to_timedelta(['1 day']), {'units': 'foobar'}),
             Variable(['t'], [0, 1, 2], {'add_offset': 0}, {'add_offset': 2}),
             Variable(['t'], [0, 1, 2], {'_FillValue': 0}, {'_FillValue': 2}),
-            ]
+        ]
         for var in invalid_vars:
             with pytest.raises(ValueError):
                 conventions.encode_cf_variable(var)
@@ -685,6 +683,7 @@ class NullWrapper(utils.NDArrayMixin):
     Just for testing, this lets us create a numpy array directly
     but make it look like its not in memory yet.
""" + def __init__(self, array): self.array = array diff --git a/xarray/tests/test_dask.py b/xarray/tests/test_dask.py index 4e9b0250a6a..ff343ebe21d 100644 --- a/xarray/tests/test_dask.py +++ b/xarray/tests/test_dask.py @@ -653,7 +653,7 @@ def test_to_dask_dataframe_2D_set_index(self): def test_to_dask_dataframe_coordinates(self): # Test if coordinate is also a dask array x = da.from_array(np.random.randn(10), chunks=4) - t = da.from_array(np.arange(10)*2, chunks=4) + t = da.from_array(np.arange(10) * 2, chunks=4) ds = Dataset(OrderedDict([('a', ('t', x)), ('t', ('t', t))])) diff --git a/xarray/tests/test_dataarray.py b/xarray/tests/test_dataarray.py index c86f706f2ce..e6882493430 100644 --- a/xarray/tests/test_dataarray.py +++ b/xarray/tests/test_dataarray.py @@ -203,11 +203,11 @@ def test_sizes(self): def test_encoding(self): expected = {'foo': 'bar'} self.dv.encoding['foo'] = 'bar' - assert expected, self.d == encoding + assert expected == self.dv.encoding expected = {'baz': 0} self.dv.encoding = expected - assert expected, self.d == encoding + assert expected == self.dv.encoding self.assertIsNot(expected, self.dv.encoding) def test_constructor(self): @@ -620,7 +620,7 @@ def test_isel_fancy(self): # make sure we're raising errors in the right places with raises_regex(IndexError, - 'Dimensions of indexers mismatch'): + 'Dimensions of indexers mismatch'): da.isel(y=(('points', ), [1, 2]), x=(('points', ), [1, 2, 3])) # tests using index or DataArray as indexers @@ -782,20 +782,20 @@ def test_isel_points(self): # make sure we're raising errors in the right places with raises_regex(ValueError, - 'All indexers must be the same length'): + 'All indexers must be the same length'): da.isel_points(y=[1, 2], x=[1, 2, 3]) with raises_regex(ValueError, - 'dimension bad_key does not exist'): + 'dimension bad_key does not exist'): da.isel_points(bad_key=[1, 2]) with raises_regex(TypeError, 'Indexers must be integers'): da.isel_points(y=[1.5, 2.2]) with raises_regex(TypeError, 'Indexers must be integers'): da.isel_points(x=[1, 2, 3], y=slice(3)) with raises_regex(ValueError, - 'Indexers must be 1 dimensional'): + 'Indexers must be 1 dimensional'): da.isel_points(y=1, x=2) with raises_regex(ValueError, - 'Existing dimension names are not'): + 'Existing dimension names are not'): da.isel_points(y=[1, 2], x=[1, 2], dim='x') # using non string dims @@ -2080,7 +2080,7 @@ def test_resample_drop_nondim_coords(self): ys = np.arange(3) times = pd.date_range('2000-01-01', freq='6H', periods=5) data = np.tile(np.arange(5), (6, 3, 1)) - xx, yy = np.meshgrid(xs*5, ys*2.5) + xx, yy = np.meshgrid(xs * 5, ys * 2.5) tt = np.arange(len(times), dtype=int) array = DataArray(data, {'time': times, 'x': xs, 'y': ys}, @@ -2256,7 +2256,7 @@ def test_upsample_interpolate(self): expected_times = times.to_series().resample('1H').asfreq().index # Split the times into equal sub-intervals to simulate the 6 hour # to 1 hour up-sampling - new_times_idx = np.linspace(0, len(times)-1, len(times)*5) + new_times_idx = np.linspace(0, len(times) - 1, len(times) * 5) for kind in ['linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic']: actual = array.resample(time='1H').interpolate(kind) @@ -2295,7 +2295,7 @@ def test_upsample_interpolate_dask(self): ('x', 'y', 'time')) with raises_regex(TypeError, - "dask arrays are not yet supported"): + "dask arrays are not yet supported"): array.resample(time='1H').interpolate('linear') def test_align(self): diff --git a/xarray/tests/test_dataset.py b/xarray/tests/test_dataset.py index 
index 192624b64f3..575d6aed7e3 100644
--- a/xarray/tests/test_dataset.py
+++ b/xarray/tests/test_dataset.py
@@ -318,7 +318,7 @@ def test_constructor_pandas_sequence(self):
         ds = self.make_example_math_dataset()
         pandas_objs = OrderedDict(
-            (var_name, ds[var_name].to_pandas()) for var_name in ['foo','bar']
+            (var_name, ds[var_name].to_pandas()) for var_name in ['foo', 'bar']
         )
         ds_based_on_pandas = Dataset(pandas_objs, ds.coords, attrs=ds.attrs)
         del ds_based_on_pandas['x']
@@ -335,17 +335,16 @@ def test_constructor_pandas_single(self):

         das = [
             DataArray(np.random.rand(4), dims=['a']),  # series
-            DataArray(np.random.rand(4,3), dims=['a', 'b']),  # df
-            DataArray(np.random.rand(4,3,2), dims=['a','b','c']),  # panel
-            ]
+            DataArray(np.random.rand(4, 3), dims=['a', 'b']),  # df
+            DataArray(np.random.rand(4, 3, 2), dims=['a', 'b', 'c']),  # panel
+        ]

-        for da in das:
-            pandas_obj = da.to_pandas()
+        for a in das:
+            pandas_obj = a.to_pandas()
             ds_based_on_pandas = Dataset(pandas_obj)
             for dim in ds_based_on_pandas.data_vars:
                 self.assertArrayEqual(ds_based_on_pandas[dim], pandas_obj[dim])

-
     def test_constructor_compat(self):
         data = OrderedDict([('x', DataArray(0, coords={'y': 1})),
                             ('y', ('z', [1, 1, 1]))])
@@ -930,7 +929,7 @@ def test_isel_fancy(self):
                                 dim1=(('points', ), pdim1)))

         # make sure we're raising errors in the right places
         with raises_regex(IndexError,
-                               'Dimensions of indexers mismatch'):
+                          'Dimensions of indexers mismatch'):
             data.isel(dim1=(('points', ), [1, 2]),
                       dim2=(('points', ), [1, 2, 3]))
         with raises_regex(TypeError, 'cannot use a Dataset'):
@@ -1199,8 +1198,8 @@ def test_sel_dataarray_mindex(self):
             mds.sel(one=['a', 'b'])

         with raises_regex(ValueError, 'Vectorized selection is '
-                               'not available along MultiIndex variable:'
-                               ' x'):
+                          'not available along MultiIndex variable:'
+                          ' x'):
             mds.sel(x=xr.DataArray([np.array(midx[:2]), np.array(midx[-2:])],
                                    dims=['a', 'b']))

@@ -1252,20 +1251,20 @@ def test_isel_points(self):

         # make sure we're raising errors in the right places
         with raises_regex(ValueError,
-                               'All indexers must be the same length'):
+                          'All indexers must be the same length'):
             data.isel_points(dim1=[1, 2], dim2=[1, 2, 3])
         with raises_regex(ValueError,
-                               'dimension bad_key does not exist'):
+                          'dimension bad_key does not exist'):
             data.isel_points(bad_key=[1, 2])
         with raises_regex(TypeError, 'Indexers must be integers'):
             data.isel_points(dim1=[1.5, 2.2])
         with raises_regex(TypeError, 'Indexers must be integers'):
             data.isel_points(dim1=[1, 2, 3], dim2=slice(3))
         with raises_regex(ValueError,
-                               'Indexers must be 1 dimensional'):
+                          'Indexers must be 1 dimensional'):
             data.isel_points(dim1=1, dim2=2)
         with raises_regex(ValueError,
-                               'Existing dimension names are not valid'):
+                          'Existing dimension names are not valid'):
             data.isel_points(dim1=[1, 2], dim2=[1, 2], dim='dim2')

         # test to be sure we keep around variables that were not indexed
@@ -1687,7 +1686,7 @@ def test_align_nocopy(self):
         y = Dataset({'foo': DataArray([1, 2], coords=[('x', [1, 2])])})
         expected_x2 = x
         expected_y2 = Dataset({'foo': DataArray([1, 2, np.nan],
-                                                 coords=[('x', [1, 2, 3])])})
+                                                coords=[('x', [1, 2, 3])])})

         x2, y2 = align(x, y, copy=False, join='outer')
         self.assertDatasetIdentical(expected_x2, x2)
@@ -1701,10 +1700,10 @@ def test_align_nocopy(self):
     def test_align_indexes(self):
         x = Dataset({'foo': DataArray([1, 2, 3], dims='x',
-                                       coords=[('x', [1, 2, 3])])})
+                                      coords=[('x', [1, 2, 3])])})

         x2, = align(x, indexes={'x': [2, 3, 1]})
         expected_x2 = Dataset({'foo': DataArray([2, 3, 1], dims='x',
-                                                 coords={'x': [2, 3, 1]})})
+                                                coords={'x': [2, 3, 1]})})
         self.assertDatasetIdentical(expected_x2, x2)

     def test_align_non_unique(self):
@@ -1722,7 +1721,7 @@ def test_broadcast(self):
         expected = Dataset({'foo': (('x', 'y'), [[0, 0]]),
                             'bar': (('x', 'y'), [[1, 1]]),
                             'baz': (('x', 'y'), [[2, 3]])},
-                            {'c': ('x', [4])})
+                           {'c': ('x', [4])})
         actual, = broadcast(ds)
         self.assertDatasetIdentical(expected, actual)

@@ -1755,7 +1754,7 @@ def test_broadcast_nocopy(self):
     def test_broadcast_exclude(self):
         x = Dataset({
-            'foo': DataArray([[1, 2],[3, 4]], dims=['x', 'y'], coords={'x': [1, 2], 'y': [3, 4]}),
+            'foo': DataArray([[1, 2], [3, 4]], dims=['x', 'y'], coords={'x': [1, 2], 'y': [3, 4]}),
             'bar': DataArray(5),
         })
         y = Dataset({
@@ -2251,7 +2250,7 @@ def test_setitem(self):
             self.assertDatasetIdentical(data1, data2)

         # can't assign an ND array without dimensions
         with raises_regex(ValueError,
-                               'without explicit dimension names'):
+                          'without explicit dimension names'):
             data2['C'] = var.values.reshape(2, 4)
         # but can assign a 1D array
         data1['C'] = var.values
@@ -2458,7 +2457,9 @@ def test_groupby(self):
             self.assertEqual(actual[0], expected[0])
             self.assertDatasetEqual(actual[1], expected[1])

-        identity = lambda x: x
+        def identity(x):
+            return x
+
         for k in ['x', 'c', 'y']:
             actual = data.groupby(k, squeeze=False).apply(identity)
             self.assertDatasetEqual(data, actual)
@@ -2514,7 +2515,8 @@ def test_groupby_reduce(self):
         self.assertDatasetAllClose(expected, actual)

     def test_groupby_math(self):
-        reorder_dims = lambda x: x.transpose('dim1', 'dim2', 'dim3', 'time')
+        def reorder_dims(x):
+            return x.transpose('dim1', 'dim2', 'dim3', 'time')

         ds = create_test_data()
         ds['dim1'] = ds['dim1']
@@ -2585,13 +2587,14 @@ def test_groupby_order(self):
         ds = Dataset()
         for vn in ['a', 'b', 'c']:
             ds[vn] = DataArray(np.arange(10), dims=['t'])
-        all_vars_ref = list(ds.variables.keys())
         data_vars_ref = list(ds.data_vars.keys())
         ds = ds.groupby('t').mean()
-        all_vars = list(ds.variables.keys())
         data_vars = list(ds.data_vars.keys())
         self.assertEqual(data_vars, data_vars_ref)
+        # coords are now at the end of the list, so the test below fails
+        # all_vars = list(ds.variables.keys())
+        # all_vars_ref = list(ds.variables.keys())
         # self.assertEqual(all_vars, all_vars_ref)

     def test_resample_and_first(self):
@@ -2662,7 +2665,7 @@ def test_resample_drop_nondim_coords(self):
         ys = np.arange(3)
         times = pd.date_range('2000-01-01', freq='6H', periods=5)
         data = np.tile(np.arange(5), (6, 3, 1))
-        xx, yy = np.meshgrid(xs*5, ys*2.5)
+        xx, yy = np.meshgrid(xs * 5, ys * 2.5)
         tt = np.arange(len(times), dtype=int)
         array = DataArray(data,
                           {'time': times, 'x': xs, 'y': ys},
@@ -2764,7 +2767,7 @@ def test_to_and_from_dataframe(self):
         self.assertDatasetIdentical(expected, actual)

         # GH697
-        df = pd.DataFrame({'A' : []})
+        df = pd.DataFrame({'A': []})
         actual = Dataset.from_dataframe(df)
         expected = Dataset({'A': DataArray([], dims=('index',))}, {'index': []})
         self.assertDatasetIdentical(expected, actual)
@@ -2875,7 +2878,7 @@ def test_to_and_from_dict(self):
              't': {'data': t, 'dims': 't'},
             'b': {'dims': 't', 'data': y}}
         with raises_regex(ValueError, "cannot convert dict "
-                               "without the key 'dims'"):
+                          "without the key 'dims'"):
             Dataset.from_dict(d)

     def test_to_and_from_dict_with_time_dim(self):
@@ -3201,17 +3204,17 @@ def test_where_drop(self):

         # 2d with odd coordinates
         ds = Dataset({'a': (('x', 'y'), [[0, 1], [2, 3]])},
-                    coords={'x': [4, 3], 'y': [1, 2],
-                    'z' : (['x','y'], [[np.e, np.pi], [np.pi*np.e, np.pi*3]])})
+                     coords={'x': [4, 3], 'y': [1, 2],
+                             'z': (['x', 'y'], [[np.e, np.pi],
+                                                [np.pi * np.e, np.pi * 3]])})
         expected = Dataset({'a': (('x', 'y'), [[3]])},
-                        coords={'x': [3], 'y': [2],
-                        'z' : (['x','y'], [[np.pi*3]])})
+                           coords={'x': [3], 'y': [2],
+                                   'z': (['x', 'y'], [[np.pi * 3]])})
         actual = ds.where(ds > 2, drop=True)
         self.assertDatasetIdentical(expected, actual)

         # 2d multiple variables
-        ds = Dataset({'a': (('x', 'y'), [[0, 1], [2, 3]]), 'b': (('x','y'), [[4, 5], [6, 7]])})
-        expected = Dataset({'a': (('x', 'y'), [[np.nan, 1], [2, 3]]), 'b': (('x', 'y'), [[4, 5], [6,7]])})
+        ds = Dataset({'a': (('x', 'y'), [[0, 1], [2, 3]]), 'b': (('x', 'y'), [[4, 5], [6, 7]])})
+        expected = Dataset({'a': (('x', 'y'), [[np.nan, 1], [2, 3]]), 'b': (('x', 'y'), [[4, 5], [6, 7]])})
         actual = ds.where(ds > 0, drop=True)
         self.assertDatasetIdentical(expected, actual)

@@ -3265,17 +3268,17 @@ def test_reduce(self):
     def test_reduce_bad_dim(self):
         data = create_test_data()
         with raises_regex(ValueError, 'Dataset does not contain'):
-            ds = data.mean(dim='bad_dim')
+            data.mean(dim='bad_dim')

     def test_reduce_cumsum_test_dims(self):
         data = create_test_data()
         for cumfunc in ['cumsum', 'cumprod']:
             with raises_regex(ValueError,
                               "must supply either single 'dim' or 'axis'"):
-                ds = getattr(data, cumfunc)()
+                getattr(data, cumfunc)()
             with raises_regex(ValueError,
                               "must supply either single 'dim' or 'axis'"):
-                ds = getattr(data, cumfunc)(dim=['dim1', 'dim2'])
+                getattr(data, cumfunc)(dim=['dim1', 'dim2'])
             with raises_regex(ValueError, 'Dataset does not contain'):
-                ds = getattr(data, cumfunc)(dim='bad_dim')
+                getattr(data, cumfunc)(dim='bad_dim')

         # ensure dimensions are correct
         for reduct, expected in [('dim1', ['dim1', 'dim2', 'dim3', 'time']),
@@ -3440,7 +3443,7 @@ def scale(x, multiple=1):
         self.assertDataArrayIdentical(actual['numbers'], data['numbers'])

         actual = data.apply(np.asarray)
-        expected = data.drop('time') # time is not used on a data var
+        expected = data.drop('time')  # time is not used on a data var
         self.assertDatasetEqual(expected, actual)

     def make_example_math_dataset(self):
@@ -3794,7 +3797,7 @@ def test_full_like(self):
         # For more thorough tests, see test_variable.py
         # Note: testing data_vars with mismatched dtypes
         ds = Dataset({
-            'd1': DataArray([1,2,3], dims=['x'], coords={'x': [10, 20, 30]}),
+            'd1': DataArray([1, 2, 3], dims=['x'], coords={'x': [10, 20, 30]}),
             'd2': DataArray([1.1, 2.2, 3.3], dims=['y'])
         }, attrs={'foo': 'bar'})
         actual = full_like(ds, 2)
@@ -3995,12 +3998,14 @@ def test_dir_expected_attrs(data_set):
     result = dir(data_set)
     assert set(result) >= some_expected_attrs

+
 def test_dir_non_string(data_set):
     # add a numbered key to ensure this doesn't break dir
     data_set[5] = 'foo'
     result = dir(data_set)
     assert not (5 in result)

+
 def test_dir_unicode(data_set):
     data_set[u'unicode'] = 'uni'
     result = dir(data_set)
diff --git a/xarray/tests/test_distributed.py b/xarray/tests/test_distributed.py
index 9999ed9a669..7aa68e0383b 100644
--- a/xarray/tests/test_distributed.py
+++ b/xarray/tests/test_distributed.py
@@ -1,12 +1,12 @@
 import pytest

 import xarray as xr
-from xarray.core.pycompat import suppress

 distributed = pytest.importorskip('distributed')
 da = pytest.importorskip('dask.array')
 import dask
-from distributed.utils_test import cluster, loop, gen_cluster
-from distributed.client import futures_of, wait
+from distributed.utils_test import cluster, gen_cluster
+from distributed.utils_test import loop  # flake8: noqa
+from distributed.client import futures_of

 from xarray.tests.test_backends import create_tmp_file, ON_WINDOWS
 from xarray.tests.test_dataset import create_test_data
diff --git a/xarray/tests/test_extensions.py b/xarray/tests/test_extensions.py
index 27a838bc1ac..c3f89a5f533 100644
--- a/xarray/tests/test_extensions.py
+++ b/xarray/tests/test_extensions.py
@@ -15,6 +15,7 @@
 @xr.register_dataarray_accessor('example_accessor')
 class ExampleAccessor(object):
     """For the pickling tests below."""
+
     def __init__(self, xarray_obj):
         self.obj = xarray_obj

@@ -26,6 +27,7 @@ def test_register(self):
         @xr.register_dataarray_accessor('demo')
         class DemoAccessor(object):
             """Demo accessor."""
+
             def __init__(self, xarray_obj):
                 self._obj = xarray_obj

diff --git a/xarray/tests/test_formatting.py b/xarray/tests/test_formatting.py
index 2c2c9bd614d..f80326c3e84 100644
--- a/xarray/tests/test_formatting.py
+++ b/xarray/tests/test_formatting.py
@@ -25,7 +25,7 @@ def test_get_indexer_at_least_n_items(self):
             ((2, 5, 1,), (slice(2), slice(None), slice(None))),
             ((2, 5, 3,), (0, slice(4), slice(None))),
             ((2, 3, 3,), (slice(2), slice(None), slice(None))),
-            ]
+        ]
         for shape, expected in cases:
             actual = formatting._get_indexer_at_least_n_items(shape, 10)
             self.assertEqual(expected, actual)
diff --git a/xarray/tests/test_groupby.py b/xarray/tests/test_groupby.py
index 7245685e2c7..9b28d3256dd 100644
--- a/xarray/tests/test_groupby.py
+++ b/xarray/tests/test_groupby.py
@@ -13,8 +13,8 @@ def test_consolidate_slices():
     assert _consolidate_slices([slice(3), slice(3, 5)]) == [slice(5)]
     assert _consolidate_slices([slice(2, 3), slice(3, 6)]) == [slice(2, 6)]

-    assert (_consolidate_slices([slice(2, 3, 1), slice(3, 6, 1)])
-            == [slice(2, 6, 1)])
+    assert (_consolidate_slices([slice(2, 3, 1), slice(3, 6, 1)]) ==
+            [slice(2, 6, 1)])

     slices = [slice(2, 3), slice(5, 6)]
     assert _consolidate_slices(slices) == slices
@@ -51,15 +51,15 @@ def test_groupby_da_datetime():
     # test groupby with a DataArray of dtype datetime for GH1132
     # create test data
     times = pd.date_range('2000-01-01', periods=4)
-    foo = xr.DataArray([1,2,3,4], coords=dict(time=times), dims='time')
+    foo = xr.DataArray([1, 2, 3, 4], coords=dict(time=times), dims='time')
     # create test index
     dd = times.to_pydatetime()
     reference_dates = [dd[0], dd[2]]
-    labels = reference_dates[0:1]*2 + reference_dates[1:2]*2
+    labels = reference_dates[0:1] * 2 + reference_dates[1:2] * 2
     ind = xr.DataArray(labels, coords=dict(time=times), dims='time',
                        name='reference_date')
     g = foo.groupby(ind)
     actual = g.sum(dim='time')
-    expected = xr.DataArray([3,7], coords=dict(reference_date=reference_dates), dims='reference_date')
+    expected = xr.DataArray([3, 7], coords=dict(reference_date=reference_dates), dims='reference_date')
     assert actual.equals(expected)

diff --git a/xarray/tests/test_merge.py b/xarray/tests/test_merge.py
index 532b9ee4ff0..409ad86c1e9 100644
--- a/xarray/tests/test_merge.py
+++ b/xarray/tests/test_merge.py
@@ -76,9 +76,9 @@ def test_merge_no_conflicts_single_var(self):
         ds2 = xr.Dataset({'a': ('x', [2, 3]), 'x': [1, 2]})
         expected = xr.Dataset({'a': ('x', [1, 2, 3]), 'x': [0, 1, 2]})
         assert expected.identical(xr.merge([ds1, ds2],
-                                  compat='no_conflicts'))
+                                           compat='no_conflicts'))
         assert expected.identical(xr.merge([ds2, ds1],
-                                  compat='no_conflicts'))
+                                           compat='no_conflicts'))
         assert ds1.identical(xr.merge([ds1, ds2],
                                       compat='no_conflicts', join='left'))
diff --git a/xarray/tests/test_plot.py b/xarray/tests/test_plot.py
index e2473d6389e..d3a28281f50 100644
--- a/xarray/tests/test_plot.py
+++ b/xarray/tests/test_plot.py
@@ -724,7 +724,7 @@ def test_no_labels(self):
     def test_colorbar_kwargs(self):
         # replace label
         self.darray.name = 'testvar'
-        self.plotmethod(add_colorbar=True, cbar_kwargs={'label':'MyLabel'})
+        self.plotmethod(add_colorbar=True, cbar_kwargs={'label': 'MyLabel'})
         alltxt = text_in_fig()
         self.assertIn('MyLabel', alltxt)
         self.assertNotIn('testvar', alltxt)
@@ -736,7 +736,7 @@ def test_colorbar_kwargs(self):
         # change cbar ax
         fig, (ax, cax) = plt.subplots(1, 2)
         self.plotmethod(ax=ax, cbar_ax=cax, add_colorbar=True,
-                        cbar_kwargs={'label':'MyBar'})
+                        cbar_kwargs={'label': 'MyBar'})
         self.assertTrue(ax.has_data())
         self.assertTrue(cax.has_data())
         alltxt = text_in_fig()
@@ -745,7 +745,7 @@ def test_colorbar_kwargs(self):
         # note that there are two ways to achieve this
         fig, (ax, cax) = plt.subplots(1, 2)
         self.plotmethod(ax=ax, add_colorbar=True,
-                        cbar_kwargs={'label':'MyBar', 'cax':cax})
+                        cbar_kwargs={'label': 'MyBar', 'cax': cax})
         self.assertTrue(ax.has_data())
         self.assertTrue(cax.has_data())
         alltxt = text_in_fig()
@@ -756,7 +756,7 @@ def test_colorbar_kwargs(self):
         self.assertNotIn('testvar', text_in_fig())
         # check that error is raised
         pytest.raises(ValueError, self.plotmethod,
-                      add_colorbar=False, cbar_kwargs= {'label':'label'})
+                      add_colorbar=False, cbar_kwargs={'label': 'label'})

     def test_verbose_facetgrid(self):
         a = easy_array((10, 15, 3))
diff --git a/xarray/tests/test_tutorial.py b/xarray/tests/test_tutorial.py
index 56bdccedcfe..05a72153025 100644
--- a/xarray/tests/test_tutorial.py
+++ b/xarray/tests/test_tutorial.py
@@ -3,7 +3,6 @@
 from __future__ import print_function

 import os
-import pytest

 from xarray import tutorial, DataArray
 from xarray.core.pycompat import suppress
diff --git a/xarray/tests/test_utils.py b/xarray/tests/test_utils.py
index c1df1da8c86..1552091452b 100644
--- a/xarray/tests/test_utils.py
+++ b/xarray/tests/test_utils.py
@@ -1,7 +1,6 @@
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
-import pickle

 import pytest
 import numpy as np
@@ -32,7 +31,7 @@ def test(self):
             (pd.Index(x, dtype=object), x.astype(object)),
             (pd.Index(td), td),
             (pd.Index(td, dtype=object), td.astype(object)),
-            ]:
+        ]:
             actual = utils.safe_cast_to_index(array)
             self.assertArrayEqual(expected, actual)
             self.assertEqual(expected.dtype, actual.dtype)
diff --git a/xarray/tests/test_variable.py b/xarray/tests/test_variable.py
index bdaee5edb7a..fcfcadd96e0 100644
--- a/xarray/tests/test_variable.py
+++ b/xarray/tests/test_variable.py
@@ -218,11 +218,11 @@ def test_0d_time_data(self):
     def test_datetime64_conversion(self):
         times = pd.date_range('2000-01-01', periods=3)
         for values, preserve_source in [
-            (times, True),
-            (times.values, True),
-            (times.values.astype('datetime64[s]'), False),
-            (times.to_pydatetime(), False),
-        ]:
+                (times, True),
+                (times.values, True),
+                (times.values.astype('datetime64[s]'), False),
+                (times.to_pydatetime(), False),
+        ]:
             v = self.cls(['t'], values)
             self.assertEqual(v.dtype, np.dtype('datetime64[ns]'))
             self.assertArrayEqual(v.values, times.values)
@@ -233,11 +233,11 @@ def test_datetime64_conversion(self):
     def test_timedelta64_conversion(self):
         times = pd.timedelta_range(start=0, periods=3)
         for values, preserve_source in [
-            (times, True),
-            (times.values, True),
-            (times.values.astype('timedelta64[s]'), False),
-            (times.to_pytimedelta(), False),
-        ]:
+                (times, True),
+                (times.values, True),
+                (times.values.astype('timedelta64[s]'), False),
+                (times.to_pytimedelta(), False),
+        ]:
             v = self.cls(['t'], values)
             self.assertEqual(v.dtype, np.dtype('timedelta64[ns]'))
             self.assertArrayEqual(v.values, times.values)
@@ -732,10 +732,10 @@ def test_numpy_same_methods(self):
     def test_datetime64_conversion_scalar(self):
         expected = np.datetime64('2000-01-01', 'ns')
         for values in [
-            np.datetime64('2000-01-01'),
-            pd.Timestamp('2000-01-01T00'),
-            datetime(2000, 1, 1),
-        ]:
+                np.datetime64('2000-01-01'),
+                pd.Timestamp('2000-01-01T00'),
+                datetime(2000, 1, 1),
+        ]:
             v = Variable([], values)
             self.assertEqual(v.dtype, np.dtype('datetime64[ns]'))
             self.assertEqual(v.values, expected)
@@ -744,10 +744,10 @@ def test_datetime64_conversion_scalar(self):
     def test_timedelta64_conversion_scalar(self):
         expected = np.timedelta64(24 * 60 * 60 * 10 ** 9, 'ns')
         for values in [
-            np.timedelta64(1, 'D'),
-            pd.Timedelta('1 day'),
-            timedelta(days=1),
-        ]:
+                np.timedelta64(1, 'D'),
+                pd.Timedelta('1 day'),
+                timedelta(days=1),
+        ]:
             v = Variable([], values)
             self.assertEqual(v.dtype, np.dtype('timedelta64[ns]'))
             self.assertEqual(v.values, expected)
@@ -1141,7 +1141,7 @@ def test_transpose_0d(self):
                 np.timedelta64(1, 'h'),
                 None,
                 object(),
-                ]:
+        ]:
             variable = Variable([], value)
             actual = variable.transpose()
             assert actual.identical(variable)
@@ -1661,7 +1661,7 @@ def test_datetime(self):

     def test_full_like(self):
         # For more thorough tests, see test_variable.py
-        orig = Variable(dims=('x', 'y'), data=[[1.5 ,2.0], [3.1, 4.3]],
+        orig = Variable(dims=('x', 'y'), data=[[1.5, 2.0], [3.1, 4.3]],
                         attrs={'foo': 'bar'})

         expect = orig.copy(deep=True)
@@ -1703,7 +1703,7 @@ def check(actual, expect_dtype, expect_values):
             assert not isinstance(v, np.ndarray)

     def test_zeros_like(self):
-        orig = Variable(dims=('x', 'y'), data=[[1.5 ,2.0], [3.1, 4.3]],
+        orig = Variable(dims=('x', 'y'), data=[[1.5, 2.0], [3.1, 4.3]],
                         attrs={'foo': 'bar'})

         self.assertVariableIdentical(zeros_like(orig),
                                      full_like(orig, 0))
@@ -1711,7 +1711,7 @@ def test_zeros_like(self):
                                      full_like(orig, 0, dtype=int))

     def test_ones_like(self):
-        orig = Variable(dims=('x', 'y'), data=[[1.5 ,2.0], [3.1, 4.3]],
+        orig = Variable(dims=('x', 'y'), data=[[1.5, 2.0], [3.1, 4.3]],
                         attrs={'foo': 'bar'})

         self.assertVariableIdentical(ones_like(orig),
                                      full_like(orig, 1))
@@ -1744,6 +1744,7 @@ def test_raise_no_warning_for_nan_in_binary_ops():

 class TestBackendIndexing(TestCase):
     """ Make sure all the array wrappers can be indexed. """
+
     def setUp(self):
         self.d = np.random.random((10, 3)).astype(np.float64)

@@ -1763,7 +1764,7 @@ def test_NumpyIndexingAdapter(self):
         # could not doubly wrapping
         with raises_regex(TypeError, 'NumpyIndexingAdapter only wraps '):
             v = Variable(dims=('x', 'y'), data=NumpyIndexingAdapter(
-                    NumpyIndexingAdapter(self.d)))
+                NumpyIndexingAdapter(self.d)))

     def test_LazilyIndexedArray(self):
         v = Variable(dims=('x', 'y'), data=LazilyIndexedArray(self.d))
diff --git a/xarray/ufuncs.py b/xarray/ufuncs.py
index 557e741c937..1990ac5b765 100644
--- a/xarray/ufuncs.py
+++ b/xarray/ufuncs.py
@@ -41,6 +41,7 @@ def _dispatch_priority(obj):

 class _UFuncDispatcher(object):
     """Wrapper for dispatching ufuncs."""
+
     def __init__(self, name):
         self._name = name
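
Note for contributors: with this patch, CI lints the entire xarray package rather than only the lines changed relative to upstream/master. Since flake8 automatically reads the [flake8] section of setup.cfg, the same check can be reproduced locally with the invocation the new .travis.yml step uses (the pip install line is an assumption about the local environment, not part of the patch):

    pip install flake8        # assumed: flake8 not already installed locally
    flake8 -j auto xarray     # same command as the new .travis.yml script step

The -j auto flag runs flake8 workers in parallel, one per CPU core; the ignore list and the max-line-length = 120 limit configured above apply equally to local and CI runs.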