Auto flake #1741


Closed
wants to merge 6 commits
2 changes: 1 addition & 1 deletion .travis.yml
@@ -91,7 +91,7 @@ install:
script:
- python -OO -c "import xarray"
- py.test xarray --cov=xarray --cov-config ci/.coveragerc --cov-report term-missing --verbose $EXTRA_FLAGS
- git diff upstream/master **/*py | flake8 --diff --exit-zero || true
Contributor:
Instead of replacing this, I'd actually enforce that the diff must be flake8-clean:

git diff master **/*py | flake8 --diff

And then add a second check that covers all files but ignores some errors in existing code:

flake8 --ignore E20,E231,E241,E26,E4,E721,E731,E741 xarray

Which means that the config section can (in fact, needs to) be left as-is.
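
Putting the two suggestions together, the script section might look something like this — a sketch only, with the ignore list copied verbatim from the comment above and upstream/master assumed as the diff base:

script:
  - python -OO -c "import xarray"
  - py.test xarray --cov=xarray --cov-config ci/.coveragerc --cov-report term-missing --verbose $EXTRA_FLAGS
  # fail the build unless the diff against master is flake8-clean
  - git diff upstream/master **/*py | flake8 --diff
  # check all files, but tolerate the listed errors in existing code
  - flake8 --ignore E20,E231,E241,E26,E4,E721,E731,E741 xarray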

- flake8 -j auto xarray
Member:
Question: do we want to have Travis fail when flake8 finds something awry?

Contributor:
IMO yes; it's the only way to keep flake8 passing.
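
For context: the line being removed could never fail the build, since --exit-zero forces flake8 to exit with status 0 and || true masks any remaining failure; the bare invocation added below exits non-zero as soon as flake8 reports anything, which is what makes Travis fail:

# old: exit status forced to 0 twice over, so Travis never saw a failure
git diff upstream/master **/*py | flake8 --diff --exit-zero || true
# new: exits non-zero on any reported error, failing the job
flake8 -j auto xarray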


after_success:
- coveralls
23 changes: 22 additions & 1 deletion setup.cfg
@@ -5,4 +5,25 @@ universal = 1
python_files=test_*.py

[flake8]
max-line-length=79
# References:
# https://flake8.readthedocs.io/en/latest/user/configuration.html
# https://flake8.readthedocs.io/en/latest/user/error-codes.html

# Note: there cannot be spaces after commas here
exclude = __init__.py
ignore =
# Extra space in brackets
E20,
# Multiple spaces around ","
E231,E241,
# Comments
E26,
# Import formatting
E4,
# Comparing types instead of isinstance
E721,
# Assigning lambda expression
E731,
# Ambiguous variable names
E741
max-line-length = 120
Member:
Let's leave this as the default (79 per PEP8)

6 changes: 3 additions & 3 deletions xarray/backends/api.py
@@ -27,18 +27,18 @@ def _get_default_engine(path, allow_remote=False):
engine = 'netcdf4'
except ImportError:
try:
import pydap
import pydap # flake8: noqa
engine = 'pydap'
except ImportError:
raise ValueError('netCDF4 or pydap is required for accessing '
'remote datasets via OPeNDAP')
else:
try:
import netCDF4
import netCDF4 # flake8: noqa
engine = 'netcdf4'
except ImportError: # pragma: no cover
try:
import scipy.io.netcdf
import scipy.io.netcdf # flake8: noqa
engine = 'scipy'
except ImportError:
raise ValueError('cannot read or write netCDF files without '
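A side note on the suppression comments added in this diff: depending on the flake8 version, a # flake8: noqa comment can cause the whole file to be skipped rather than just the line it sits on. The per-line form limits suppression to that one line, shown here on the same pydap import as above:

# suppress only the unused-import warning on this one line:
import pydap  # noqa: F401
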
1 change: 1 addition & 0 deletions xarray/backends/h5netcdf_.py
@@ -55,6 +55,7 @@ def _open_h5netcdf_group(filename, mode, group):
class H5NetCDFStore(WritableCFDataStore, DataStorePickleMixin):
"""Store for reading and writing data via h5netcdf
"""

def __init__(self, filename, mode='r', format=None, group=None,
writer=None, autoclose=False):
if format not in [None, 'NETCDF4']:
1 change: 1 addition & 0 deletions xarray/backends/memory.py
@@ -18,6 +18,7 @@ class InMemoryDataStore(AbstractWritableDataStore):

This store exists purely for internal testing purposes.
"""

def __init__(self, variables=None, attributes=None, writer=None):
self._variables = OrderedDict() if variables is None else variables
self._attributes = OrderedDict() if attributes is None else attributes
1 change: 1 addition & 0 deletions xarray/backends/netCDF4_.py
@@ -218,6 +218,7 @@ class NetCDF4DataStore(WritableCFDataStore, DataStorePickleMixin):

This store supports NetCDF3, NetCDF4 and OpenDAP datasets.
"""

def __init__(self, netcdf4_dataset, mode='r', writer=None, opener=None,
autoclose=False):

1 change: 0 additions & 1 deletion xarray/backends/netcdf3.py
@@ -6,7 +6,6 @@
import numpy as np

from .. import conventions, Variable
from ..core import duck_array_ops
from ..core.pycompat import basestring, unicode_type, OrderedDict


1 change: 1 addition & 0 deletions xarray/backends/pydap_.py
@@ -57,6 +57,7 @@ class PydapDataStore(AbstractDataStore):
This store provides an alternative way to access OpenDAP datasets that may
be useful if the netCDF4 library is not available.
"""

def __init__(self, ds):
"""
Parameters
1 change: 1 addition & 0 deletions xarray/backends/pynio_.py
@@ -40,6 +40,7 @@ def __getitem__(self, key):
class NioDataStore(AbstractDataStore, DataStorePickleMixin):
"""Store for accessing datasets via PyNIO
"""

def __init__(self, filename, mode='r', autoclose=False):
import Nio
opener = functools.partial(Nio.open_file, filename, mode=mode)
13 changes: 7 additions & 6 deletions xarray/backends/rasterio_.py
@@ -20,6 +20,7 @@

class RasterioArrayWrapper(BackendArray):
"""A wrapper around rasterio dataset objects"""

def __init__(self, rasterio_ds):
self.rasterio_ds = rasterio_ds
self._shape = (rasterio_ds.count, rasterio_ds.height,
@@ -63,9 +64,9 @@ def __getitem__(self, key):
elif is_scalar(k):
# windowed operations will always return an array
# we will have to squeeze it later
squeeze_axis.append(i+1)
squeeze_axis.append(i + 1)
start = k
stop = k+1
stop = k + 1
else:
k = np.asarray(k)
start = k[0]
@@ -134,10 +135,10 @@ def open_rasterio(filename, chunks=None, cache=None, lock=None):
dx, dy = riods.res[0], -riods.res[1]
x0 = riods.bounds.right if dx < 0 else riods.bounds.left
y0 = riods.bounds.top if dy < 0 else riods.bounds.bottom
coords['y'] = np.linspace(start=y0 + dy/2, num=ny,
stop=(y0 + (ny - 1) * dy) + dy/2)
coords['x'] = np.linspace(start=x0 + dx/2, num=nx,
stop=(x0 + (nx - 1) * dx) + dx/2)
coords['y'] = np.linspace(start=y0 + dy / 2, num=ny,
stop=(y0 + (ny - 1) * dy) + dy / 2)
coords['x'] = np.linspace(start=x0 + dx / 2, num=nx,
stop=(x0 + (nx - 1) * dx) + dx / 2)

# Attributes
attrs = {}
8 changes: 8 additions & 0 deletions xarray/conventions.py
@@ -358,6 +358,7 @@ class MaskedAndScaledArray(indexing.ExplicitlyIndexedNDArrayMixin):
----------
http://www.unidata.ucar.edu/software/netcdf/docs/BestPractices.html
"""

def __init__(self, array, fill_value=None, scale_factor=None,
add_offset=None, dtype=float):
"""
@@ -400,6 +401,7 @@ class DecodedCFDatetimeArray(indexing.ExplicitlyIndexedNDArrayMixin):
values, when accessed, are automatically converted into datetime objects
using decode_cf_datetime.
"""

def __init__(self, array, units, calendar=None):
self.array = indexing.as_indexable(array)
self.units = units
@@ -440,6 +442,7 @@ class DecodedCFTimedeltaArray(indexing.ExplicitlyIndexedNDArrayMixin):
values, when accessed, are automatically converted into timedelta objects
using decode_cf_timedelta.
"""

def __init__(self, array, units):
self.array = indexing.as_indexable(array)
self.units = units
@@ -460,6 +463,7 @@ class StackedBytesArray(indexing.ExplicitlyIndexedNDArrayMixin):
array('abc',
dtype='|S3')
"""

def __init__(self, array):
"""
Parameters
@@ -505,6 +509,7 @@ class BytesToStringArray(indexing.ExplicitlyIndexedNDArrayMixin):
array(['abc'],
dtype=object)
"""

def __init__(self, array, encoding='utf-8'):
"""
Parameters
@@ -555,6 +560,7 @@ class NativeEndiannessArray(indexing.ExplicitlyIndexedNDArrayMixin):
>>> NativeEndianArray(x)[:].dtype
dtype('int16')
"""

def __init__(self, array):
self.array = indexing.as_indexable(array)

@@ -583,6 +589,7 @@ class BoolTypeArray(indexing.ExplicitlyIndexedNDArrayMixin):
>>> BoolTypeArray(x)[:].dtype
dtype('bool')
"""

def __init__(self, array):
self.array = indexing.as_indexable(array)

@@ -610,6 +617,7 @@ class UnsignedIntTypeArray(indexing.ExplicitlyIndexedNDArrayMixin):
>>> UnsignedIntTypeArray(sb)[:]
array([ 0, 1, 127, 128, 255], dtype=uint8)
"""

def __init__(self, array):
self.array = indexing.as_indexable(array)
self.unsigned_dtype = np.dtype('u%s' % array.dtype.itemsize)
1 change: 1 addition & 0 deletions xarray/core/accessors.py
@@ -80,6 +80,7 @@ class DatetimeAccessor(object):
`dayofyear` may not be accurate.

"""

def __init__(self, xarray_obj):
if not is_datetime_like(xarray_obj.dtype):
raise TypeError("'dt' accessor only available for "
2 changes: 1 addition & 1 deletion xarray/core/alignment.py
@@ -363,7 +363,7 @@ def var_indexers(var, indexers):
"Indexer has dimensions {0:s} that are different "
"from that to be indexed along {1:s}. "
"This will behave differently in the future.".format(
str(indexer.dims), dim),
str(indexer.dims), dim),
FutureWarning, stacklevel=3)

if dim in variables:
1 change: 0 additions & 1 deletion xarray/core/combine.py
@@ -275,7 +275,6 @@ def insert_result_variable(k, v):
raise ValueError(
'variable %s not equal across datasets' % k)


# we've already verified everything is consistent; now, calculate
# shared dimension sizes so we can expand the necessary variables
dim_lengths = [ds.dims.get(dim, 1) for ds in datasets]
4 changes: 2 additions & 2 deletions xarray/core/common.py
@@ -91,7 +91,7 @@ def __complex__(self):
return complex(self.values)

def __long__(self):
return long(self.values)
return long(self.values) # flake8: noqa

def __array__(self, dtype=None):
return np.asarray(self.values, dtype=dtype)
@@ -609,7 +609,7 @@ def _resample_immediately(self, freq, dim, how, skipna,
"calculations. Instead of passing 'dim' and "
"'how=\"{how}\", instead consider using "
".resample({dim}=\"{freq}\").{how}() ".format(
dim=dim, freq=freq, how=how
dim=dim, freq=freq, how=how
), DeprecationWarning, stacklevel=3)

if isinstance(dim, basestring):
3 changes: 1 addition & 2 deletions xarray/core/computation.py
@@ -2,11 +2,9 @@

NOT PUBLIC API.
"""
import collections
import functools
import itertools
import operator
import re

import numpy as np

@@ -36,6 +34,7 @@ class _UFuncSignature(object):
output_core_dims : tuple[tuple]
Core dimension names on each output variable.
"""

def __init__(self, input_core_dims, output_core_dims=((),)):
self.input_core_dims = tuple(tuple(a) for a in input_core_dims)
self.output_core_dims = tuple(tuple(a) for a in output_core_dims)
4 changes: 4 additions & 0 deletions xarray/core/coordinates.py
@@ -151,6 +151,7 @@ class DatasetCoordinates(AbstractCoordinates):
dimensions and the values given by the corresponding xarray.Coordinate
objects.
"""

def __init__(self, dataset):
self._data = dataset

@@ -209,6 +210,7 @@ class DataArrayCoordinates(AbstractCoordinates):
Essentially an OrderedDict with keys given by the array's
dimensions and the values given by corresponding DataArray objects.
"""

def __init__(self, dataarray):
self._data = dataarray

@@ -255,6 +257,7 @@ class LevelCoordinatesSource(object):
Used for attribute style lookup with AttrAccessMixin. Not returned directly
by any public methods.
"""

def __init__(self, data_object):
self._data = data_object

@@ -269,6 +272,7 @@ def __iter__(self):
class Indexes(Mapping, formatting.ReprMixin):
"""Ordered Mapping[str, pandas.Index] for xarray objects.
"""

def __init__(self, variables, sizes):
"""Not for public consumption.

2 changes: 1 addition & 1 deletion xarray/core/dataset.py
@@ -1217,7 +1217,7 @@ def chunk(self, chunks=None, name_prefix='xarray-', token=None,
try:
from dask.base import tokenize
except ImportError:
import dask # raise the usual error if dask is entirely missing
import dask # raise the usual error if dask is entirely missing # flake8: noqa
raise ImportError('xarray requires dask version 0.6 or newer')

if isinstance(chunks, Number):
1 change: 1 addition & 0 deletions xarray/core/extensions.py
@@ -15,6 +15,7 @@ class AccessorRegistrationWarning(Warning):

class _CachedAccessor(object):
"""Custom property-like object (descriptor) for caching accessors."""

def __init__(self, name, accessor):
self._name = name
self._accessor = accessor
2 changes: 1 addition & 1 deletion xarray/core/formatting.py
@@ -21,7 +21,6 @@

from .options import OPTIONS
from .pycompat import PY2, unicode_type, bytes_type, dask_array_type
from .indexing import BasicIndexer


def pretty_print(x, numchars):
@@ -60,6 +59,7 @@ def ensure_valid_repr(string):

class ReprMixin(object):
"""Mixin that defines __repr__ for a class that already has __unicode__."""

def __repr__(self):
return ensure_valid_repr(self.__unicode__())

10 changes: 7 additions & 3 deletions xarray/core/groupby.py
@@ -80,9 +80,9 @@ def _consolidate_slices(slices):
for slice_ in slices:
if not isinstance(slice_, slice):
raise ValueError('list element is not a slice: %r' % slice_)
if (result and last_slice.stop == slice_.start
and _is_one_or_none(last_slice.step)
and _is_one_or_none(slice_.step)):
if (result and last_slice.stop == slice_.start and
_is_one_or_none(last_slice.step) and
_is_one_or_none(slice_.step)):
last_slice = slice(last_slice.start, slice_.stop, slice_.step)
result[-1] = last_slice
else:
@@ -172,6 +172,7 @@ class GroupBy(object):
Dataset.groupby
DataArray.groupby
"""

def __init__(self, obj, group, squeeze=False, grouper=None, bins=None,
cut_kwargs={}):
"""Create a GroupBy object
@@ -441,6 +442,7 @@ def _maybe_reorder(xarray_obj, dim, positions):
class DataArrayGroupBy(GroupBy, ImplementsArrayReduce):
"""GroupBy object specialized to grouping DataArray objects
"""

def _iter_grouped_shortcut(self):
"""Fast version of `_iter_grouped` that yields Variables without
metadata
@@ -573,6 +575,7 @@ def reduce_array(ar):
return ar.reduce(func, dim, axis, keep_attrs=keep_attrs, **kwargs)
return self.apply(reduce_array, shortcut=shortcut)


ops.inject_reduce_methods(DataArrayGroupBy)
ops.inject_binary_ops(DataArrayGroupBy)

Expand Down Expand Up @@ -663,5 +666,6 @@ def assign(self, **kwargs):
"""
return self.apply(lambda ds: ds.assign(**kwargs))


ops.inject_reduce_methods(DatasetGroupBy)
ops.inject_binary_ops(DatasetGroupBy)