
Commit 21705e6: Revisit # noqa annotations (#3359)
Parent: fb575eb

27 files changed (+167, -177 lines)
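
Most of the diff swaps blanket `# noqa` comments for suppressions scoped to a single
error code (almost always F401, "imported but unused") and deletes annotations that no
longer silence anything. A bare `# noqa` hides every warning flake8 would raise on its
line, so a real problem introduced later goes unreported; naming the code keeps the
other checks live. A minimal illustration of the scoped form, assuming an optional
dependency that may be absent:

    try:
        # Only F401 ("imported but unused") is waived; any other warning
        # reported on this line would still surface.
        import dask  # noqa: F401
    except ImportError:
        dask = None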

asv_bench/benchmarks/__init__.py
Lines changed: 1 addition & 1 deletion

@@ -16,7 +16,7 @@ def decorator(func):
 
 def requires_dask():
     try:
-        import dask  # noqa
+        import dask  # noqa: F401
     except ImportError:
         raise NotImplementedError
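
`requires_dask` imports dask purely to probe for its presence, so the import really is
unused and F401 is exactly the check being waived. A sketch of how such a guard is
typically consumed in an asv benchmark, relying on asv's convention that a `setup`
raising NotImplementedError skips the benchmark (the class and data are illustrative,
not from this repo):

    import numpy as np
    import xarray as xr

    from . import requires_dask  # same import style as dataarray_missing.py below


    class ChunkingBenchmark:
        def setup(self):
            requires_dask()  # skips the whole benchmark when dask is absent
            self.ds = xr.Dataset({"a": ("x", np.arange(10000))})

        def time_chunk(self):
            self.ds.chunk({"x": 1000})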

asv_bench/benchmarks/dataarray_missing.py
Lines changed: 1 addition & 1 deletion

@@ -5,7 +5,7 @@
 from . import randn, requires_dask
 
 try:
-    import dask  # noqa
+    import dask  # noqa: F401
 except ImportError:
     pass

doc/examples/_code/weather_data_setup.py
Lines changed: 1 addition & 1 deletion

@@ -1,6 +1,6 @@
 import numpy as np
 import pandas as pd
-import seaborn as sns  # noqa, pandas aware plotting library
+import seaborn as sns
 
 import xarray as xr

doc/gallery/plot_cartopy_facetgrid.py
Lines changed: 1 addition & 1 deletion

@@ -12,7 +12,7 @@
 For more details see `this discussion`_ on github.
 
 .. _this discussion: https://github.com/pydata/xarray/issues/1397#issuecomment-299190567
-"""  # noqa
+"""
 
 
 import cartopy.crs as ccrs

setup.py
Lines changed: 1 addition & 1 deletion

@@ -86,7 +86,7 @@
 - Issue tracker: http://github.com/pydata/xarray/issues
 - Source code: http://github.com/pydata/xarray
 - SciPy2015 talk: https://www.youtube.com/watch?v=X0pAhJgySxk
-"""  # noqa
+"""
 
 
 setup(
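
The `"""  # noqa` annotations removed in the two files above (and again in
xarray/core/common.py and xarray/core/dataset.py below) sat after the closing quotes of
docstrings containing long URLs, apparently to hush line-length complaints. flake8
matches `# noqa` against the physical line an error is reported on, so a comment on the
closing quotes never reached the overlong lines inside the string; the annotations were
dead weight. Where a single unbreakable line genuinely must exceed the limit, the
narrow tool is a per-line, per-code waiver, sketched here with URLs from this diff:

    # Within the limit: no suppression needed.
    SCIPY_TALK = "https://www.youtube.com/watch?v=X0pAhJgySxk"

    # Unbreakable and overlong: waive E501 on this line only.
    DISCUSSION = "https://github.com/pydata/xarray/issues/1397#issuecomment-299190567"  # noqa: E501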

xarray/backends/api.py
Lines changed: 35 additions & 38 deletions

@@ -42,12 +42,12 @@
 
 def _get_default_engine_remote_uri():
     try:
-        import netCDF4  # noqa
+        import netCDF4  # noqa: F401
 
         engine = "netcdf4"
     except ImportError:  # pragma: no cover
         try:
-            import pydap  # noqa
+            import pydap  # noqa: F401
 
             engine = "pydap"
         except ImportError:

@@ -61,13 +61,13 @@ def _get_default_engine_remote_uri():
 def _get_default_engine_grib():
     msgs = []
     try:
-        import Nio  # noqa
+        import Nio  # noqa: F401
 
         msgs += ["set engine='pynio' to access GRIB files with PyNIO"]
     except ImportError:  # pragma: no cover
         pass
     try:
-        import cfgrib  # noqa
+        import cfgrib  # noqa: F401
 
         msgs += ["set engine='cfgrib' to access GRIB files with cfgrib"]
     except ImportError:  # pragma: no cover

@@ -80,7 +80,7 @@ def _get_default_engine_grib():
 
 def _get_default_engine_gz():
     try:
-        import scipy  # noqa
+        import scipy  # noqa: F401
 
         engine = "scipy"
     except ImportError:  # pragma: no cover

@@ -90,12 +90,12 @@
 
 def _get_default_engine_netcdf():
     try:
-        import netCDF4  # noqa
+        import netCDF4  # noqa: F401
 
         engine = "netcdf4"
     except ImportError:  # pragma: no cover
         try:
-            import scipy.io.netcdf  # noqa
+            import scipy.io.netcdf  # noqa: F401
 
             engine = "scipy"
         except ImportError:

@@ -722,44 +722,41 @@ def open_mfdataset(
 ):
     """Open multiple files as a single dataset.
 
-    If combine='by_coords' then the function ``combine_by_coords`` is used to
-    combine the datasets into one before returning the result, and if
-    combine='nested' then ``combine_nested`` is used. The filepaths must be
-    structured according to which combining function is used, the details of
-    which are given in the documentation for ``combine_by_coords`` and
-    ``combine_nested``. By default the old (now deprecated) ``auto_combine``
-    will be used, please specify either ``combine='by_coords'`` or
-    ``combine='nested'`` in future. Requires dask to be installed. See
-    documentation for details on dask [1]. Attributes from the first dataset
-    file are used for the combined dataset.
+    If combine='by_coords' then the function ``combine_by_coords`` is used to combine
+    the datasets into one before returning the result, and if combine='nested' then
+    ``combine_nested`` is used. The filepaths must be structured according to which
+    combining function is used, the details of which are given in the documentation for
+    ``combine_by_coords`` and ``combine_nested``. By default the old (now deprecated)
+    ``auto_combine`` will be used, please specify either ``combine='by_coords'`` or
+    ``combine='nested'`` in future. Requires dask to be installed. See documentation for
+    details on dask [1]. Attributes from the first dataset file are used for the
+    combined dataset.
 
     Parameters
     ----------
     paths : str or sequence
-        Either a string glob in the form "path/to/my/files/*.nc" or an explicit
-        list of files to open. Paths can be given as strings or as pathlib
-        Paths. If concatenation along more than one dimension is desired, then
-        ``paths`` must be a nested list-of-lists (see ``manual_combine`` for
-        details). (A string glob will be expanded to a 1-dimensional list.)
+        Either a string glob in the form "path/to/my/files/*.nc" or an explicit list of
+        files to open. Paths can be given as strings or as pathlib Paths. If
+        concatenation along more than one dimension is desired, then ``paths`` must be a
+        nested list-of-lists (see ``manual_combine`` for details). (A string glob will
+        be expanded to a 1-dimensional list.)
     chunks : int or dict, optional
-        Dictionary with keys given by dimension names and values given by chunk
-        sizes. In general, these should divide the dimensions of each dataset.
-        If int, chunk each dimension by ``chunks``.
-        By default, chunks will be chosen to load entire input files into
-        memory at once. This has a major impact on performance: please see the
-        full documentation for more details [2].
+        Dictionary with keys given by dimension names and values given by chunk sizes.
+        In general, these should divide the dimensions of each dataset. If int, chunk
+        each dimension by ``chunks``. By default, chunks will be chosen to load entire
+        input files into memory at once. This has a major impact on performance: please
+        see the full documentation for more details [2].
     concat_dim : str, or list of str, DataArray, Index or None, optional
-        Dimensions to concatenate files along. You only
-        need to provide this argument if any of the dimensions along which you
-        want to concatenate is not a dimension in the original datasets, e.g.,
-        if you want to stack a collection of 2D arrays along a third dimension.
-        Set ``concat_dim=[..., None, ...]`` explicitly to
+        Dimensions to concatenate files along. You only need to provide this argument
+        if any of the dimensions along which you want to concatenate is not a dimension
+        in the original datasets, e.g., if you want to stack a collection of 2D arrays
+        along a third dimension. Set ``concat_dim=[..., None, ...]`` explicitly to
         disable concatenation along a particular dimension.
     combine : {'by_coords', 'nested'}, optional
-        Whether ``xarray.combine_by_coords`` or ``xarray.combine_nested`` is
-        used to combine all the data. If this argument is not provided,
-        `xarray.auto_combine` is used, but in the future this behavior will
-        switch to use `xarray.combine_by_coords` by default.
+        Whether ``xarray.combine_by_coords`` or ``xarray.combine_nested`` is used to
+        combine all the data. If this argument is not provided, `xarray.auto_combine` is
+        used, but in the future this behavior will switch to use
+        `xarray.combine_by_coords` by default.
     compat : {'identical', 'equals', 'broadcast_equals',
               'no_conflicts', 'override'}, optional
         String indicating how to compare variables of the same name for

@@ -854,7 +851,7 @@ def open_mfdataset(
 
     .. [1] http://xarray.pydata.org/en/stable/dask.html
     .. [2] http://xarray.pydata.org/en/stable/dask.html#chunking-and-performance
-    """  # noqa
+    """
     if isinstance(paths, str):
         if is_remote_uri(paths):
             raise ValueError(
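
As the rewrapped docstring stresses, the combine strategy should now be named
explicitly rather than left to the deprecated `auto_combine` default. A usage sketch
with hypothetical file paths (dask must be installed):

    import xarray as xr

    # Files whose coordinates identify how they fit together:
    ds = xr.open_mfdataset("path/to/my/files/*.nc", combine="by_coords")

    # Files whose list order encodes the concatenation, stacked along a new dimension:
    ds = xr.open_mfdataset(
        ["run1.nc", "run2.nc"],
        combine="nested",
        concat_dim="run",
        chunks={"time": 100},
    )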

xarray/backends/locks.py
Lines changed: 1 addition & 3 deletions

@@ -21,9 +21,7 @@
 NETCDFC_LOCK = SerializableLock()
 
 
-_FILE_LOCKS = (
-    weakref.WeakValueDictionary()
-)  # type: MutableMapping[Any, threading.Lock]  # noqa
+_FILE_LOCKS = weakref.WeakValueDictionary()  # type: MutableMapping[Any, threading.Lock]
 
 
 def _get_threaded_lock(key):
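
The parenthesized wrapping existed only to dodge the line-length check, and the
trailing `# noqa` excused what still overflowed; the one-liner keeps the type comment
legible. Where variable annotations are available (Python 3.6+), the comment can go
away entirely. A sketch of that alternative, not what this commit does:

    import threading
    import weakref
    from typing import Any, MutableMapping

    # Entries disappear automatically once nothing else references the lock.
    _FILE_LOCKS: MutableMapping[Any, threading.Lock] = weakref.WeakValueDictionary()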

xarray/conventions.py
Lines changed: 1 addition & 1 deletion

@@ -753,7 +753,7 @@ def cf_encoder(variables, attributes):
     for var in new_vars.values():
         bounds = var.attrs["bounds"] if "bounds" in var.attrs else None
         if bounds and bounds in new_vars:
-            # see http://cfconventions.org/cf-conventions/cf-conventions.html#cell-boundaries  # noqa
+            # see http://cfconventions.org/cf-conventions/cf-conventions.html#cell-boundaries
             for attr in [
                 "units",
                 "standard_name",

xarray/core/alignment.py
Lines changed: 8 additions & 8 deletions

@@ -13,8 +13,8 @@
 from .variable import IndexVariable, Variable
 
 if TYPE_CHECKING:
-    from .dataarray import DataArray  # noqa: F401
-    from .dataset import Dataset  # noqa: F401
+    from .dataarray import DataArray
+    from .dataset import Dataset
 
 
 def _get_joiner(join):

@@ -350,8 +350,8 @@ def deep_align(
 
     This function is not public API.
     """
-    from .dataarray import DataArray  # noqa: F811
-    from .dataset import Dataset  # noqa: F811
+    from .dataarray import DataArray
+    from .dataset import Dataset
 
     if indexes is None:
         indexes = {}

@@ -411,7 +411,7 @@ def is_alignable(obj):
 
 
 def reindex_like_indexers(
-    target: Union["DataArray", "Dataset"], other: Union["DataArray", "Dataset"]
+    target: "Union[DataArray, Dataset]", other: "Union[DataArray, Dataset]"
 ) -> Dict[Hashable, pd.Index]:
     """Extract indexers to align target with other.
 

@@ -503,7 +503,7 @@ def reindex_variables(
     new_indexes : OrderedDict
         Dict of indexes associated with the reindexed variables.
     """
-    from .dataarray import DataArray  # noqa: F811
+    from .dataarray import DataArray
 
     # create variables for the new dataset
     reindexed = OrderedDict()  # type: OrderedDict[Any, Variable]

@@ -600,8 +600,8 @@ def _get_broadcast_dims_map_common_coords(args, exclude):
 
 def _broadcast_helper(arg, exclude, dims_map, common_coords):
 
-    from .dataarray import DataArray  # noqa: F811
-    from .dataset import Dataset  # noqa: F811
+    from .dataarray import DataArray
+    from .dataset import Dataset
 
     def _set_dims(var):
         # Add excluded dims to a copy of dims_map
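
Both kinds of suppression dropped here look stale: pyflakes reads names used inside
quoted annotations, so the `if TYPE_CHECKING` imports are not "unused" (F401), and,
judging by this cleanup, current pyflakes no longer reports the deliberate
function-local re-imports as redefinitions (F811) either. Quoting the entire
`Union[...]` defers evaluation of the whole annotation to the type checker. A
self-contained sketch of the pattern, with stdlib classes standing in for DataArray
and Dataset:

    from typing import TYPE_CHECKING, Union

    if TYPE_CHECKING:
        # Type-checking-only imports; the quoted annotation below counts as a
        # use, so no "# noqa: F401" is required.
        from decimal import Decimal
        from fractions import Fraction


    def as_float(x: "Union[Decimal, Fraction]") -> float:
        # Deliberate runtime re-import (xarray does this to dodge circular
        # imports); no longer reported as F811.
        from decimal import Decimal

        return float(x) if isinstance(x, Decimal) else x.numerator / x.denominator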

xarray/core/common.py
Lines changed: 5 additions & 5 deletions

@@ -293,7 +293,7 @@ def _ipython_key_completions_(self) -> List[str]:
         """Provide method for the key-autocompletions in IPython.
         See http://ipython.readthedocs.io/en/stable/config/integrating.html#tab-completion
         For the details.
-        """  # noqa
+        """
         item_lists = [
             item
             for sublist in self._item_sources

@@ -669,7 +669,7 @@ def groupby(self, group, squeeze: bool = True, restore_coord_dims: bool = None):
         --------
         core.groupby.DataArrayGroupBy
         core.groupby.DatasetGroupBy
-        """  # noqa
+        """
         return self._groupby_cls(
             self, group, squeeze=squeeze, restore_coord_dims=restore_coord_dims
         )

@@ -732,7 +732,7 @@ def groupby_bins(
         References
         ----------
         .. [1] http://pandas.pydata.org/pandas-docs/stable/generated/pandas.cut.html
-        """  # noqa
+        """
         return self._groupby_cls(
             self,
             group,

@@ -808,7 +808,7 @@ def rolling(
         --------
         core.rolling.DataArrayRolling
         core.rolling.DatasetRolling
-        """  # noqa
+        """
         dim = either_dict_or_kwargs(dim, window_kwargs, "rolling")
         return self._rolling_cls(self, dim, min_periods=min_periods, center=center)

@@ -1005,7 +1005,7 @@ def resample(
         ----------
 
         .. [1] http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
-        """  # noqa
+        """
         # TODO support non-string indexer after removing the old API.
 
         from .dataarray import DataArray
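
These five hunks are the same docstring cleanup across the shared `groupby`,
`groupby_bins`, `rolling`, and `resample` machinery. For orientation, a toy sketch of
the methods whose docstrings are touched:

    import numpy as np
    import pandas as pd
    import xarray as xr

    da = xr.DataArray(
        np.arange(12.0),
        dims="time",
        coords={"time": pd.date_range("2000-01-01", periods=12, freq="D")},
    )

    da.groupby("time.dayofweek").mean()     # core.groupby.DataArrayGroupBy
    da.rolling(time=3, center=True).mean()  # core.rolling.DataArrayRolling
    da.resample(time="W").mean()            # offset alias, per the pandas link above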

xarray/core/dataarray.py
Lines changed: 1 addition & 1 deletion

@@ -3054,7 +3054,7 @@ def integrate(
         return self._from_temp_dataset(ds)
 
     # this needs to be at the end, or mypy will confuse with `str`
-    # https://mypy.readthedocs.io/en/latest/common_issues.html#dealing-with-conflicting-names  # noqa
+    # https://mypy.readthedocs.io/en/latest/common_issues.html#dealing-with-conflicting-names
     str = property(StringAccessor)
 
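
Rebinding the name `str` inside the class body is exactly the conflicting-names pitfall
the linked mypy page covers, and it is why the assignment must stay at the very end of
the class. A self-contained sketch of the accessor-as-property pattern, with
hypothetical classes:

    class UpperAccessor:
        def __init__(self, obj):
            self._obj = obj

        def upper(self):
            return type(self._obj)(self._obj.value.upper())


    class Box:
        def __init__(self, value):
            self.value = value

        # Must come last: past this line, `str` in the class body names the
        # accessor property rather than the builtin type.
        str = property(UpperAccessor)


    assert Box("hi").str.upper().value == "HI"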

xarray/core/dataset.py
Lines changed: 4 additions & 4 deletions

@@ -1063,7 +1063,7 @@ def copy(self, deep: bool = False, data: Mapping = None) -> "Dataset":
         See Also
         --------
         pandas.DataFrame.copy
-        """  # noqa
+        """
         if data is None:
             variables = OrderedDict(
                 (k, v.copy(deep=deep)) for k, v in self._variables.items()

@@ -1714,7 +1714,7 @@ def chunk(
             from dask.base import tokenize
         except ImportError:
             # raise the usual error if dask is entirely missing
-            import dask  # noqa
+            import dask  # noqa: F401
 
             raise ImportError("xarray requires dask version 0.9 or newer")
 

@@ -4178,7 +4178,7 @@ def apply(
         Data variables:
             foo      (dim_0, dim_1) float64 0.3751 1.951 1.945 0.2948 0.711 0.3948
             bar      (x) float64 1.0 2.0
-        """  # noqa
+        """
         variables = OrderedDict(
             (k, maybe_wrap_array(v, func(v, *args, **kwargs)))
             for k, v in self.data_vars.items()

@@ -5381,7 +5381,7 @@ def filter_by_attrs(self, **kwargs):
             temperature    (x, y, time) float64 25.86 20.82 6.954 23.13 10.25 11.68 ...
             precipitation  (x, y, time) float64 5.702 0.9422 2.075 1.178 3.284 ...
 
-        """  # noqa
+        """
         selection = []
         for var_name, variable in self.variables.items():
             has_value_flag = False
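
The `import dask` in `chunk` exists only for its failure mode: if dask is missing
entirely, re-importing it raises the interpreter's usual ImportError, and only when
dask is present but lacks `dask.base.tokenize` does the custom version message fire.
The import itself is never used, hence F401. A stripped-down sketch of the idiom as a
hypothetical helper:

    def require_tokenize():
        """Return dask.base.tokenize with xarray-style error handling."""
        try:
            from dask.base import tokenize
        except ImportError:
            # If dask is absent, this re-import raises the standard error.
            import dask  # noqa: F401

            # Otherwise dask exists but is too old for dask.base.tokenize.
            raise ImportError("xarray requires dask version 0.9 or newer")
        return tokenize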

xarray/core/indexing.py
Lines changed: 2 additions & 2 deletions

@@ -331,7 +331,7 @@ class ExplicitIndexer:
     __slots__ = ("_key",)
 
     def __init__(self, key):
-        if type(self) is ExplicitIndexer:  # noqa
+        if type(self) is ExplicitIndexer:
             raise TypeError("cannot instantiate base ExplicitIndexer objects")
         self._key = tuple(key)
 

@@ -1261,7 +1261,7 @@ def _indexing_array_and_key(self, key):
             array = self.array
             # We want 0d slices rather than scalars. This is achieved by
             # appending an ellipsis (see
-            # https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#detailed-notes).  # noqa
+            # https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#detailed-notes).
             key = key.tuple + (Ellipsis,)
         else:
             raise TypeError("unexpected key type: {}".format(type(key)))
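
The identity check blocks direct instantiation of the base class while leaving
subclasses free to call it, and `is` (unlike `==`) does not trip pycodestyle's
type-comparison check (E721), so the blanket `# noqa` was apparently waiving nothing.
A minimal sketch with hypothetical classes:

    class Indexer:
        def __init__(self, key):
            if type(self) is Indexer:
                raise TypeError("cannot instantiate base Indexer objects")
            self._key = tuple(key)


    class BasicIndexer(Indexer):
        pass


    BasicIndexer([0, 1])  # fine
    # Indexer([0, 1])     # would raise TypeError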
