Skip to content

CI: Update version of 'black' #36493

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 12 commits
Sep 22, 2020
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
repos:
- repo: https://github.com/python/black
rev: 19.10b0
rev: 20.8b1
hooks:
- id: black
language_version: python3
Expand Down
2 changes: 1 addition & 1 deletion asv_bench/benchmarks/arithmetic.py
Original file line number Diff line number Diff line change
Expand Up @@ -125,7 +125,7 @@ def setup(self, op):
arr1 = np.random.randn(n_rows, int(n_cols / 2)).astype("f8")
arr2 = np.random.randn(n_rows, int(n_cols / 2)).astype("f4")
df = pd.concat(
[pd.DataFrame(arr1), pd.DataFrame(arr2)], axis=1, ignore_index=True,
[pd.DataFrame(arr1), pd.DataFrame(arr2)], axis=1, ignore_index=True
)
# should already be the case, but just to be sure
df._consolidate_inplace()
Expand Down
2 changes: 1 addition & 1 deletion doc/make.py
Original file line number Diff line number Diff line change
Expand Up @@ -286,7 +286,7 @@ def main():

joined = ",".join(cmds)
argparser = argparse.ArgumentParser(
description="pandas documentation builder", epilog=f"Commands: {joined}",
description="pandas documentation builder", epilog=f"Commands: {joined}"
)

joined = ", ".join(cmds)
Expand Down
2 changes: 1 addition & 1 deletion doc/source/conf.py
Original file line number Diff line number Diff line change
Expand Up @@ -308,7 +308,7 @@

for method in methods:
# ... and each of its public methods
moved_api_pages.append((f"{old}.{method}", f"{new}.{method}",))
moved_api_pages.append((f"{old}.{method}", f"{new}.{method}"))

if pattern is None:
html_additional_pages = {
Expand Down
2 changes: 1 addition & 1 deletion doc/source/development/contributing.rst
Original file line number Diff line number Diff line change
Expand Up @@ -720,7 +720,7 @@ submitting code to run the check yourself::
to auto-format your code. Additionally, many editors have plugins that will
apply ``black`` as you edit files.

You should use a ``black`` version >= 19.10b0 as previous versions are not compatible
You should use a ``black`` version 20.8b1 as previous versions are not compatible
with the pandas codebase.

If you wish to run these checks automatically, we encourage you to use
Expand Down
2 changes: 1 addition & 1 deletion environment.yml
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ dependencies:
- cython>=0.29.21

# code checks
- black=19.10b0
- black=20.8b1
- cpplint
- flake8<3.8.0 # temporary pin, GH#34150
- flake8-comprehensions>=3.1.0 # used by flake8, linting of unnecessary comprehensions
Expand Down
3 changes: 1 addition & 2 deletions pandas/_vendored/typing_extensions.py
Original file line number Diff line number Diff line change
Expand Up @@ -2116,8 +2116,7 @@ def __init_subclass__(cls, *args, **kwargs):
raise TypeError(f"Cannot subclass {cls.__module__}.Annotated")

def _strip_annotations(t):
"""Strips the annotations from a given type.
"""
"""Strips the annotations from a given type."""
if isinstance(t, _AnnotatedAlias):
return _strip_annotations(t.__origin__)
if isinstance(t, typing._GenericAlias):
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/aggregation.py
Original file line number Diff line number Diff line change
Expand Up @@ -387,7 +387,7 @@ def validate_func_kwargs(


def transform(
obj: FrameOrSeries, func: AggFuncType, axis: Axis, *args, **kwargs,
obj: FrameOrSeries, func: AggFuncType, axis: Axis, *args, **kwargs
) -> FrameOrSeries:
"""
Transform a DataFrame or Series
Expand Down
9 changes: 4 additions & 5 deletions pandas/core/algorithms.py
Original file line number Diff line number Diff line change
Expand Up @@ -1023,11 +1023,10 @@ def checked_add_with_arr(arr, b, arr_mask=None, b_mask=None):
to_raise = ((np.iinfo(np.int64).max - b2 < arr) & not_nan).any()
else:
to_raise = (
((np.iinfo(np.int64).max - b2[mask1] < arr[mask1]) & not_nan[mask1]).any()
or (
(np.iinfo(np.int64).min - b2[mask2] > arr[mask2]) & not_nan[mask2]
).any()
)
(np.iinfo(np.int64).max - b2[mask1] < arr[mask1]) & not_nan[mask1]
).any() or (
(np.iinfo(np.int64).min - b2[mask2] > arr[mask2]) & not_nan[mask2]
).any()

if to_raise:
raise OverflowError("Overflow in int64 addition")
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/array_algos/replace.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@


def compare_or_regex_search(
a: ArrayLike, b: Union[Scalar, Pattern], regex: bool, mask: ArrayLike,
a: ArrayLike, b: Union[Scalar, Pattern], regex: bool, mask: ArrayLike
) -> Union[ArrayLike, bool]:
"""
Compare two array_like inputs of the same shape or two scalar values
Expand Down
10 changes: 4 additions & 6 deletions pandas/core/frame.py
Original file line number Diff line number Diff line change
Expand Up @@ -449,9 +449,7 @@ def __init__(
if isinstance(data, BlockManager):
if index is None and columns is None and dtype is None and copy is False:
# GH#33357 fastpath
NDFrame.__init__(
self, data,
)
NDFrame.__init__(self, data)
return

mgr = self._init_mgr(
Expand Down Expand Up @@ -5747,7 +5745,7 @@ def nsmallest(self, n, columns, keep="first") -> DataFrame:
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
Iceland 337000 17036 IS
Iceland 337000 17036 IS

When using ``keep='last'``, ties are resolved in reverse order:

Expand Down Expand Up @@ -7142,7 +7140,7 @@ def unstack(self, level=-1, fill_value=None):

return unstack(self, level, fill_value)

@Appender(_shared_docs["melt"] % dict(caller="df.melt(", other="melt",))
@Appender(_shared_docs["melt"] % dict(caller="df.melt(", other="melt"))
def melt(
self,
id_vars=None,
Expand Down Expand Up @@ -8624,7 +8622,7 @@ def blk_func(values):
# After possibly _get_data and transposing, we are now in the
# simple case where we can use BlockManager.reduce
res = df._mgr.reduce(blk_func)
out = df._constructor(res,).iloc[0].rename(None)
out = df._constructor(res).iloc[0].rename(None)
if out_dtype is not None:
out = out.astype(out_dtype)
if axis == 0 and is_object_dtype(out.dtype):
Expand Down
10 changes: 3 additions & 7 deletions pandas/core/series.py
Original file line number Diff line number Diff line change
Expand Up @@ -198,7 +198,7 @@ class Series(base.IndexOpsMixin, generic.NDFrame):
# Constructors

def __init__(
self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False,
self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False
):

if (
Expand All @@ -208,9 +208,7 @@ def __init__(
and copy is False
):
# GH#33357 called with just the SingleBlockManager
NDFrame.__init__(
self, data,
)
NDFrame.__init__(self, data)
self.name = name
return

Expand Down Expand Up @@ -329,9 +327,7 @@ def __init__(

data = SingleBlockManager.from_array(data, index)

generic.NDFrame.__init__(
self, data,
)
generic.NDFrame.__init__(self, data)
self.name = name
self._set_axis(0, index, fastpath=True)

Expand Down
2 changes: 1 addition & 1 deletion pandas/core/sorting.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ def get_indexer_indexer(
)
elif isinstance(target, ABCMultiIndex):
indexer = lexsort_indexer(
target._get_codes_for_sorting(), orders=ascending, na_position=na_position,
target._get_codes_for_sorting(), orders=ascending, na_position=na_position
)
else:
# Check monotonic-ness before sort an index (GH 11080)
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/util/numba_.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ def set_use_numba(enable: bool = False) -> None:


def get_jit_arguments(
engine_kwargs: Optional[Dict[str, bool]] = None, kwargs: Optional[Dict] = None,
engine_kwargs: Optional[Dict[str, bool]] = None, kwargs: Optional[Dict] = None
) -> Tuple[bool, bool, bool]:
"""
Return arguments to pass to numba.JIT, falling back on pandas default JIT settings.
Expand Down
4 changes: 0 additions & 4 deletions pandas/io/formats/format.py
Original file line number Diff line number Diff line change
Expand Up @@ -1382,10 +1382,6 @@ def _format(x):


class FloatArrayFormatter(GenericArrayFormatter):
"""

"""

def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)

Expand Down
4 changes: 2 additions & 2 deletions pandas/io/formats/latex.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,8 +41,8 @@ def __init__(
self.multirow = multirow
self.clinebuf: List[List[int]] = []
self.strcols = self._get_strcols()
self.strrows: List[List[str]] = (
list(zip(*self.strcols)) # type: ignore[arg-type]
self.strrows: List[List[str]] = list(
zip(*self.strcols) # type: ignore[arg-type]
)

def get_strrow(self, row_num: int) -> str:
Expand Down
16 changes: 5 additions & 11 deletions pandas/tests/arrays/sparse/test_array.py
Original file line number Diff line number Diff line change
Expand Up @@ -193,9 +193,7 @@ def test_constructor_inferred_fill_value(self, data, fill_value):
assert result == fill_value

@pytest.mark.parametrize("format", ["coo", "csc", "csr"])
@pytest.mark.parametrize(
"size", [0, 10],
)
@pytest.mark.parametrize("size", [0, 10])
@td.skip_if_no_scipy
def test_from_spmatrix(self, size, format):
import scipy.sparse
Expand Down Expand Up @@ -693,17 +691,13 @@ def test_getslice_tuple(self):
dense = np.array([np.nan, 0, 3, 4, 0, 5, np.nan, np.nan, 0])

sparse = SparseArray(dense)
res = sparse[
4:,
] # noqa: E231
exp = SparseArray(dense[4:,]) # noqa: E231
res = sparse[(slice(4, None),)]
exp = SparseArray(dense[4:])
tm.assert_sp_array_equal(res, exp)

sparse = SparseArray(dense, fill_value=0)
res = sparse[
4:,
] # noqa: E231
exp = SparseArray(dense[4:,], fill_value=0) # noqa: E231
res = sparse[(slice(4, None),)]
exp = SparseArray(dense[4:], fill_value=0)
tm.assert_sp_array_equal(res, exp)

msg = "too many indices for array"
Expand Down
16 changes: 8 additions & 8 deletions pandas/tests/frame/test_analytics.py
Original file line number Diff line number Diff line change
Expand Up @@ -1060,14 +1060,14 @@ def test_any_all_bool_only(self):
(np.any, {"A": pd.Series([0.0, 1.0], dtype="float")}, True),
(np.all, {"A": pd.Series([0, 1], dtype=int)}, False),
(np.any, {"A": pd.Series([0, 1], dtype=int)}, True),
pytest.param(np.all, {"A": pd.Series([0, 1], dtype="M8[ns]")}, False,),
pytest.param(np.any, {"A": pd.Series([0, 1], dtype="M8[ns]")}, True,),
pytest.param(np.all, {"A": pd.Series([1, 2], dtype="M8[ns]")}, True,),
pytest.param(np.any, {"A": pd.Series([1, 2], dtype="M8[ns]")}, True,),
pytest.param(np.all, {"A": pd.Series([0, 1], dtype="m8[ns]")}, False,),
pytest.param(np.any, {"A": pd.Series([0, 1], dtype="m8[ns]")}, True,),
pytest.param(np.all, {"A": pd.Series([1, 2], dtype="m8[ns]")}, True,),
pytest.param(np.any, {"A": pd.Series([1, 2], dtype="m8[ns]")}, True,),
pytest.param(np.all, {"A": pd.Series([0, 1], dtype="M8[ns]")}, False),
pytest.param(np.any, {"A": pd.Series([0, 1], dtype="M8[ns]")}, True),
pytest.param(np.all, {"A": pd.Series([1, 2], dtype="M8[ns]")}, True),
pytest.param(np.any, {"A": pd.Series([1, 2], dtype="M8[ns]")}, True),
pytest.param(np.all, {"A": pd.Series([0, 1], dtype="m8[ns]")}, False),
pytest.param(np.any, {"A": pd.Series([0, 1], dtype="m8[ns]")}, True),
pytest.param(np.all, {"A": pd.Series([1, 2], dtype="m8[ns]")}, True),
pytest.param(np.any, {"A": pd.Series([1, 2], dtype="m8[ns]")}, True),
(np.all, {"A": pd.Series([0, 1], dtype="category")}, False),
(np.any, {"A": pd.Series([0, 1], dtype="category")}, True),
(np.all, {"A": pd.Series([1, 2], dtype="category")}, True),
Expand Down
4 changes: 1 addition & 3 deletions pandas/tests/io/test_gcs.py
Original file line number Diff line number Diff line change
Expand Up @@ -108,9 +108,7 @@ def test_to_csv_compression_encoding_gcs(gcs_buffer, compression_only, encoding)
compression_only = "gz"
compression["method"] = "infer"
path_gcs += f".{compression_only}"
df.to_csv(
path_gcs, compression=compression, encoding=encoding,
)
df.to_csv(path_gcs, compression=compression, encoding=encoding)
assert gcs_buffer.getvalue() == buffer.getvalue()
read_df = read_csv(path_gcs, index_col=0, compression="infer", encoding=encoding)
tm.assert_frame_equal(df, read_df)
Expand Down
2 changes: 1 addition & 1 deletion pandas/tests/io/test_parquet.py
Original file line number Diff line number Diff line change
Expand Up @@ -572,7 +572,7 @@ def test_s3_roundtrip(self, df_compat, s3_resource, pa, s3so):
pytest.param(
["A"],
marks=pytest.mark.xfail(
PY38, reason="Getting back empty DataFrame", raises=AssertionError,
PY38, reason="Getting back empty DataFrame", raises=AssertionError
),
),
[],
Expand Down
23 changes: 13 additions & 10 deletions pandas/tests/scalar/timestamp/test_constructors.py
Original file line number Diff line number Diff line change
Expand Up @@ -259,17 +259,20 @@ def test_constructor_keyword(self):
Timestamp("20151112")
)

assert repr(
Timestamp(
year=2015,
month=11,
day=12,
hour=1,
minute=2,
second=3,
microsecond=999999,
assert (
repr(
Timestamp(
year=2015,
month=11,
day=12,
hour=1,
minute=2,
second=3,
microsecond=999999,
)
)
) == repr(Timestamp("2015-11-12 01:02:03.999999"))
== repr(Timestamp("2015-11-12 01:02:03.999999"))
)

def test_constructor_fromordinal(self):
base = datetime(2000, 1, 1)
Expand Down
4 changes: 1 addition & 3 deletions pandas/tests/series/test_operators.py
Original file line number Diff line number Diff line change
Expand Up @@ -554,9 +554,7 @@ def test_unary_minus_nullable_int(
expected = pd.Series(target, dtype=dtype)
tm.assert_series_equal(result, expected)

@pytest.mark.parametrize(
"source", [[1, 2, 3], [1, 2, None], [-1, 0, 1]],
)
@pytest.mark.parametrize("source", [[1, 2, 3], [1, 2, None], [-1, 0, 1]])
def test_unary_plus_nullable_int(self, any_signed_nullable_int_dtype, source):
dtype = any_signed_nullable_int_dtype
expected = pd.Series(source, dtype=dtype)
Expand Down
2 changes: 1 addition & 1 deletion requirements-dev.txt
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ python-dateutil>=2.7.3
pytz
asv
cython>=0.29.21
black==19.10b0
black==20.8b1
cpplint
flake8<3.8.0
flake8-comprehensions>=3.1.0
Expand Down
3 changes: 1 addition & 2 deletions scripts/tests/test_validate_docstrings.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,7 @@


class BadDocstrings:
"""Everything here has a bad docstring
"""
"""Everything here has a bad docstring"""

def private_classes(self):
"""
Expand Down
2 changes: 1 addition & 1 deletion versioneer.py
Original file line number Diff line number Diff line change
Expand Up @@ -1073,7 +1073,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '{}' doesn't start with prefix '{}'".format(
full_tag, tag_prefix,
full_tag, tag_prefix
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
Expand Down