diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 69ce0f1adce22..816bb23865c04 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -230,6 +230,11 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then
     invgrep -R --include="*.py" -P '# type: (?!ignore)' pandas
     RET=$(($RET + $?)) ; echo $MSG "DONE"
 
+    # https://github.com/python/mypy/issues/7384
+    # MSG='Check for missing error codes with # type: ignore' ; echo $MSG
+    # invgrep -R --include="*.py" -P '# type: ignore(?!\[)' pandas
+    # RET=$(($RET + $?)) ; echo $MSG "DONE"
+
     MSG='Check for use of foo.__class__ instead of type(foo)' ; echo $MSG
     invgrep -R --include=*.{py,pyx} '\.__class__' pandas
     RET=$(($RET + $?)) ; echo $MSG "DONE"
diff --git a/environment.yml b/environment.yml
index 9efb995e29497..ed9762e5b8893 100644
--- a/environment.yml
+++ b/environment.yml
@@ -109,3 +109,4 @@ dependencies:
   - pip:
     - git+https://github.com/pandas-dev/pydata-sphinx-theme.git@master
     - git+https://github.com/numpy/numpydoc
+    - pyflakes>=2.2.0
diff --git a/pandas/_config/config.py b/pandas/_config/config.py
index d7b73a0a685d3..fb41b37980b2e 100644
--- a/pandas/_config/config.py
+++ b/pandas/_config/config.py
@@ -462,7 +462,7 @@ def register_option(
     for k in path:
         # NOTE: tokenize.Name is not a public constant
         # error: Module has no attribute "Name"  [attr-defined]
-        if not re.match("^" + tokenize.Name + "$", k):  # type: ignore
+        if not re.match("^" + tokenize.Name + "$", k):  # type: ignore[attr-defined]
             raise ValueError(f"{k} is not a valid identifier")
         if keyword.iskeyword(k):
             raise ValueError(f"{k} is a python keyword")
diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py
index 015b203a60256..ef9f36705a7ee 100644
--- a/pandas/compat/pickle_compat.py
+++ b/pandas/compat/pickle_compat.py
@@ -64,7 +64,7 @@ class _LoadSparseSeries:
     # https://github.com/python/mypy/issues/1020
     # error: Incompatible return type for "__new__" (returns "Series", but must return
     # a subtype of "_LoadSparseSeries")
-    def __new__(cls) -> "Series":  # type: ignore
+    def __new__(cls) -> "Series":  # type: ignore[misc]
         from pandas import Series
 
         warnings.warn(
@@ -82,7 +82,7 @@ class _LoadSparseFrame:
     # https://github.com/python/mypy/issues/1020
     # error: Incompatible return type for "__new__" (returns "DataFrame", but must
     # return a subtype of "_LoadSparseFrame")
-    def __new__(cls) -> "DataFrame":  # type: ignore
+    def __new__(cls) -> "DataFrame":  # type: ignore[misc]
         from pandas import DataFrame
 
         warnings.warn(
@@ -181,7 +181,7 @@ def __new__(cls) -> "DataFrame":  # type: ignore
 
 # functions for compat and uses a non-public class of the pickle module.
 # error: Name 'pkl._Unpickler' is not defined
-class Unpickler(pkl._Unpickler):  # type: ignore
+class Unpickler(pkl._Unpickler):  # type: ignore[name-defined]
     def find_class(self, module, name):
         # override superclass
         key = (module, name)
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 9e3ca4cc53363..befde7c355818 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -427,7 +427,8 @@ def isin(comps: AnyArrayLike, values: AnyArrayLike) -> np.ndarray:
     if is_categorical_dtype(comps):
         # TODO(extension)
         # handle categoricals
-        return comps.isin(values)  # type: ignore
+        # error: "ExtensionArray" has no attribute "isin"  [attr-defined]
+        return comps.isin(values)  # type: ignore[attr-defined]
 
     comps, dtype = _ensure_data(comps)
     values, _ = _ensure_data(values, dtype=dtype)
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index c6945e2f78b5a..1b5e1d81f00d6 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -468,6 +468,9 @@ def _ndarray(self) -> np.ndarray:
 
     def _from_backing_data(self: _T, arr: np.ndarray) -> _T:
         # Note: we do not retain `freq`
+        # error: Unexpected keyword argument "dtype" for "NDArrayBackedExtensionArray"
+        # TODO: add mypy error code
+        # https://github.com/python/mypy/issues/7384
         return type(self)(arr, dtype=self.dtype)  # type: ignore
 
     # ------------------------------------------------------------------
@@ -809,7 +812,8 @@ def _validate_scalar(
             value = NaT
 
         elif isinstance(value, self._recognized_scalars):
-            value = self._scalar_type(value)  # type: ignore
+            # error: Too many arguments for "object"  [call-arg]
+            value = self._scalar_type(value)  # type: ignore[call-arg]
 
         else:
             if msg is None:
@@ -1129,7 +1133,8 @@ def resolution(self) -> str:
         """
         Returns day, hour, minute, second, millisecond or microsecond
         """
-        return self._resolution_obj.attrname  # type: ignore
+        # error: Item "None" of "Optional[Any]" has no attribute "attrname"
+        return self._resolution_obj.attrname  # type: ignore[union-attr]
 
     @classmethod
     def _validate_frequency(cls, index, freq, **kwargs):
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index ed2437cc061bd..d76e0fd628a48 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -1057,7 +1057,7 @@ def mid(self):
 
     # https://github.com/python/mypy/issues/1362
     # Mypy does not support decorated properties
-    @property  # type: ignore
+    @property  # type: ignore[misc]
     @Appender(
         _interval_shared_docs["is_non_overlapping_monotonic"] % _shared_docs_kwargs
     )
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index fe78481d99d30..ddaf6d39f1837 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -278,8 +278,8 @@ def _check_compatible_with(self, other, setitem: bool = False):
     def dtype(self) -> PeriodDtype:
         return self._dtype
 
-    # error: Read-only property cannot override read-write property  [misc]
-    @property  # type: ignore
+    # error: Read-only property cannot override read-write property
+    @property  # type: ignore[misc]
     def freq(self) -> BaseOffset:
         """
         Return the frequency object for this PeriodArray.
diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py
index 0e9077e6d557e..05a5538a88772 100644
--- a/pandas/core/computation/expressions.py
+++ b/pandas/core/computation/expressions.py
@@ -227,7 +227,8 @@ def evaluate(op, a, b, use_numexpr: bool = True):
     if op_str is not None:
         use_numexpr = use_numexpr and _bool_arith_check(op_str, a, b)
         if use_numexpr:
-            return _evaluate(op, op_str, a, b)  # type: ignore
+            # error: "None" not callable
+            return _evaluate(op, op_str, a, b)  # type: ignore[misc]
     return _evaluate_standard(op, op_str, a, b)
 
 
diff --git a/pandas/core/computation/parsing.py b/pandas/core/computation/parsing.py
index c7c7103654a65..86e125b6b909b 100644
--- a/pandas/core/computation/parsing.py
+++ b/pandas/core/computation/parsing.py
@@ -37,7 +37,9 @@ def create_valid_python_identifier(name: str) -> str:
     special_characters_replacements = {
         char: f"_{token.tok_name[tokval]}_"
         # The ignore here is because of a bug in mypy that is resolved in 0.740
-        for char, tokval in tokenize.EXACT_TOKEN_TYPES.items()  # type: ignore
+        for char, tokval in (
+            tokenize.EXACT_TOKEN_TYPES.items()  # type: ignore[attr-defined]
+        )
     }
     special_characters_replacements.update(
         {
diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py
index 001eb1789007f..f1b11a6869c2b 100644
--- a/pandas/core/computation/pytables.py
+++ b/pandas/core/computation/pytables.py
@@ -63,7 +63,7 @@ def _resolve_name(self):
         return self.name
 
     # read-only property overwriting read/write property
-    @property  # type: ignore
+    @property  # type: ignore[misc]
     def value(self):
         return self._value
 
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py
index 2b2431149e230..0c23f1b4bcdf2 100644
--- a/pandas/core/config_init.py
+++ b/pandas/core/config_init.py
@@ -327,7 +327,7 @@ def is_terminal() -> bool:
     """
     try:
         # error: Name 'get_ipython' is not defined
-        ip = get_ipython()  # type: ignore
+        ip = get_ipython()  # type: ignore[name-defined]
     except NameError:  # assume standard Python interpreter in a terminal
         return True
     else:
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index a2ca4d84b2bf6..73109020b1b54 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -136,11 +136,13 @@ def ensure_int_or_float(arr: ArrayLike, copy: bool = False) -> np.array:
     """
    # TODO: GH27506 potential bug with ExtensionArrays
     try:
-        return arr.astype("int64", copy=copy, casting="safe")  # type: ignore
+        # error: Unexpected keyword argument "casting" for "astype"
+        return arr.astype("int64", copy=copy, casting="safe")  # type: ignore[call-arg]
     except TypeError:
         pass
     try:
-        return arr.astype("uint64", copy=copy, casting="safe")  # type: ignore
+        # error: Unexpected keyword argument "casting" for "astype"
+        return arr.astype("uint64", copy=copy, casting="safe")  # type: ignore[call-arg]
     except TypeError:
         if is_extension_array_dtype(arr.dtype):
             return arr.to_numpy(dtype="float64", na_value=np.nan)
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 8350e136417b1..8dc500dddeafa 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -635,7 +635,8 @@ class DatetimeTZDtype(PandasExtensionDtype):
 
     def __init__(self, unit: Union[str_type, "DatetimeTZDtype"] = "ns", tz=None):
         if isinstance(unit, DatetimeTZDtype):
-            unit, tz = unit.unit, unit.tz  # type: ignore
+            # error: "str" has no attribute "tz"
+            unit, tz = unit.unit, unit.tz  # type: ignore[attr-defined]
 
         if unit != "ns":
             if isinstance(unit, str) and tz is None:
diff --git a/pandas/core/dtypes/generic.py b/pandas/core/dtypes/generic.py
index 36eff214fc314..1f1017cfc1929 100644
--- a/pandas/core/dtypes/generic.py
+++ b/pandas/core/dtypes/generic.py
@@ -7,7 +7,7 @@ def create_pandas_abc_type(name, attr, comp):
 
     # https://github.com/python/mypy/issues/1006
     # error: 'classmethod' used with a non-method
-    @classmethod  # type: ignore
+    @classmethod  # type: ignore[misc]
     def _check(cls, inst) -> bool:
         return getattr(inst, attr, "_typ") in comp
 
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 79627e43d78c2..cbb9bcc8362ff 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2164,10 +2164,14 @@ def to_stata(
             from pandas.io.stata import StataWriter as statawriter
         elif version == 117:
             # mypy: Name 'statawriter' already defined (possibly by an import)
-            from pandas.io.stata import StataWriter117 as statawriter  # type: ignore
+            from pandas.io.stata import (  # type: ignore[no-redef]
+                StataWriter117 as statawriter,
+            )
         else:  # versions 118 and 119
             # mypy: Name 'statawriter' already defined (possibly by an import)
-            from pandas.io.stata import StataWriterUTF8 as statawriter  # type: ignore
+            from pandas.io.stata import (  # type: ignore[no-redef]
+                StataWriterUTF8 as statawriter,
+            )
 
         kwargs: Dict[str, Any] = {}
         if version is None or version >= 117:
@@ -2178,7 +2182,7 @@ def to_stata(
             kwargs["version"] = version
 
         # mypy: Too many arguments for "StataWriter"
-        writer = statawriter(  # type: ignore
+        writer = statawriter(  # type: ignore[call-arg]
             path,
             self,
             convert_dates=convert_dates,
@@ -3578,7 +3582,13 @@ def extract_unique_dtypes_from_dtypes_set(
             extracted_dtypes = [
                 unique_dtype
                 for unique_dtype in unique_dtypes
-                if issubclass(unique_dtype.type, tuple(dtypes_set))  # type: ignore
+                # error: Argument 1 to "tuple" has incompatible type
+                # "FrozenSet[Union[ExtensionDtype, str, Any, Type[str],
+                # Type[float], Type[int], Type[complex], Type[bool]]]";
+                # expected "Iterable[Union[type, Tuple[Any, ...]]]"
+                if issubclass(
+                    unique_dtype.type, tuple(dtypes_set)  # type: ignore[arg-type]
+                )
             ]
 
             return extracted_dtypes
@@ -5250,7 +5260,8 @@ def f(vals):
     # TODO: Just move the sort_values doc here.
     @Substitution(**_shared_doc_kwargs)
     @Appender(NDFrame.sort_values.__doc__)
-    def sort_values(  # type: ignore[override]  # NOQA # issue 27237
+    # error: Signature of "sort_values" incompatible with supertype "NDFrame"
+    def sort_values(  # type: ignore[override]
         self,
         by,
         axis=0,
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index e46fde1f59f16..42d02f37508fc 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -589,9 +589,9 @@ def swapaxes(self: FrameOrSeries, axis1, axis2, copy=True) -> FrameOrSeries:
 
         # ignore needed because of NDFrame constructor is different than
         # DataFrame/Series constructors.
-        return self._constructor(new_values, *new_axes).__finalize__(  # type: ignore
-            self, method="swapaxes"
-        )
+        return self._constructor(
+            new_values, *new_axes  # type: ignore[arg-type]
+        ).__finalize__(self, method="swapaxes")
 
     def droplevel(self: FrameOrSeries, level, axis=0) -> FrameOrSeries:
         """
@@ -4011,7 +4011,11 @@ def add_prefix(self: FrameOrSeries, prefix: str) -> FrameOrSeries:
         f = functools.partial("{prefix}{}".format, prefix=prefix)
 
         mapper = {self._info_axis_name: f}
-        return self.rename(**mapper)  # type: ignore
+        # error: Incompatible return value type (got "Optional[FrameOrSeries]",
+        # expected "FrameOrSeries")
+        # error: Argument 1 to "rename" of "NDFrame" has incompatible type
+        # "**Dict[str, partial[str]]"; expected "Union[str, int, None]"
+        return self.rename(**mapper)  # type: ignore[return-value, arg-type]
 
     def add_suffix(self: FrameOrSeries, suffix: str) -> FrameOrSeries:
         """
@@ -4070,7 +4074,11 @@ def add_suffix(self: FrameOrSeries, suffix: str) -> FrameOrSeries:
         f = functools.partial("{}{suffix}".format, suffix=suffix)
 
         mapper = {self._info_axis_name: f}
-        return self.rename(**mapper)  # type: ignore
+        # error: Incompatible return value type (got "Optional[FrameOrSeries]",
+        # expected "FrameOrSeries")
+        # error: Argument 1 to "rename" of "NDFrame" has incompatible type
+        # "**Dict[str, partial[str]]"; expected "Union[str, int, None]"
+        return self.rename(**mapper)  # type: ignore[return-value, arg-type]
 
     def sort_values(
         self,
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 1be381e38b157..fa8da79c8ea0e 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -885,7 +885,8 @@ def _format_data(self, name=None) -> str_t:
         if self.inferred_type == "string":
             is_justify = False
         elif self.inferred_type == "categorical":
-            if is_object_dtype(self.categories):  # type: ignore
+            # error: "Index" has no attribute "categories"
+            if is_object_dtype(self.categories):  # type: ignore[attr-defined]
                 is_justify = False
 
         return format_object_summary(
@@ -940,7 +941,8 @@ def _format_with_header(self, header, na_rep="NaN") -> List[str_t]:
             if mask.any():
                 result = np.array(result)
                 result[mask] = na_rep
-                result = result.tolist()  # type: ignore
+                # error: "List[str]" has no attribute "tolist"
+                result = result.tolist()  # type: ignore[attr-defined]
         else:
             result = trim_front(format_array(values, None, justify="left"))
         return header + result
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 15a7e25238983..0ce057d6e764a 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -54,7 +54,8 @@ def _join_i8_wrapper(joinf, with_indexers: bool = True):
     Create the join wrapper methods.
""" - @staticmethod # type: ignore + # error: 'staticmethod' used with a non-method + @staticmethod # type: ignore[misc] def wrapper(left, right): if isinstance(left, (np.ndarray, ABCIndex, ABCSeries, DatetimeLikeArrayMixin)): left = left.view("i8") @@ -95,7 +96,10 @@ class DatetimeIndexOpsMixin(ExtensionIndex): _bool_ops: List[str] = [] _field_ops: List[str] = [] - hasnans = cache_readonly(DatetimeLikeArrayMixin._hasnans.fget) # type: ignore + # error: "Callable[[Any], Any]" has no attribute "fget" + hasnans = cache_readonly( + DatetimeLikeArrayMixin._hasnans.fget # type: ignore[attr-defined] + ) _hasnans = hasnans # for index / array -agnostic code @property diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 6ca6eca1ff829..3186c555b7ae1 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -2744,7 +2744,8 @@ def _block_shape(values: ArrayLike, ndim: int = 1) -> ArrayLike: # TODO(EA2D): https://github.com/pandas-dev/pandas/issues/23023 # block.shape is incorrect for "2D" ExtensionArrays # We can't, and don't need to, reshape. - values = values.reshape(tuple((1,) + shape)) # type: ignore + # error: "ExtensionArray" has no attribute "reshape" + values = values.reshape(tuple((1,) + shape)) # type: ignore[attr-defined] return values diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py index 3379ee56b6ad0..aab10cea33632 100644 --- a/pandas/core/ops/array_ops.py +++ b/pandas/core/ops/array_ops.py @@ -349,7 +349,8 @@ def fill_bool(x, left=None): filler = fill_int if is_self_int_dtype and is_other_int_dtype else fill_bool res_values = na_logical_op(lvalues, rvalues, op) - res_values = filler(res_values) # type: ignore + # error: Cannot call function of unknown type + res_values = filler(res_values) # type: ignore[operator] return res_values diff --git a/pandas/core/resample.py b/pandas/core/resample.py index bfdfc65723433..e82a1d4d2cda8 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -967,7 +967,7 @@ def __init__(self, obj, *args, **kwargs): setattr(self, attr, kwargs.get(attr, getattr(parent, attr))) # error: Too many arguments for "__init__" of "object" - super().__init__(None) # type: ignore + super().__init__(None) # type: ignore[call-arg] self._groupby = groupby self._groupby.mutated = True self._groupby.grouper.mutated = True diff --git a/pandas/core/series.py b/pandas/core/series.py index ef3be854bc3bb..9e70120f67969 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -571,7 +571,8 @@ def _values(self): """ return self._mgr.internal_values() - @Appender(base.IndexOpsMixin.array.__doc__) # type: ignore + # error: Decorated property not supported + @Appender(base.IndexOpsMixin.array.__doc__) # type: ignore[misc] @property def array(self) -> ExtensionArray: return self._mgr._block.array_values() @@ -4921,7 +4922,10 @@ def to_timestamp(self, freq=None, how="start", copy=True) -> "Series": if not isinstance(self.index, PeriodIndex): raise TypeError(f"unsupported Type {type(self.index).__name__}") - new_index = self.index.to_timestamp(freq=freq, how=how) # type: ignore + # error: "PeriodIndex" has no attribute "to_timestamp" + new_index = self.index.to_timestamp( # type: ignore[attr-defined] + freq=freq, how=how + ) return self._constructor(new_values, index=new_index).__finalize__( self, method="to_timestamp" ) diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 7aac2f793f61a..3c1fe6bacefcf 100644 --- 
a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -309,7 +309,7 @@ def _convert_listlike_datetimes( if tz == "utc": # error: Item "DatetimeIndex" of "Union[DatetimeArray, DatetimeIndex]" has # no attribute "tz_convert" - arg = arg.tz_convert(None).tz_localize(tz) # type: ignore + arg = arg.tz_convert(None).tz_localize(tz) # type: ignore[union-attr] return arg elif is_datetime64_ns_dtype(arg_dtype): diff --git a/pandas/io/common.py b/pandas/io/common.py index 6ac8051f35b6f..f39b8279fbdb0 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -121,7 +121,15 @@ def stringify_path( """ if hasattr(filepath_or_buffer, "__fspath__"): # https://github.com/python/mypy/issues/1424 - return filepath_or_buffer.__fspath__() # type: ignore + # error: Item "str" of "Union[str, Path, IO[str]]" has no attribute + # "__fspath__" [union-attr] + # error: Item "IO[str]" of "Union[str, Path, IO[str]]" has no attribute + # "__fspath__" [union-attr] + # error: Item "str" of "Union[str, Path, IO[bytes]]" has no attribute + # "__fspath__" [union-attr] + # error: Item "IO[bytes]" of "Union[str, Path, IO[bytes]]" has no + # attribute "__fspath__" [union-attr] + return filepath_or_buffer.__fspath__() # type: ignore[union-attr] elif isinstance(filepath_or_buffer, pathlib.Path): return str(filepath_or_buffer) return _expand_user(filepath_or_buffer) @@ -516,7 +524,19 @@ def get_handle( return f, handles -class _BytesZipFile(zipfile.ZipFile, BytesIO): # type: ignore +# error: Definition of "__exit__" in base class "ZipFile" is incompatible with +# definition in base class "BytesIO" [misc] +# error: Definition of "__enter__" in base class "ZipFile" is incompatible with +# definition in base class "BytesIO" [misc] +# error: Definition of "__enter__" in base class "ZipFile" is incompatible with +# definition in base class "BinaryIO" [misc] +# error: Definition of "__enter__" in base class "ZipFile" is incompatible with +# definition in base class "IO" [misc] +# error: Definition of "read" in base class "ZipFile" is incompatible with +# definition in base class "BytesIO" [misc] +# error: Definition of "read" in base class "ZipFile" is incompatible with +# definition in base class "IO" [misc] +class _BytesZipFile(zipfile.ZipFile, BytesIO): # type: ignore[misc] """ Wrapper for standard library class ZipFile and allow the returned file-like handle to accept byte strings via `write` method. 
diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py
index 13f0ab1e8a52c..c89189f1e679a 100644
--- a/pandas/io/formats/html.py
+++ b/pandas/io/formats/html.py
@@ -86,8 +86,9 @@ def _get_columns_formatted_values(self) -> Iterable:
         return self.columns
 
     # https://github.com/python/mypy/issues/1237
+    # error: Signature of "is_truncated" incompatible with supertype "TableFormatter"
     @property
-    def is_truncated(self) -> bool:  # type: ignore
+    def is_truncated(self) -> bool:  # type: ignore[override]
         return self.fmt.is_truncated
 
     @property
diff --git a/pandas/io/formats/printing.py b/pandas/io/formats/printing.py
index 1cf79dc105901..23daab725ec65 100644
--- a/pandas/io/formats/printing.py
+++ b/pandas/io/formats/printing.py
@@ -499,7 +499,7 @@ def _justify(
     # error: Incompatible return value type (got "Tuple[List[Sequence[str]],
     # List[Sequence[str]]]", expected "Tuple[List[Tuple[str, ...]],
     # List[Tuple[str, ...]]]")
-    return head, tail  # type: ignore
+    return head, tail  # type: ignore[return-value]
 
 
 def format_object_attrs(
@@ -524,14 +524,16 @@ def format_object_attrs(
     attrs: List[Tuple[str, Union[str, int]]] = []
     if hasattr(obj, "dtype") and include_dtype:
         # error: "Sequence[Any]" has no attribute "dtype"
-        attrs.append(("dtype", f"'{obj.dtype}'"))  # type: ignore
+        attrs.append(("dtype", f"'{obj.dtype}'"))  # type: ignore[attr-defined]
     if getattr(obj, "name", None) is not None:
         # error: "Sequence[Any]" has no attribute "name"
-        attrs.append(("name", default_pprint(obj.name)))  # type: ignore
+        attrs.append(("name", default_pprint(obj.name)))  # type: ignore[attr-defined]
     # error: "Sequence[Any]" has no attribute "names"
-    elif getattr(obj, "names", None) is not None and any(obj.names):  # type: ignore
+    elif getattr(obj, "names", None) is not None and any(
+        obj.names  # type: ignore[attr-defined]
+    ):
         # error: "Sequence[Any]" has no attribute "names"
-        attrs.append(("names", default_pprint(obj.names)))  # type: ignore
+        attrs.append(("names", default_pprint(obj.names)))  # type: ignore[attr-defined]
     max_seq_items = get_option("display.max_seq_items") or len(obj)
     if len(obj) > max_seq_items:
         attrs.append(("length", len(obj)))
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index ff37c36962aec..0b06a26d4aa3c 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -115,7 +115,8 @@ def __init__(
         self.obj = obj
 
         if orient is None:
-            orient = self._default_orient  # type: ignore
+            # error: "Writer" has no attribute "_default_orient"
+            orient = self._default_orient  # type: ignore[attr-defined]
 
         self.orient = orient
         self.date_format = date_format
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index e0df4c29e543e..9f5b6041b0ffa 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -2280,7 +2280,8 @@ def _get_atom(cls, values: ArrayLike) -> "Col":
         Get an appropriately typed and shaped pytables.Col object for values.
""" dtype = values.dtype - itemsize = dtype.itemsize # type: ignore + # error: "ExtensionDtype" has no attribute "itemsize" + itemsize = dtype.itemsize # type: ignore[attr-defined] shape = values.shape if values.ndim == 1: @@ -3349,9 +3350,9 @@ def queryables(self) -> Dict[str, Any]: (v.cname, v) for v in self.values_axes if v.name in set(self.data_columns) ] - return dict(d1 + d2 + d3) # type: ignore - # error: List comprehension has incompatible type - # List[Tuple[Any, None]]; expected List[Tuple[str, IndexCol]] + # error: Unsupported operand types for + ("List[Tuple[str, IndexCol]]" + # and "List[Tuple[str, None]]") + return dict(d1 + d2 + d3) # type: ignore[operator] def index_cols(self): """ return a list of my index cols """ diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 7677d8a94d521..c12ddb6b8bd4a 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -1953,7 +1953,10 @@ def _open_file_binary_write( """ if hasattr(fname, "write"): # See https://github.com/python/mypy/issues/1424 for hasattr challenges - return fname, False, None # type: ignore + # error: Incompatible return value type (got "Tuple[Union[str, Path, + # IO[Any]], bool, None]", expected "Tuple[BinaryIO, bool, Union[str, + # Mapping[str, str], None]]") + return fname, False, None # type: ignore[return-value] elif isinstance(fname, (str, Path)): # Extract compression mode as given, if dict compression_typ, compression_args = get_compression_method(compression) diff --git a/pandas/plotting/_matplotlib/timeseries.py b/pandas/plotting/_matplotlib/timeseries.py index 95f9fbf3995ed..eef4276f0ed09 100644 --- a/pandas/plotting/_matplotlib/timeseries.py +++ b/pandas/plotting/_matplotlib/timeseries.py @@ -45,7 +45,10 @@ def _maybe_resample(series: "Series", ax, kwargs): if ax_freq is not None and freq != ax_freq: if is_superperiod(freq, ax_freq): # upsample input series = series.copy() - series.index = series.index.asfreq(ax_freq, how="s") # type: ignore + # error: "Index" has no attribute "asfreq" + series.index = series.index.asfreq( # type: ignore[attr-defined] + ax_freq, how="s" + ) freq = ax_freq elif _is_sup(freq, ax_freq): # one is weekly how = kwargs.pop("how", "last") @@ -222,7 +225,8 @@ def _get_index_freq(index: "Index") -> Optional[BaseOffset]: if freq is None: freq = getattr(index, "inferred_freq", None) if freq == "B": - weekdays = np.unique(index.dayofweek) # type: ignore + # error: "Index" has no attribute "dayofweek" + weekdays = np.unique(index.dayofweek) # type: ignore[attr-defined] if (5 in weekdays) or (6 in weekdays): freq = None diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index eaf48421dc071..9937726e88aaa 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -1511,23 +1511,24 @@ def test_slice_locs_na_raises(self): @pytest.mark.parametrize( "in_slice,expected", [ + # error: Slice index must be an integer or None (pd.IndexSlice[::-1], "yxdcb"), - (pd.IndexSlice["b":"y":-1], ""), # type: ignore - (pd.IndexSlice["b"::-1], "b"), # type: ignore - (pd.IndexSlice[:"b":-1], "yxdcb"), # type: ignore - (pd.IndexSlice[:"y":-1], "y"), # type: ignore - (pd.IndexSlice["y"::-1], "yxdcb"), # type: ignore - (pd.IndexSlice["y"::-4], "yb"), # type: ignore + (pd.IndexSlice["b":"y":-1], ""), # type: ignore[misc] + (pd.IndexSlice["b"::-1], "b"), # type: ignore[misc] + (pd.IndexSlice[:"b":-1], "yxdcb"), # type: ignore[misc] + (pd.IndexSlice[:"y":-1], "y"), # type: ignore[misc] + (pd.IndexSlice["y"::-1], "yxdcb"), # type: 
ignore[misc] + (pd.IndexSlice["y"::-4], "yb"), # type: ignore[misc] # absent labels - (pd.IndexSlice[:"a":-1], "yxdcb"), # type: ignore - (pd.IndexSlice[:"a":-2], "ydb"), # type: ignore - (pd.IndexSlice["z"::-1], "yxdcb"), # type: ignore - (pd.IndexSlice["z"::-3], "yc"), # type: ignore - (pd.IndexSlice["m"::-1], "dcb"), # type: ignore - (pd.IndexSlice[:"m":-1], "yx"), # type: ignore - (pd.IndexSlice["a":"a":-1], ""), # type: ignore - (pd.IndexSlice["z":"z":-1], ""), # type: ignore - (pd.IndexSlice["m":"m":-1], ""), # type: ignore + (pd.IndexSlice[:"a":-1], "yxdcb"), # type: ignore[misc] + (pd.IndexSlice[:"a":-2], "ydb"), # type: ignore[misc] + (pd.IndexSlice["z"::-1], "yxdcb"), # type: ignore[misc] + (pd.IndexSlice["z"::-3], "yc"), # type: ignore[misc] + (pd.IndexSlice["m"::-1], "dcb"), # type: ignore[misc] + (pd.IndexSlice[:"m":-1], "yx"), # type: ignore[misc] + (pd.IndexSlice["a":"a":-1], ""), # type: ignore[misc] + (pd.IndexSlice["z":"z":-1], ""), # type: ignore[misc] + (pd.IndexSlice["m":"m":-1], ""), # type: ignore[misc] ], ) def test_slice_locs_negative_step(self, in_slice, expected): diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py index df014171be817..0942c79837e7c 100644 --- a/pandas/tests/io/pytables/test_store.py +++ b/pandas/tests/io/pytables/test_store.py @@ -1751,9 +1751,9 @@ def col(t, column): # try to index a col which isn't a data_column msg = ( - f"column string2 is not a data_column.\n" - f"In order to read column string2 you must reload the dataframe \n" - f"into HDFStore and include string2 with the data_columns argument." + "column string2 is not a data_column.\n" + "In order to read column string2 you must reload the dataframe \n" + "into HDFStore and include string2 with the data_columns argument." 
             )
             with pytest.raises(AttributeError, match=msg):
                 store.create_table_index("f", columns=["string2"])
diff --git a/pandas/tests/io/test_fsspec.py b/pandas/tests/io/test_fsspec.py
index d64e2d1933ace..a0723452ccb70 100644
--- a/pandas/tests/io/test_fsspec.py
+++ b/pandas/tests/io/test_fsspec.py
@@ -15,7 +15,8 @@
 )
 # the ignore on the following line accounts for to_csv returning Optional(str)
 # in general, but always str in the case we give no filename
-text = df1.to_csv(index=False).encode()  # type: ignore
+# error: Item "None" of "Optional[str]" has no attribute "encode"
+text = df1.to_csv(index=False).encode()  # type: ignore[union-attr]
 
 
 @pytest.fixture
diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py
index 6135ccba1573d..f81bca7e85156 100644
--- a/pandas/util/_decorators.py
+++ b/pandas/util/_decorators.py
@@ -323,7 +323,8 @@ def wrapper(*args, **kwargs) -> Callable[..., Any]:
             sig = inspect.Signature(params)
 
             # https://github.com/python/typing/issues/598
-            func.__signature__ = sig  # type: ignore
+            # error: "F" has no attribute "__signature__"
+            func.__signature__ = sig  # type: ignore[attr-defined]
             return cast(F, wrapper)
 
         return decorate
@@ -357,8 +358,12 @@ def decorator(decorated: F) -> F:
 
         for docstring in docstrings:
             if hasattr(docstring, "_docstring_components"):
+                # error: Item "str" of "Union[str, Callable[..., Any]]" has no
+                # attribute "_docstring_components"  [union-attr]
+                # error: Item "function" of "Union[str, Callable[..., Any]]"
+                # has no attribute "_docstring_components"  [union-attr]
                 docstring_components.extend(
-                    docstring._docstring_components  # type: ignore
+                    docstring._docstring_components  # type: ignore[union-attr]
                 )
             elif isinstance(docstring, str) or docstring.__doc__:
                 docstring_components.append(docstring)
@@ -373,7 +378,10 @@ def decorator(decorated: F) -> F:
             ]
         )
 
-        decorated._docstring_components = docstring_components  # type: ignore
+        # error: "F" has no attribute "_docstring_components"
+        decorated._docstring_components = (  # type: ignore[attr-defined]
+            docstring_components
+        )
         return decorated
 
     return decorator
diff --git a/requirements-dev.txt b/requirements-dev.txt
index c0dd77cd73ddc..6a87b0a99a4f8 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -73,4 +73,5 @@ cftime
 pyreadstat
 tabulate>=0.8.3
 git+https://github.com/pandas-dev/pydata-sphinx-theme.git@master
-git+https://github.com/numpy/numpydoc
\ No newline at end of file
+git+https://github.com/numpy/numpydoc
+pyflakes>=2.2.0
\ No newline at end of file
diff --git a/setup.cfg b/setup.cfg
index ee5725e36d193..84c281b756395 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -122,6 +122,7 @@ check_untyped_defs=True
 strict_equality=True
 warn_redundant_casts = True
 warn_unused_ignores = True
+show_error_codes = True
 
 [mypy-pandas.tests.*]
 check_untyped_defs=False