diff --git a/pandas/core/base.py b/pandas/core/base.py
index a3d3c3791e20c..a25651a73f507 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -11,7 +11,7 @@
 import pandas.lib as lib
 from pandas.util.decorators import Appender, cache_readonly
 from pandas.core.strings import StringMethods
-
+from pandas.core.common import AbstractMethodError
 
 _shared_docs = dict()
 _indexops_doc_kwargs = dict(klass='IndexOpsMixin', inplace='',
@@ -32,7 +32,7 @@ class StringMixin(object):
     # Formatting
 
     def __unicode__(self):
-        raise NotImplementedError
+        raise AbstractMethodError(self)
 
     def __str__(self):
         """
@@ -566,4 +566,4 @@ def duplicated(self, take_last=False):
 
     # abstracts
     def _update_inplace(self, result, **kwargs):
-        raise NotImplementedError
+        raise AbstractMethodError(self)
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index b79f2c9b4f6df..0d66a89b0a585 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -1166,7 +1166,8 @@ def fillna(self, fill_value=None, method=None, limit=None):
         if fill_value is None:
             fill_value = np.nan
         if limit is not None:
-            raise NotImplementedError
+            raise NotImplementedError("specifying a limit for fillna has not "
+                                      "been implemented yet")
 
         values = self._codes
 
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 0fb35c2fb02fc..7c449ff9ef561 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -39,6 +39,17 @@ class AmbiguousIndexError(PandasError, KeyError):
     pass
 
 
+class AbstractMethodError(NotImplementedError):
+    """Raise this error instead of NotImplementedError for abstract methods
+    while keeping compatibility with Python 2 and Python 3.
+    """
+    def __init__(self, class_instance):
+        self.class_instance = class_instance
+
+    def __str__(self):
+        return "This method must be defined on the concrete class of " \
+            + self.class_instance.__class__.__name__
+
 _POSSIBLY_CAST_DTYPES = set([np.dtype(t).name
                              for t in ['O', 'int8',
                                        'uint8', 'int16', 'uint16', 'int32',
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index bc65f1f62fa1a..a99df54650246 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -21,7 +21,8 @@
 from pandas.core.common import (isnull, notnull, is_list_like,
                                 _values_from_object, _maybe_promote,
                                 _maybe_box_datetimelike, ABCSeries,
-                                SettingWithCopyError, SettingWithCopyWarning)
+                                SettingWithCopyError, SettingWithCopyWarning,
+                                AbstractMethodError)
 import pandas.core.nanops as nanops
 from pandas.util.decorators import Appender, Substitution, deprecate_kwarg
 from pandas.core import config
@@ -137,7 +138,7 @@ def _init_mgr(self, mgr, axes=None, dtype=None, copy=False):
 
     @property
    def _constructor(self):
-        raise NotImplementedError
+        raise AbstractMethodError(self)
 
     def __unicode__(self):
         # unicode representation based upon iterating over self
@@ -152,7 +153,7 @@ def _local_dir(self):
 
     @property
     def _constructor_sliced(self):
-        raise NotImplementedError
+        raise AbstractMethodError(self)
 
     #----------------------------------------------------------------------
     # Axis
@@ -1100,7 +1101,7 @@ def _iget_item_cache(self, item):
         return lower
 
     def _box_item_values(self, key, values):
-        raise NotImplementedError
+        raise AbstractMethodError(self)
 
     def _maybe_cache_changed(self, item, value):
         """
@@ -3057,7 +3058,8 @@ def first(self, offset):
         """
         from pandas.tseries.frequencies import to_offset
         if not isinstance(self.index, DatetimeIndex):
-            raise NotImplementedError
+            raise NotImplementedError("'first' only supports a DatetimeIndex "
+                                      "index")
 
         if len(self.index) == 0:
             return self
@@ -3091,7 +3093,8 @@ def last(self, offset):
         """
         from pandas.tseries.frequencies import to_offset
         if not isinstance(self.index, DatetimeIndex):
-            raise NotImplementedError
+            raise NotImplementedError("'last' only supports a DatetimeIndex "
+                                      "index")
 
         if len(self.index) == 0:
             return self
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 6d98b3b99021b..c80cc21e79e67 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -25,7 +25,8 @@
                                 notnull, _DATELIKE_DTYPES, is_numeric_dtype,
                                 is_timedelta64_dtype, is_datetime64_dtype,
                                 is_categorical_dtype, _values_from_object,
-                                is_datetime_or_timedelta_dtype, is_bool_dtype)
+                                is_datetime_or_timedelta_dtype, is_bool_dtype,
+                                AbstractMethodError)
 from pandas.core.config import option_context
 import pandas.lib as lib
 from pandas.lib import Timestamp
@@ -279,7 +280,7 @@ def _set_grouper(self, obj, sort=False):
         return self.grouper
 
     def _get_binner_for_grouping(self, obj):
-        raise NotImplementedError
+        raise AbstractMethodError(self)
 
     @property
     def groups(self):
@@ -670,7 +671,7 @@ def _python_apply_general(self, f):
                                    not_indexed_same=mutated)
 
     def aggregate(self, func, *args, **kwargs):
-        raise NotImplementedError
+        raise AbstractMethodError(self)
 
     @Appender(_agg_doc)
     def agg(self, func, *args, **kwargs):
@@ -680,7 +681,7 @@ def _iterate_slices(self):
         yield self.name, self._selected_obj
 
     def transform(self, func, *args, **kwargs):
-        raise NotImplementedError
+        raise AbstractMethodError(self)
 
     def mean(self):
         """
@@ -1127,7 +1128,7 @@ def _python_agg_general(self, func, *args, **kwargs):
         return self._wrap_aggregated_output(output)
 
     def _wrap_applied_output(self, *args, **kwargs):
-        raise NotImplementedError
+        raise AbstractMethodError(self)
 
     def _concat_objects(self, keys, values, not_indexed_same=False):
         from pandas.tools.merge import concat
@@ -1484,7 +1485,8 @@ def aggregate(self, values, how, axis=0):
             swapped = True
             values = values.swapaxes(0, axis)
         if arity > 1:
-            raise NotImplementedError
+            raise NotImplementedError("arity of more than 1 is not "
+                                      "supported for the 'how' argument")
         out_shape = (self.ngroups,) + values.shape[1:]
 
         is_numeric = is_numeric_dtype(values.dtype)
@@ -1556,7 +1558,8 @@ def _aggregate(self, result, counts, values, agg_func, is_numeric):
         comp_ids, _, ngroups = self.group_info
 
         if values.ndim > 3:
             # punting for now
-            raise NotImplementedError
+            raise NotImplementedError("number of dimensions is currently "
+                                      "limited to 3")
         elif values.ndim > 2:
             for i, chunk in enumerate(values.transpose(2, 0, 1)):
@@ -1815,7 +1818,8 @@ def _aggregate(self, result, counts, values, agg_func, is_numeric=True):
 
         if values.ndim > 3:
             # punting for now
-            raise NotImplementedError
+            raise NotImplementedError("number of dimensions is currently "
+                                      "limited to 3")
         elif values.ndim > 2:
             for i, chunk in enumerate(values.transpose(2, 0, 1)):
                 agg_func(result[:, :, i], counts, chunk, self.bins)
@@ -2622,7 +2626,8 @@ def aggregate(self, arg, *args, **kwargs):
                 if self._selection is not None:
                     subset = obj
                     if isinstance(subset, DataFrame):
-                        raise NotImplementedError
+                        raise NotImplementedError("Aggregating on a DataFrame is "
+                                                  "not supported")
 
                     for fname, agg_how in compat.iteritems(arg):
                         colg = SeriesGroupBy(subset, selection=self._selection,
@@ -2671,7 +2676,7 @@ def _aggregate_multiple_funcs(self, arg):
         from pandas.tools.merge import concat
 
         if self.axis != 0:
-            raise NotImplementedError
+            raise NotImplementedError("axis other than 0 is not supported")
 
         obj = self._obj_with_exclusions
 
@@ -2721,7 +2726,7 @@ def _aggregate_generic(self, func, *args, **kwargs):
         return self._wrap_generic_output(result, obj)
 
     def _wrap_aggregated_output(self, output, names=None):
-        raise NotImplementedError
+        raise AbstractMethodError(self)
 
     def _aggregate_item_by_item(self, func, *args, **kwargs):
         # only for axis==0
@@ -3283,7 +3288,7 @@ def _iterate_slices(self):
            slice_axis = self._selection_list
            slicer = lambda x: self._selected_obj[x]
        else:
-            raise NotImplementedError
+            raise NotImplementedError("axis other than 0 is not supported")
 
        for val in slice_axis:
            if val in self.exclusions:
@@ -3348,10 +3353,10 @@ def _aggregate_item_by_item(self, func, *args, **kwargs):
             new_axes[self.axis] = self.grouper.result_index
             return Panel._from_axes(result, new_axes)
         else:
-            raise NotImplementedError
+            raise ValueError("axis value must be greater than 0")
 
     def _wrap_aggregated_output(self, output, names=None):
-        raise NotImplementedError
+        raise AbstractMethodError(self)
 
 
 class NDArrayGroupBy(GroupBy):
@@ -3405,7 +3410,7 @@ def _chop(self, sdata, slice_obj):
         return sdata.iloc[slice_obj]
 
     def apply(self, f):
-        raise NotImplementedError
+        raise AbstractMethodError(self)
 
 
 class ArraySplitter(DataSplitter):
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 9b2d366bfb2be..269f692f8c4b7 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -294,7 +294,8 @@ def fillna(self, value, limit=None, inplace=False, downcast=None):
         mask = isnull(self.values)
         if limit is not None:
             if self.ndim > 2:
-                raise NotImplementedError
+                raise NotImplementedError("number of dimensions for 'fillna' "
+                                          "is currently limited to 2")
             mask[mask.cumsum(self.ndim-1)>limit]=False
 
         value = self._try_fill(value)
@@ -1681,7 +1682,8 @@ def _slice(self, slicer):
 
     def fillna(self, value, limit=None, inplace=False, downcast=None):
         # we may need to upcast our fill to match our dtype
         if limit is not None:
-            raise NotImplementedError
+            raise NotImplementedError("specifying a limit for 'fillna' has "
+                                      "not been implemented yet")
         values = self.values if inplace else self.values.copy()
         return [self.make_block_same_class(values=values.fillna(fill_value=value,
@@ -1848,7 +1850,8 @@ def fillna(self, value, limit=None,
         value = self._try_fill(value)
         if limit is not None:
             if self.ndim > 2:
-                raise NotImplementedError
+                raise NotImplementedError("number of dimensions for 'fillna' "
+                                          "is currently limited to 2")
             mask[mask.cumsum(self.ndim-1)>limit]=False
 
         np.putmask(values, mask, value)
@@ -2011,7 +2014,8 @@ def interpolate(self, method='pad', axis=0, inplace=False,
     def fillna(self, value, limit=None, inplace=False, downcast=None):
         # we may need to upcast our fill to match our dtype
         if limit is not None:
-            raise NotImplementedError
+            raise NotImplementedError("specifying a limit for 'fillna' has "
+                                      "not been implemented yet")
         if issubclass(self.dtype.type, np.floating):
             value = float(value)
         values = self.values if inplace else self.values.copy()
diff --git a/pandas/core/panelnd.py b/pandas/core/panelnd.py
index ec0a313ff5767..d021cb2d59ecf 100644
--- a/pandas/core/panelnd.py
+++ b/pandas/core/panelnd.py
@@ -99,7 +99,7 @@ def _combine_with_constructor(self, other, func):
     for f in ['to_frame', 'to_excel', 'to_sparse', 'groupby', 'join', 'filter',
               'dropna', 'shift']:
         def func(self, *args, **kwargs):
-            raise NotImplementedError
+            raise NotImplementedError("this operation is not supported")
         setattr(klass, f, func)
 
     # add the aggregate operations
diff --git a/pandas/core/series.py b/pandas/core/series.py
index b71c269468d62..f9c56db018639 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -140,7 +140,8 @@ def __init__(self, data=None, index=None, dtype=None, name=None,
                 dtype = self._validate_dtype(dtype)
 
             if isinstance(data, MultiIndex):
-                raise NotImplementedError
+                raise NotImplementedError("initializing a Series from a "
+                                          "MultiIndex is not supported")
             elif isinstance(data, Index):
                 # need to copy to avoid aliasing issues
                 if name is None:
diff --git a/pandas/io/html.py b/pandas/io/html.py
index 9f5c10ce128d2..b806b5147c4a5 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -19,6 +19,7 @@
                           raise_with_traceback, binary_type)
 from pandas.core import common as com
 from pandas import Series
+from pandas.core.common import AbstractMethodError
 
 _IMPORTS = False
 _HAS_BS4 = False
@@ -229,7 +230,7 @@ def _text_getter(self, obj):
         text : str or unicode
             The text from an individual DOM node.
         """
-        raise NotImplementedError
+        raise AbstractMethodError(self)
 
     def _parse_td(self, obj):
         """Return the td elements from a row element.
@@ -243,7 +244,7 @@ def _parse_td(self, obj):
         columns : list of node-like
             These are the elements of each row, i.e., the columns.
         """
-        raise NotImplementedError
+        raise AbstractMethodError(self)
 
     def _parse_tables(self, doc, match, attrs):
         """Return all tables from the parsed DOM.
@@ -270,7 +271,7 @@ def _parse_tables(self, doc, match, attrs):
         tables : list of node-like
             A list of elements to be parsed into raw data.
         """
-        raise NotImplementedError
+        raise AbstractMethodError(self)
 
     def _parse_tr(self, table):
         """Return the list of row elements from the parsed table element.
@@ -285,7 +286,7 @@ def _parse_tr(self, table):
         rows : list of node-like
             A list row elements of a table, usually <tr> or <th> elements.
         """
-        raise NotImplementedError
+        raise AbstractMethodError(self)
 
     def _parse_thead(self, table):
         """Return the header of a table.
@@ -300,7 +301,7 @@ def _parse_thead(self, table):
         thead : node-like
             A <thead>...</thead> element.
         """
-        raise NotImplementedError
+        raise AbstractMethodError(self)
 
     def _parse_tbody(self, table):
         """Return the body of the table.
@@ -315,7 +316,7 @@ def _parse_tbody(self, table):
         tbody : node-like
             A <tbody>...</tbody> element.
         """
-        raise NotImplementedError
+        raise AbstractMethodError(self)
 
     def _parse_tfoot(self, table):
         """Return the footer of the table if any.
@@ -330,7 +331,7 @@ def _parse_tfoot(self, table):
         tfoot : node-like
             A <tfoot>...</tfoot> element.
         """
-        raise NotImplementedError
+        raise AbstractMethodError(self)
 
     def _build_doc(self):
         """Return a tree-like object that can be used to iterate over the DOM
@@ -339,7 +340,7 @@ def _build_doc(self):
         -------
         obj : tree-like
         """
-        raise NotImplementedError
+        raise AbstractMethodError(self)
 
     def _build_table(self, table):
         header = self._parse_raw_thead(table)
diff --git a/pandas/io/json.py b/pandas/io/json.py
index 9e8ef74545ef2..0659e34c3f27b 100644
--- a/pandas/io/json.py
+++ b/pandas/io/json.py
@@ -11,6 +11,7 @@
 from pandas import compat, isnull
 from pandas import Series, DataFrame, to_datetime
 from pandas.io.common import get_filepath_or_buffer
+from pandas.core.common import AbstractMethodError
 import pandas.core.common as com
 
 loads = _json.loads
@@ -33,7 +34,7 @@ def to_json(path_or_buf, obj, orient=None, date_format='epoch',
             double_precision=double_precision, ensure_ascii=force_ascii,
             date_unit=date_unit, default_handler=default_handler).write()
     else:
-        raise NotImplementedError
+        raise NotImplementedError("'obj' should be a Series or a DataFrame")
 
     if isinstance(path_or_buf, compat.string_types):
         with open(path_or_buf, 'w') as fh:
@@ -64,7 +65,7 @@ def __init__(self, obj, orient, date_format, double_precision,
         self._format_axes()
 
     def _format_axes(self):
-        raise NotImplementedError
+        raise AbstractMethodError(self)
 
     def write(self):
         return dumps(
@@ -282,7 +283,7 @@ def _convert_axes(self):
             setattr(self.obj, axis, new_axis)
 
     def _try_convert_types(self):
-        raise NotImplementedError
+        raise AbstractMethodError(self)
 
     def _try_convert_data(self, name, data, use_dtypes=True,
                           convert_dates=True):
@@ -395,7 +396,7 @@ def _try_convert_to_date(self, data):
         return data, False
 
     def _try_convert_dates(self):
-        raise NotImplementedError
+        raise AbstractMethodError(self)
 
 
 class SeriesParser(Parser):
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 786d308c6770f..fef02dcb6e0c5 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -14,6 +14,7 @@
 from pandas.core.frame import DataFrame
 import datetime
 import pandas.core.common as com
+from pandas.core.common import AbstractMethodError
 from pandas.core.config import get_option
 from pandas.io.date_converters import generic_parser
 from pandas.io.common import get_filepath_or_buffer
@@ -707,7 +708,7 @@ def _make_engine(self, engine='c'):
         self._engine = klass(self.f, **self.options)
 
     def _failover_to_python(self):
-        raise NotImplementedError
+        raise AbstractMethodError(self)
 
     def read(self, nrows=None):
         if nrows is not None:
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 117d7b4a9ceaa..1dda736be3d47 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -531,7 +531,8 @@ def to_sql(frame, name, con, flavor='sqlite', schema=None, if_exists='fail',
     if isinstance(frame, Series):
         frame = frame.to_frame()
     elif not isinstance(frame, DataFrame):
-        raise NotImplementedError
+        raise NotImplementedError("'frame' argument should be either a "
+                                  "Series or a DataFrame")
 
     pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index,
                       index_label=index_label, schema=schema,
@@ -1434,7 +1435,8 @@ def __init__(self, con, flavor, is_cursor=False):
         if flavor is None:
             flavor = 'sqlite'
         if flavor not in ['sqlite', 'mysql']:
-            raise NotImplementedError
+            raise NotImplementedError("flavors other than SQLite and MySQL "
+                                      "are not supported")
         else:
             self.flavor = flavor
 
diff --git a/pandas/sparse/frame.py b/pandas/sparse/frame.py
index bc022fcb6542b..83278fe12d641 100644
--- a/pandas/sparse/frame.py
+++ b/pandas/sparse/frame.py
@@ -418,7 +418,7 @@ def _combine_frame(self, other, func, fill_value=None, level=None):
         new_index, new_columns = this.index, this.columns
 
         if level is not None:
-            raise NotImplementedError
+            raise NotImplementedError("'level' argument is not supported")
 
         if self.empty and other.empty:
             return SparseDataFrame(index=new_index).__finalize__(self)
@@ -459,9 +459,9 @@ def _combine_match_index(self, other, func, level=None, fill_value=None):
         new_data = {}
 
         if fill_value is not None:
-            raise NotImplementedError
+            raise NotImplementedError("'fill_value' argument is not supported")
         if level is not None:
-            raise NotImplementedError
+            raise NotImplementedError("'level' argument is not supported")
 
         new_index = self.index.union(other.index)
         this = self
@@ -494,9 +494,9 @@ def _combine_match_columns(self, other, func, level=None, fill_value=None):
         #  possible for this to happen, which is bothersome
 
         if fill_value is not None:
-            raise NotImplementedError
+            raise NotImplementedError("'fill_value' argument is not supported")
         if level is not None:
-            raise NotImplementedError
+            raise NotImplementedError("'level' argument is not supported")
 
         new_data = {}
 
@@ -567,10 +567,10 @@ def _reindex_columns(self, columns, copy, level, fill_value, limit=None,
             raise TypeError('Reindex by level not supported for sparse')
 
         if com.notnull(fill_value):
-            raise NotImplementedError
+            raise NotImplementedError("'fill_value' argument is not supported")
 
         if limit:
-            raise NotImplementedError
+            raise NotImplementedError("'limit' argument is not supported")
 
         # TODO: fill value handling
         sdict = dict((k, v) for k, v in compat.iteritems(self) if k in columns)
diff --git a/pandas/sparse/panel.py b/pandas/sparse/panel.py
index d3f3f59f264c5..34256acfb0e60 100644
--- a/pandas/sparse/panel.py
+++ b/pandas/sparse/panel.py
@@ -32,7 +32,7 @@ def __set__(self, obj, value):
         value = _ensure_index(value)
 
         if isinstance(value, MultiIndex):
-            raise NotImplementedError
+            raise NotImplementedError("value cannot be a MultiIndex")
 
         for v in compat.itervalues(obj._frames):
             setattr(v, self.frame_attr, value)
@@ -159,7 +159,7 @@ def _get_items(self):
     def _set_items(self, new_items):
         new_items = _ensure_index(new_items)
         if isinstance(new_items, MultiIndex):
-            raise NotImplementedError
+            raise NotImplementedError("items cannot be a MultiIndex")
 
         # need to create new frames dict
 
diff --git a/pandas/src/generate_code.py b/pandas/src/generate_code.py
index 575fcf386f570..9d0384857ed81 100644
--- a/pandas/src/generate_code.py
+++ b/pandas/src/generate_code.py
@@ -1653,7 +1653,8 @@ def group_ohlc_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
 
     b = 0
     if K > 1:
-        raise NotImplementedError
+        raise NotImplementedError("Argument 'values' must have only "
+                                  "one dimension")
     else:
         for i in range(N):
             while b < ngroups - 1 and i >= bins[b]:
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 6a284e547433a..4cd6f6eda26d9 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -12,6 +12,7 @@
 
 from pandas.util.decorators import cache_readonly, deprecate_kwarg
 import pandas.core.common as com
+from pandas.core.common import AbstractMethodError
 from pandas.core.generic import _shared_docs, _shared_doc_kwargs
 from pandas.core.index import Index, MultiIndex
 from pandas.core.series import Series, remove_na
@@ -131,7 +132,7 @@ def random_color(column):
 
         colors = lmap(random_color, lrange(num_colors))
     else:
-        raise NotImplementedError
+        raise ValueError("color_type must be either 'default' or 'random'")
 
     if len(colors) != num_colors:
         multiple = num_colors//len(colors) - 1
@@ -1017,7 +1018,7 @@ def _compute_plot_data(self):
         self.data = numeric_data
 
     def _make_plot(self):
-        raise NotImplementedError
+        raise AbstractMethodError(self)
 
     def _add_table(self):
         if self.table is False:
@@ -1821,7 +1822,7 @@ def f(ax, x, y, w, start=None, log=self.log, **kwds):
                 start = start + self.left
                 return ax.barh(x, y, w, left=start, **kwds)
         else:
-            raise NotImplementedError
+            raise ValueError("BarPlot kind must be either 'bar' or 'barh'")
 
         return f
 
diff --git a/pandas/tseries/base.py b/pandas/tseries/base.py
index ed11b12871ce5..2b37c64940170 100644
--- a/pandas/tseries/base.py
+++ b/pandas/tseries/base.py
@@ -8,7 +8,7 @@
 from pandas import compat
 import numpy as np
 from pandas.core import common as com
-from pandas.core.common import is_integer, is_float
+from pandas.core.common import is_integer, is_float, AbstractMethodError
 import pandas.tslib as tslib
 import pandas.lib as lib
 from pandas.core.index import Index
@@ -48,7 +48,7 @@ def _box_func(self):
         """
         box function to get object from internal representation
         """
-        raise NotImplementedError
+        raise AbstractMethodError(self)
 
     def _box_values(self, values):
         """
@@ -261,7 +261,7 @@ def _formatter_func(self):
         return str
 
     def _format_footer(self):
-        raise NotImplementedError
+        raise AbstractMethodError(self)
 
     def __unicode__(self):
         formatter = self._formatter_func
@@ -314,10 +314,10 @@ def _convert_scalar_indexer(self, key, kind=None):
         return super(DatetimeIndexOpsMixin, self)._convert_scalar_indexer(key, kind=kind)
 
     def _add_datelike(self, other):
-        raise NotImplementedError
+        raise AbstractMethodError(self)
 
     def _sub_datelike(self, other):
-        raise NotImplementedError
+        raise AbstractMethodError(self)
 
     @classmethod
     def _add_datetimelike_methods(cls):
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index ca5119acc8b99..da9214198d774 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -1655,14 +1655,15 @@ def indexer_at_time(self, time, asof=False):
         from dateutil.parser import parse
 
         if asof:
-            raise NotImplementedError
+            raise NotImplementedError("'asof' argument is not supported")
 
         if isinstance(time, compat.string_types):
             time = parse(time).time()
 
         if time.tzinfo:
             # TODO
-            raise NotImplementedError
+            raise NotImplementedError("argument 'time' with timezone info is "
+                                      "not supported")
 
         time_micros = self._get_time_micros()
         micros = _time_to_micros(time)
@@ -1694,7 +1695,8 @@ def indexer_between_time(self, start_time, end_time, include_start=True,
             end_time = parse(end_time).time()
 
         if start_time.tzinfo or end_time.tzinfo:
-            raise NotImplementedError
+            raise NotImplementedError("argument 'time' with timezone info is "
+                                      "not supported")
 
         time_micros = self._get_time_micros()
         start_micros = _time_to_micros(start_time)
@@ -1773,7 +1775,8 @@ def _generate_regular_range(start, end, periods, offset):
         b = e - np.int64(periods) * stride
         tz = end.tz
     else:
-        raise NotImplementedError
+        raise ValueError("at least 'start' or 'end' should be specified "
+                         "if a 'period' is given.")
 
     data = np.arange(b, e, stride, dtype=np.int64)
     data = DatetimeIndex._simple_new(data, None, tz=tz)
diff --git a/pandas/tseries/resample.py b/pandas/tseries/resample.py
index 7607bef0f1d71..c18d378168b93 100644
--- a/pandas/tseries/resample.py
+++ b/pandas/tseries/resample.py
@@ -373,11 +373,11 @@ def _take_new_index(obj, indexer, new_index, axis=0):
         return Series(new_values, index=new_index, name=obj.name)
     elif isinstance(obj, DataFrame):
         if axis == 1:
-            raise NotImplementedError
+            raise NotImplementedError("axis 1 is not supported")
         return DataFrame(obj._data.reindex_indexer(
             new_axis=new_index, indexer=indexer, axis=1))
     else:
-        raise NotImplementedError
+        raise ValueError("'obj' should be either a Series or a DataFrame")
 
 
 def _get_range_edges(first, last, offset, closed='left', base=0):
@@ -467,7 +467,7 @@ def asfreq(obj, freq, method=None, how=None, normalize=False):
     """
     if isinstance(obj.index, PeriodIndex):
         if method is not None:
-            raise NotImplementedError
+            raise NotImplementedError("'method' argument is not supported")
 
         if how is None:
             how = 'E'
diff --git a/pandas/tseries/tdi.py b/pandas/tseries/tdi.py
index e01ff54feab57..0f8ba279ec3a6 100644
--- a/pandas/tseries/tdi.py
+++ b/pandas/tseries/tdi.py
@@ -927,7 +927,8 @@ def _generate_regular_range(start, end, periods, offset):
         e = Timedelta(end).value + stride
         b = e - periods * stride
     else:
-        raise NotImplementedError
+        raise ValueError("at least 'start' or 'end' should be specified "
+                         "if a 'period' is given.")
 
     data = np.arange(b, e, stride, dtype=np.int64)
     data = TimedeltaIndex._simple_new(data, None)
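Below is a minimal usage sketch, not part of the patch, showing how the `AbstractMethodError` added in `pandas/core/common.py` behaves when a subclass does not override an abstract method. The `Base` and `Concrete` classes are hypothetical stand-ins for the pandas classes touched above; the exception class body is copied from the hunk in `pandas/core/common.py`.

```python
class AbstractMethodError(NotImplementedError):
    """Copy of the class added to pandas/core/common.py in this patch."""
    def __init__(self, class_instance):
        self.class_instance = class_instance

    def __str__(self):
        return "This method must be defined on the concrete class of " \
            + self.class_instance.__class__.__name__


class Base(object):
    @property
    def _constructor(self):
        # same pattern as NDFrame._constructor after this patch
        raise AbstractMethodError(self)


class Concrete(Base):
    pass


try:
    Concrete()._constructor
except AbstractMethodError as err:
    # prints: This method must be defined on the concrete class of Concrete
    print(err)
    # still a NotImplementedError, so existing except blocks keep working
    assert isinstance(err, NotImplementedError)
```

Because the error carries the instance, the message names the concrete subclass rather than the base class where the stub lives, while callers that already catch `NotImplementedError` are unaffected.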