
Commit b435786

DOC clean up assorted type specifications

With thanks to the demo of numpy/numpydoc#150

1 parent 6fdcb3b


41 files changed (+75, -104 lines)
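The fixes below all move toward the numpydoc parameter convention that the referenced numpydoc demo checks for: a space before the colon, a concrete lowercase type rather than prose, and the description indented on the following line. An illustrative docstring (not taken from the diff) showing the target style:

    def estimate(X, assume_centered=False, random_state=None):
        """Toy function illustrating the documented convention.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        assume_centered : bool, optional (default=False)
            If True, data are not centered before computation.

        random_state : RandomState
            The random number generator to use.
        """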

sklearn/cluster/k_means_.py

Lines changed: 2 additions & 2 deletions
@@ -55,7 +55,7 @@ def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
     x_squared_norms : array, shape (n_samples,)
         Squared Euclidean norm of each data point.
 
-    random_state : numpy.RandomState
+    random_state : RandomState
         The generator used to initialize the centers.
 
     n_local_trials : integer, optional
@@ -1495,7 +1495,7 @@ def _labels_inertia_minibatch(self, X):
 
         Returns
         -------
-        labels : array, shap (n_samples,)
+        labels : array, shape (n_samples,)
            Cluster labels for each point.
 
        inertia : float
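As a usage note for the RandomState type above: scikit-learn accepts an int seed, a RandomState instance, or None, and normalizes all three internally. A minimal sketch against the public KMeans API (the private _k_init helper receives the already-validated generator):

    import numpy as np
    from sklearn.cluster import KMeans

    rng = np.random.RandomState(42)  # an explicit RandomState instance
    X = rng.rand(100, 2)
    km = KMeans(n_clusters=3, random_state=rng).fit(X)
    print(km.cluster_centers_.shape)  # (3, 2)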

sklearn/covariance/empirical_covariance_.py

Lines changed: 6 additions & 5 deletions
@@ -55,7 +55,7 @@ def empirical_covariance(X, assume_centered=False):
     X : ndarray, shape (n_samples, n_features)
         Data from which to compute the covariance estimate
 
-    assume_centered : Boolean
+    assume_centered : boolean
         If True, data are not centered before computation.
         Useful when working with data whose mean is almost, but not exactly
         zero.
@@ -142,7 +142,7 @@ def get_precision(self):
 
         Returns
         -------
-        precision_ : array-like,
+        precision_ : array-like
            The precision matrix associated to the current covariance object.
 
        """
@@ -162,12 +162,12 @@ def fit(self, X, y=None):
            Training data, where n_samples is the number of samples and
            n_features is the number of features.
 
-        y : not used, present for API consistence purpose.
+        y
+            not used, present for API consistence purpose.
 
        Returns
        -------
        self : object
-            Returns self.
 
        """
        X = check_array(X)
@@ -193,7 +193,8 @@ def score(self, X_test, y=None):
            X_test is assumed to be drawn from the same distribution than
            the data used in fit (including centering).
 
-        y : not used, present for API consistence purpose.
+        y
+            not used, present for API consistence purpose.
 
        Returns
        -------
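The y parameter in these fit and score signatures exists only so unsupervised estimators match the supervised fit(X, y) interface, which tools such as Pipeline rely on. A minimal sketch of why that matters (illustrative data):

    import numpy as np
    from sklearn.covariance import EmpiricalCovariance
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import StandardScaler

    X = np.random.RandomState(0).randn(50, 3)

    # Pipeline.fit forwards (X, y) to every step, so even unsupervised
    # estimators must accept y -- hence "not used, present for API
    # consistence purpose".
    pipe = Pipeline([("scale", StandardScaler()),
                     ("cov", EmpiricalCovariance())])
    pipe.fit(X)  # y defaults to None and is ignored
    print(pipe.named_steps["cov"].covariance_.shape)  # (3, 3)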

sklearn/covariance/graph_lasso_.py

Lines changed: 1 addition & 1 deletion
@@ -519,7 +519,7 @@ class GraphLassoCV(GraphLasso):
         If verbose is True, the objective function and duality gap are
         printed at each iteration.
 
-    assume_centered : Boolean
+    assume_centered : boolean
         If True, data are not centered before computation.
         Useful when working with data whose mean is almost, but not exactly
         zero.

sklearn/covariance/robust_covariance.py

Lines changed: 3 additions & 3 deletions
@@ -523,7 +523,7 @@ class MinCovDet(EmpiricalCovariance):
     store_precision : bool
         Specify if the estimated precision is stored.
 
-    assume_centered : Boolean
+    assume_centered : bool
         If True, the support of the robust location and the covariance
         estimates is computed, and a covariance estimate is recomputed from
         it, without centering the data.
@@ -606,12 +606,12 @@ def fit(self, X, y=None):
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
 
-        y : not used, present for API consistence purpose.
+        y
+            not used, present for API consistence purpose.
 
        Returns
        -------
        self : object
-            Returns self.
 
        """
        X = check_array(X, ensure_min_samples=2, estimator='MinCovDet')

sklearn/covariance/shrunk_covariance_.py

Lines changed: 7 additions & 7 deletions
@@ -122,12 +122,12 @@ def fit(self, X, y=None):
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
 
-        y : not used, present for API consistence purpose.
+        y
+            not used, present for API consistence purpose.
 
        Returns
        -------
        self : object
-            Returns self.
 
        """
        X = check_array(X)
@@ -157,7 +157,7 @@ def ledoit_wolf_shrinkage(X, assume_centered=False, block_size=1000):
     X : array-like, shape (n_samples, n_features)
         Data from which to compute the Ledoit-Wolf shrunk covariance shrinkage.
 
-    assume_centered : Boolean
+    assume_centered : bool
         If True, data are not centered before computation.
         Useful to work with data whose mean is significantly equal to
         zero but is not exactly zero.
@@ -380,12 +380,12 @@ def fit(self, X, y=None):
     X : array-like, shape = [n_samples, n_features]
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
-        y : not used, present for API consistence purpose.
+        y
+            not used, present for API consistence purpose.
 
        Returns
        -------
        self : object
-            Returns self.
 
        """
        # Not calling the parent object to fit, to avoid computing the
@@ -537,12 +537,12 @@ def fit(self, X, y=None):
     X : array-like, shape = [n_samples, n_features]
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
-        y : not used, present for API consistence purpose.
+        y
+            not used, present for API consistence purpose.
 
        Returns
        -------
        self : object
-            Returns self.
 
        """
        X = check_array(X)
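A short usage sketch for the assume_centered flag documented above, using the public LedoitWolf estimator (illustrative data):

    import numpy as np
    from sklearn.covariance import LedoitWolf

    rng = np.random.RandomState(0)
    X = rng.randn(200, 5)
    X -= X.mean(axis=0)  # data centered by hand

    # assume_centered=True tells the estimator to skip re-centering.
    lw = LedoitWolf(assume_centered=True).fit(X)
    print(lw.shrinkage_)  # the estimated shrinkage coefficient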

sklearn/cross_decomposition/pls_.py

Lines changed: 2 additions & 1 deletion
@@ -153,7 +153,8 @@ class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
         The algorithm used to estimate the weights. It will be called
         n_components times, i.e. once for each iteration of the outer loop.
 
-    max_iter : an integer, the maximum number of iterations (default 500)
+    max_iter : int (default 500)
+        The maximum number of iterations
         of the NIPALS inner loop (used only if algorithm="nipals")
 
     tol : non-negative real, default 1e-06

sklearn/datasets/base.py

Lines changed: 3 additions & 2 deletions
@@ -212,8 +212,9 @@ def load_data(module_path, data_file_name):
 
     Parameters
     ----------
-    data_file_name : String. Name of csv file to be loaded from
-        module_path/data/data_file_name. For example 'wine_data.csv'.
+    data_file_name : String
+        Name of csv file to be loaded from
+        module_path/data/data_file_name. For example 'wine_data.csv'.
 
     Returns
     -------

sklearn/datasets/mlcomp.py

Lines changed: 9 additions & 6 deletions
@@ -29,14 +29,17 @@ def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
     Parameters
     ----------
 
-    name_or_id : the integer id or the string name metadata of the MLComp
-        dataset to load
+    name_or_id : int or str
+        The integer id or the string name metadata of the MLComp
+        dataset to load
 
-    set_ : select the portion to load: 'train', 'test' or 'raw'
+    set_ : str, default='raw'
+        Select the portion to load: 'train', 'test' or 'raw'
 
-    mlcomp_root : the filesystem path to the root folder where MLComp datasets
-        are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
-        environment variable is looked up instead.
+    mlcomp_root : str, optional
+        The filesystem path to the root folder where MLComp datasets
+        are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
+        environment variable is looked up instead.
 
     **kwargs : domain specific kwargs to be passed to the dataset loader.
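A hedged usage sketch based only on the signature shown in this hunk (the dataset name is illustrative, and load_mlcomp was removed from scikit-learn in later releases, so treat this as historical):

    from sklearn.datasets import load_mlcomp

    # Assumes the MLCOMP_DATASETS_HOME environment variable points at a
    # local MLComp download, since mlcomp_root is left as None.
    news_train = load_mlcomp("20news-18828", set_="train")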

sklearn/dummy.py

Lines changed: 0 additions & 2 deletions
@@ -104,7 +104,6 @@ def fit(self, X, y, sample_weight=None):
        Returns
        -------
        self : object
-            Returns self.
        """
        if self.strategy not in ("most_frequent", "stratified", "uniform",
                                 "constant", "prior"):
@@ -386,7 +385,6 @@ def fit(self, X, y, sample_weight=None):
        Returns
        -------
        self : object
-            Returns self.
        """
        if self.strategy not in ("mean", "median", "quantile", "constant"):
            raise ValueError("Unknown strategy type: %s, expected "

sklearn/ensemble/_gradient_boosting.pyx

Lines changed: 1 addition & 1 deletion
@@ -376,7 +376,7 @@ def _random_sample_mask(np.npy_intp n_total_samples,
     n_total_in_bag : int
         The number of elements in the sample mask which are set to 1.
 
-    random_state : np.RandomState
+    random_state : RandomState
         A numpy ``RandomState`` object.
 
     Returns

sklearn/ensemble/bagging.py

Lines changed: 0 additions & 2 deletions
@@ -242,7 +242,6 @@ def fit(self, X, y, sample_weight=None):
        Returns
        -------
        self : object
-            Returns self.
        """
        return self._fit(X, y, self.max_samples, sample_weight=sample_weight)
 
@@ -275,7 +274,6 @@ def _fit(self, X, y, max_samples=None, max_depth=None, sample_weight=None):
        Returns
        -------
        self : object
-            Returns self.
        """
        random_state = check_random_state(self.random_state)
 

sklearn/ensemble/forest.py

Lines changed: 0 additions & 2 deletions
@@ -241,7 +241,6 @@ def fit(self, X, y, sample_weight=None):
        Returns
        -------
        self : object
-            Returns self.
        """
        # Validate or convert input data
        X = check_array(X, accept_sparse="csc", dtype=DTYPE)
@@ -1895,7 +1894,6 @@ def fit(self, X, y=None, sample_weight=None):
        Returns
        -------
        self : object
-            Returns self.
 
        """
        self.fit_transform(X, y, sample_weight=sample_weight)

sklearn/ensemble/gradient_boosting.py

Lines changed: 4 additions & 5 deletions
@@ -987,7 +987,6 @@ def fit(self, X, y, sample_weight=None, monitor=None):
        Returns
        -------
        self : object
-            Returns self.
        """
        # if not warmstart - clear the estimator state
        if not self.warm_start:
@@ -1412,7 +1411,7 @@ class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
 
         .. versionadded:: 0.19
 
-    init : BaseEstimator, None, optional (default=None)
+    init : estimator, optional
         An estimator object that is used to compute the initial
         predictions. ``init`` has to provide ``fit`` and ``predict``.
         If None it uses ``loss.init_estimator``.
@@ -1493,7 +1492,7 @@ class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
     loss_ : LossFunction
         The concrete ``LossFunction`` object.
 
-    init_ : BaseEstimator
+    init_ : estimator
         The estimator that provides the initial predictions.
         Set via the ``init`` argument or ``loss.init_estimator``.
 
@@ -1870,7 +1869,7 @@ class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
         The alpha-quantile of the huber loss function and the quantile
         loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
 
-    init : BaseEstimator, None, optional (default=None)
+    init : estimator, optional (default=None)
         An estimator object that is used to compute the initial
         predictions. ``init`` has to provide ``fit`` and ``predict``.
         If None it uses ``loss.init_estimator``.
@@ -1945,7 +1944,7 @@ class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
     loss_ : LossFunction
         The concrete ``LossFunction`` object.
 
-    init_ : BaseEstimator
+    init_ : estimator
         The estimator that provides the initial predictions.
         Set via the ``init`` argument or ``loss.init_estimator``.
 
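A minimal sketch of the init parameter documented above; the choice of DummyRegressor as the initial estimator is illustrative (any object providing fit and predict should do):

    from sklearn.datasets import make_regression
    from sklearn.dummy import DummyRegressor
    from sklearn.ensemble import GradientBoostingRegressor

    X, y = make_regression(n_samples=100, n_features=4, random_state=0)

    # The init estimator supplies the initial predictions that the
    # boosting stages then refine.
    gbr = GradientBoostingRegressor(init=DummyRegressor(strategy="mean"),
                                    n_estimators=50, random_state=0)
    gbr.fit(X, y)
    print(type(gbr.init_).__name__)  # DummyRegressor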

sklearn/ensemble/iforest.py

Lines changed: 0 additions & 1 deletion
@@ -157,7 +157,6 @@ def fit(self, X, y=None, sample_weight=None):
        Returns
        -------
        self : object
-            Returns self.
        """
        X = check_array(X, accept_sparse=['csc'])
        if issparse(X):

sklearn/ensemble/voting_classifier.py

Lines changed: 1 addition & 1 deletion
@@ -316,7 +316,7 @@ def set_params(self, **params):
 
        Parameters
        ----------
-        params: keyword arguments
+        params : keyword arguments
            Specific parameters using e.g. set_params(parameter_name=new_value)
            In addition, to setting the parameters of the ``VotingClassifier``,
            the individual classifiers of the ``VotingClassifier`` can also be
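A short sketch of the nested set_params convention this docstring refers to, using scikit-learn's standard <step>__<param> double-underscore syntax (estimator names here are illustrative):

    from sklearn.ensemble import VotingClassifier
    from sklearn.linear_model import LogisticRegression
    from sklearn.tree import DecisionTreeClassifier

    eclf = VotingClassifier(estimators=[("lr", LogisticRegression()),
                                        ("dt", DecisionTreeClassifier())])

    eclf.set_params(voting="soft")  # a parameter of the ensemble itself
    eclf.set_params(lr__C=10.0)     # a parameter of one sub-classifier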

sklearn/ensemble/weight_boosting.py

Lines changed: 3 additions & 6 deletions
@@ -93,7 +93,6 @@ def fit(self, X, y, sample_weight=None):
        Returns
        -------
        self : object
-            Returns self.
        """
        # Check parameters
        if self.learning_rate <= 0:
@@ -188,7 +187,7 @@ def _boost(self, iboost, X, y, sample_weight, random_state):
        sample_weight : array-like of shape = [n_samples]
            The current sample weights.
 
-        random_state : numpy.RandomState
+        random_state : RandomState
            The current random number generator
 
        Returns
@@ -403,7 +402,6 @@ def fit(self, X, y, sample_weight=None):
        Returns
        -------
        self : object
-            Returns self.
        """
        # Check that algorithm is supported
        if self.algorithm not in ('SAMME', 'SAMME.R'):
@@ -452,7 +450,7 @@ def _boost(self, iboost, X, y, sample_weight, random_state):
        sample_weight : array-like of shape = [n_samples]
            The current sample weights.
 
-        random_state : numpy.RandomState
+        random_state : RandomState
            The current random number generator
 
        Returns
@@ -949,7 +947,6 @@ def fit(self, X, y, sample_weight=None):
        Returns
        -------
        self : object
-            Returns self.
        """
        # Check loss
        if self.loss not in ('linear', 'square', 'exponential'):
@@ -986,7 +983,7 @@ def _boost(self, iboost, X, y, sample_weight, random_state):
        sample_weight : array-like of shape = [n_samples]
            The current sample weights.
 
-        random_state : numpy.RandomState
+        random_state : RandomState
            The current random number generator
 
        Returns

sklearn/feature_selection/from_model.py

Lines changed: 0 additions & 2 deletions
@@ -160,7 +160,6 @@ def fit(self, X, y=None, **fit_params):
        Returns
        -------
        self : object
-            Returns self.
        """
        if self.prefit:
            raise NotFittedError(
@@ -192,7 +191,6 @@ def partial_fit(self, X, y=None, **fit_params):
        Returns
        -------
        self : object
-            Returns self.
        """
        if self.prefit:
            raise NotFittedError(

sklearn/feature_selection/univariate_selection.py

Lines changed: 0 additions & 1 deletion
@@ -336,7 +336,6 @@ def fit(self, X, y):
        Returns
        -------
        self : object
-            Returns self.
        """
        X, y = check_X_y(X, y, ['csr', 'csc'], multi_output=True)
