|
3 | 3 | from io import StringIO
|
4 | 4 | from itertools import islice
|
5 | 5 | import os
|
6 |
| -from typing import Any, Callable, Optional, Type, Iterator |
| 6 | +from typing import Any, Callable, Iterator, Optional, Type |
7 | 7 |
|
8 | 8 | import numpy as np
|
9 | 9 |
|
@@ -350,20 +350,20 @@ def _write(
|
350 | 350 | )
|
351 | 351 | def read_json(
|
352 | 352 | path_or_buf=None,
|
353 |
| - orient: str = None, |
354 |
| - typ: str = "frame", |
| 353 | + orient=None, |
| 354 | + typ="frame", |
355 | 355 | dtype=None,
|
356 |
| - convert_axes: bool = None, |
| 356 | + convert_axes=None, |
357 | 357 | convert_dates=True,
|
358 | 358 | keep_default_dates: bool = True,
|
359 | 359 | numpy: bool = False,
|
360 | 360 | precise_float: bool = False,
|
361 |
| - date_unit: str = None, |
362 |
| - encoding: str = None, |
| 361 | + date_unit=None, |
| 362 | + encoding=None, |
363 | 363 | lines: bool = False,
|
364 | 364 | chunksize: Optional[int] = None,
|
365 |
| - compression: str = "infer", |
366 |
| - nrows: int = None, |
| 365 | + compression="infer", |
| 366 | + nrows: Optional[int] = None, |
367 | 367 | ):
|
368 | 368 | """
|
369 | 369 | Convert a JSON string to pandas object.
|
@@ -495,20 +495,19 @@ def read_json(
|
495 | 495 | This can only be passed if `lines=True`.
|
496 | 496 | If this is None, the file will be read into memory all at once.
|
497 | 497 |
|
498 |
| - nrows : int, default None |
499 |
| - The number of lines from the line-delimited jsonfile that has to be read. |
500 |
| - This can only be passed if `lines=True`. |
501 |
| - If this is None, all the rows will be returned. |
502 |
| - .. versionadded:: 1.1 |
503 |
| -
|
504 |
| -
|
505 | 498 | compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'
|
506 | 499 | For on-the-fly decompression of on-disk data. If 'infer', then use
|
507 | 500 | gzip, bz2, zip or xz if path_or_buf is a string ending in
|
508 | 501 | '.gz', '.bz2', '.zip', or '.xz', respectively, and no decompression
|
509 | 502 | otherwise. If using 'zip', the ZIP file must contain only one data
|
510 | 503 | file to be read in. Set to None for no decompression.
|
511 | 504 |
|
| 505 | + nrows : int, optional |
| 506 | + The number of lines from the line-delimited jsonfile that has to be read. |
| 507 | + This can only be passed if `lines=True`. |
| 508 | + If this is None, all the rows will be returned. |
| 509 | +
| 510 | + .. versionadded:: 1.1 |
|
512 | 511 | Returns
|
513 | 512 | -------
|
514 | 513 | Series or DataFrame
|
@@ -634,20 +633,20 @@ class JsonReader(abc.Iterator):
|
634 | 633 | def __init__(
|
635 | 634 | self,
|
636 | 635 | filepath_or_buffer,
|
637 |
| - orient: str, |
638 |
| - typ: str, |
| 636 | + orient, |
| 637 | + typ, |
639 | 638 | dtype,
|
640 |
| - convert_axes: bool, |
| 639 | + convert_axes, |
641 | 640 | convert_dates,
|
642 | 641 | keep_default_dates: bool,
|
643 | 642 | numpy: bool,
|
644 | 643 | precise_float: bool,
|
645 |
| - date_unit: str, |
646 |
| - encoding: str, |
| 644 | + date_unit, |
| 645 | + encoding, |
647 | 646 | lines: bool,
|
648 | 647 | chunksize: Optional[int],
|
649 |
| - compression: str, |
650 |
| - nrows: int, |
| 648 | + compression, |
| 649 | + nrows: Optional[int], |
651 | 650 | ):
|
652 | 651 |
|
653 | 652 | self.path_or_buf = filepath_or_buffer
|
|
0 commit comments