from __future__ import annotations

import contextlib
import io
from pathlib import Path
from typing import IO, TYPE_CHECKING, Any

import polars.functions as F
from polars import concat as plconcat
from polars._utils.deprecation import deprecate_renamed_parameter
from polars._utils.unstable import issue_unstable_warning
from polars._utils.various import (
    is_int_sequence,
    is_path_or_str_sequence,
    normalize_filepath,
)
from polars._utils.wrap import wrap_ldf
from polars.convert import from_arrow
from polars.dependencies import import_optional
from polars.io._utils import parse_row_index_args, prepare_file_arg
from polars.io.cloud.credential_provider._builder import (
    _init_credential_provider_builder,
)

with contextlib.suppress(ImportError):
    from polars.polars import PyLazyFrame
    from polars.polars import read_parquet_metadata as _read_parquet_metadata
    from polars.polars import read_parquet_schema as _read_parquet_schema

if TYPE_CHECKING:
    from typing import Literal

    from polars import DataFrame, DataType, LazyFrame
    from polars._typing import FileSource, ParallelStrategy, SchemaDict
    from polars.io.cast_options import ScanCastOptions
    from polars.io.cloud import CredentialProviderFunction
    from polars.io.cloud.credential_provider._builder import CredentialProviderBuilder


@deprecate_renamed_parameter("row_count_name", "row_index_name", version="0.20.4")
@deprecate_renamed_parameter("row_count_offset", "row_index_offset", version="0.20.4")
def read_parquet(
    source: FileSource,
    *,
    columns: list[int] | list[str] | None = None,
    n_rows: int | None = None,
    row_index_name: str | None = None,
    row_index_offset: int = 0,
    parallel: ParallelStrategy = "auto",
    use_statistics: bool = True,
    hive_partitioning: bool | None = None,
    glob: bool = True,
    schema: SchemaDict | None = None,
    hive_schema: SchemaDict | None = None,
    try_parse_hive_dates: bool = True,
    rechunk: bool = False,
    low_memory: bool = False,
    storage_options: dict[str, Any] | None = None,
    credential_provider: CredentialProviderFunction | Literal["auto"] | None = "auto",
    retries: int = 2,
    use_pyarrow: bool = False,
    pyarrow_options: dict[str, Any] | None = None,
    memory_map: bool = True,
    include_file_paths: str | None = None,
    allow_missing_columns: bool = False,
) -> DataFrame:
    """
    Read into a DataFrame from a parquet file.

    .. versionchanged:: 0.20.4
        * The `row_count_name` parameter was renamed `row_index_name`.
        * The `row_count_offset` parameter was renamed `row_index_offset`.

    Parameters
    ----------
    source
        Path(s) to a file or directory.
        When needing to authenticate for scanning cloud locations, see the
        `storage_options` parameter.

        File-like objects are supported (by "file-like object" we refer to objects
        that have a `read()` method, such as a file handler like the builtin `open`
        function, or a `BytesIO` instance). For file-like objects, the stream position
        may not be updated accordingly after reading.
    columns
        Columns to select. Accepts a list of column indices (starting at zero) or a list
        of column names.
    n_rows
        Stop reading from parquet file after reading `n_rows`.
        Only valid when `use_pyarrow=False`.
    row_index_name
        Insert a row index column with the given name into the DataFrame as the first
        column. If set to `None` (default), no row index column is created.
    row_index_offset
        Start the row index at this offset. Cannot be negative.
        Only used if `row_index_name` is set.
    parallel : {'auto', 'columns', 'row_groups', 'none'}
        This determines the direction of parallelism. 'auto' will try to determine the
        optimal direction.
    use_statistics
        Use statistics in the parquet to determine if pages
        can be skipped from reading.
    hive_partitioning
        Infer statistics and schema from Hive partitioned URL and use them
        to prune reads. This is unset by default (i.e. `None`), meaning it is
        automatically enabled when a single directory is passed, and otherwise
        disabled.
    glob
        Expand path given via globbing rules.
    schema
        Specify the datatypes of the columns. The datatypes must match the
        datatypes in the file(s). If there are extra columns that are not in the
        file(s), consider also enabling `allow_missing_columns`.

        .. warning::
            This functionality is considered **unstable**. It may be changed
            at any point without it being considered a breaking change.
    hive_schema
        The column names and data types of the columns by which the data is partitioned.
        If set to `None` (default), the schema of the Hive partitions is inferred.

        .. warning::
            This functionality is considered **unstable**. It may be changed
            at any point without it being considered a breaking change.
    try_parse_hive_dates
        Whether to try parsing hive values as date/datetime types.
    rechunk
        Make sure that all columns are contiguous in memory by
        aggregating the chunks into a single array.
    low_memory
        Reduce memory pressure at the expense of performance.
    storage_options
        Options that indicate how to connect to a cloud provider.

        The cloud providers currently supported are AWS, GCP, and Azure.
        See supported keys here:

        * `aws <https://docs.rs/object_store/latest/object_store/aws/enum.AmazonS3ConfigKey.html>`_
        * `gcp <https://docs.rs/object_store/latest/object_store/gcp/enum.GoogleConfigKey.html>`_
        * `azure <https://docs.rs/object_store/latest/object_store/azure/enum.AzureConfigKey.html>`_
        * Hugging Face (`hf://`): Accepts an API key under the `token` parameter: \
          `{'token': '...'}`, or by setting the `HF_TOKEN` environment variable.

        If `storage_options` is not provided, Polars will try to infer the information
        from environment variables.
    credential_provider
        Provide a function that can be called to provide cloud storage
        credentials. The function is expected to return a dictionary of
        credential keys along with an optional credential expiry time.

        .. warning::
            This functionality is considered **unstable**. It may be changed
            at any point without it being considered a breaking change.
    retries
        Number of retries if accessing a cloud instance fails.
    use_pyarrow
        Use PyArrow instead of the Rust-native Parquet reader. The PyArrow reader is
        more stable.
    pyarrow_options
        Keyword arguments for `pyarrow.parquet.read_table
        <https://arrow.apache.org/docs/python/generated/pyarrow.parquet.read_table.html>`_.
    memory_map
        Memory map the underlying file. This will likely increase performance.
        Only used when `use_pyarrow=True`.
    include_file_paths
        Include the path of the source file(s) as a column with this name.
        Only valid when `use_pyarrow=False`.
    allow_missing_columns
        When reading a list of parquet files, if a column existing in the first
        file cannot be found in subsequent files, the default behavior is to
        raise an error. However, if `allow_missing_columns` is set to
        `True`, a full-NULL column is returned instead of erroring for the files
        that do not contain the column.

    Returns
    -------
    DataFrame

    See Also
    --------
    scan_parquet: Lazily read from a parquet file or multiple files via glob patterns.
    scan_pyarrow_dataset

    Warnings
    --------
    Calling `read_parquet().lazy()` is an antipattern as this forces Polars to
    materialize a full parquet file and therefore cannot push any optimizations
    into the reader. Therefore always prefer `scan_parquet` if you want to work
    with `LazyFrame` s.
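
    Examples
    --------
    Read a Parquet file into a DataFrame (the path below is illustrative):

    >>> df = pl.read_parquet("path/to/file.parquet")  # doctest: +SKIP

    Read a subset of columns and add a row index column (the column names are
    illustrative):

    >>> df = pl.read_parquet(
    ...     "path/to/file.parquet",
    ...     columns=["a", "b"],
    ...     row_index_name="row_nr",
    ... )  # doctest: +SKIP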
    """
    if schema is not None:
        msg = "the `schema` parameter of `read_parquet` is considered unstable."
        issue_unstable_warning(msg)
    if hive_schema is not None:
        msg = "the `hive_schema` parameter of `read_parquet` is considered unstable."
        issue_unstable_warning(msg)

    # Dispatch to the PyArrow reader if requested.
    if use_pyarrow:
        if n_rows is not None:
            msg = "`n_rows` cannot be used with `use_pyarrow=True`"
            raise ValueError(msg)
        if include_file_paths is not None:
            msg = "`include_file_paths` cannot be used with `use_pyarrow=True`"
            raise ValueError(msg)
        if schema is not None:
            msg = "`schema` cannot be used with `use_pyarrow=True`"
            raise ValueError(msg)
        if hive_schema is not None:
            msg = (
                "cannot use `hive_partitions` with `use_pyarrow=True`"
                "\n\nHint: Pass `pyarrow_options` instead with a 'partitioning' entry."
            )
            raise TypeError(msg)

        return _read_parquet_with_pyarrow(
            source,
            columns=columns,
            storage_options=storage_options,
            pyarrow_options=pyarrow_options,
            memory_map=memory_map,
            rechunk=rechunk,
        )

    # Otherwise defer to `scan_parquet` (without caching) and collect eagerly.
    lf = scan_parquet(
        source,
        n_rows=n_rows,
        row_index_name=row_index_name,
        row_index_offset=row_index_offset,
        parallel=parallel,
        use_statistics=use_statistics,
        hive_partitioning=hive_partitioning,
        schema=schema,
        hive_schema=hive_schema,
        try_parse_hive_dates=try_parse_hive_dates,
        rechunk=rechunk,
        low_memory=low_memory,
        cache=False,
        storage_options=storage_options,
        credential_provider=credential_provider,
        retries=retries,
        glob=glob,
        include_file_paths=include_file_paths,
        allow_missing_columns=allow_missing_columns,
    )

    if columns is not None:
        if is_int_sequence(columns):
            lf = lf.select(F.nth(columns))
        else:
            lf = lf.select(columns)

    return lf.collect()


def _read_parquet_with_pyarrow(
    source: str
    | Path
    | IO[bytes]
    | bytes
    | list[str]
    | list[Path]
    | list[IO[bytes]]
    | list[bytes],
    *,
    columns: list[int] | list[str] | None = None,
    storage_options: dict[str, Any] | None = None,
    pyarrow_options: dict[str, Any] | None = None,
    memory_map: bool = True,
    rechunk: bool = True,
) -> DataFrame:
    pyarrow_parquet = import_optional(
        "pyarrow.parquet",
        err_prefix="",
        err_suffix="is required when using `read_parquet(..., use_pyarrow=True)`",
    )
    pyarrow_options = pyarrow_options or {}

    # Normalize the input into a list of sources that pyarrow can read one by one.
    sources: list[Any] = []
    if isinstance(source, list):
        if len(source) > 0 and isinstance(source[0], (bytes, io.IOBase)):
            sources = source
        else:
            sources = [source]
    else:
        sources = [source]

    results: list[DataFrame] = []
    for source in sources:
        with prepare_file_arg(
            source, use_pyarrow=True, storage_options=storage_options
        ) as source_prep:
            pa_table = pyarrow_parquet.read_table(
                source_prep,
                memory_map=memory_map,
                columns=columns,
                **pyarrow_options,
            )
        result = from_arrow(pa_table, rechunk=rechunk)
        results.append(result)

    if len(results) == 1:
        return results[0]
    else:
        return plconcat(results)


def read_parquet_schema(source: str | Path | IO[bytes] | bytes) -> dict[str, DataType]:
    """
    Get the schema of a Parquet file without reading data.

    Parameters
    ----------
    source
        Path to a file or a file-like object (by "file-like object" we refer to objects
        that have a `read()` method, such as a file handler like the builtin `open`
        function, or a `BytesIO` instance). For file-like objects, the stream position
        may not be updated accordingly after reading.

    Returns
    -------
    dict
        Dictionary mapping column names to datatypes
    """
    if isinstance(source, (str, Path)):
        source = normalize_filepath(source, check_not_directory=False)
    return _read_parquet_schema(source)


def read_parquet_metadata(source: str | Path | IO[bytes] | bytes) -> dict[str, str]:
    """
    Get file-level custom metadata of a Parquet file without reading data.

    .. warning::
        This functionality is considered **experimental**. It may be removed or
        changed at any point without it being considered a breaking change.

    Parameters
    ----------
    source
        Path to a file or a file-like object (by "file-like object" we refer to objects
        that have a `read()` method, such as a file handler like the builtin `open`
        function, or a `BytesIO` instance). For file-like objects, the stream position
        may not be updated accordingly after reading.

    Returns
    -------
    dict
        Dictionary with the metadata. Empty if no custom metadata is available.
    """
    if isinstance(source, (str, Path)):
        source = normalize_filepath(source, check_not_directory=False)
    return _read_parquet_metadata(source)


@deprecate_renamed_parameter("row_count_name", "row_index_name", version="0.20.4")
@deprecate_renamed_parameter("row_count_offset", "row_index_offset", version="0.20.4")
def scan_parquet(
    source: FileSource,
    *,
    n_rows: int | None = None,
    row_index_name: str | None = None,
    row_index_offset: int = 0,
    parallel: ParallelStrategy = "auto",
    use_statistics: bool = True,
    hive_partitioning: bool | None = None,
    glob: bool = True,
    schema: SchemaDict | None = None,
    hive_schema: SchemaDict | None = None,
    try_parse_hive_dates: bool = True,
    rechunk: bool = False,
    low_memory: bool = False,
    cache: bool = True,
    storage_options: dict[str, Any] | None = None,
    credential_provider: CredentialProviderFunction | Literal["auto"] | None = "auto",
    retries: int = 2,
    include_file_paths: str | None = None,
    allow_missing_columns: bool = False,
    cast_options: ScanCastOptions | None = None,
) -> LazyFrame:
    """
    Lazily read from a local or cloud-hosted parquet file (or files).

    This function allows the query optimizer to push down predicates and projections to
    the scan level, typically increasing performance and reducing memory overhead.

    .. versionchanged:: 0.20.4
        * The `row_count_name` parameter was renamed `row_index_name`.
        * The `row_count_offset` parameter was renamed `row_index_offset`.

    Parameters
    ----------
    source
        Path(s) to a file or directory.
        When needing to authenticate for scanning cloud locations, see the
        `storage_options` parameter.
    n_rows
        Stop reading from parquet file after reading `n_rows`.
    row_index_name
        If not None, this will insert a row index column with the given name into the
        DataFrame.
    row_index_offset
        Offset to start the row index column (only used if the name is set)
    parallel : {'auto', 'columns', 'row_groups', 'prefiltered', 'none'}
        This determines the direction and strategy of parallelism. 'auto' will
        try to determine the optimal direction.

        The `prefiltered` strategy first evaluates the pushed-down predicates in
        parallel and determines a mask of which rows to read. Then, it
        parallelizes over both the columns and the row groups while filtering
        out rows that do not need to be read. This can provide significant
        speedups for large files (i.e. many row-groups) with a predicate that
        filters clustered rows or filters heavily. In other cases,
        `prefiltered` may slow down the scan compared to other strategies.

        The `prefiltered` strategy falls back to `auto` if no predicate is
        given.

        .. warning::
            The `prefiltered` strategy is considered **unstable**. It may be
            changed at any point without it being considered a breaking change.

    use_statistics
        Use statistics in the parquet to determine if pages
        can be skipped from reading.
    hive_partitioning
        Infer statistics and schema from hive partitioned URL and use them
        to prune reads.
    glob
        Expand path given via globbing rules.
    schema
        Specify the datatypes of the columns. The datatypes must match the
        datatypes in the file(s). If there are extra columns that are not in the
        file(s), consider also enabling `allow_missing_columns`.

        .. warning::
            This functionality is considered **unstable**. It may be changed
            at any point without it being considered a breaking change.
    hive_schema
        The column names and data types of the columns by which the data is partitioned.
        If set to `None` (default), the schema of the Hive partitions is inferred.

        .. warning::
            This functionality is considered **unstable**. It may be changed
            at any point without it being considered a breaking change.
    try_parse_hive_dates
        Whether to try parsing hive values as date/datetime types.
    rechunk
        In case of reading multiple files via a glob pattern, rechunk the final DataFrame
        into contiguous memory chunks.
    low_memory
        Reduce memory pressure at the expense of performance.
    cache
        Cache the result after reading.
    storage_options
        Options that indicate how to connect to a cloud provider.

        The cloud providers currently supported are AWS, GCP, and Azure.
        See supported keys here:

        * `aws <https://docs.rs/object_store/latest/object_store/aws/enum.AmazonS3ConfigKey.html>`_
        * `gcp <https://docs.rs/object_store/latest/object_store/gcp/enum.GoogleConfigKey.html>`_
        * `azure <https://docs.rs/object_store/latest/object_store/azure/enum.AzureConfigKey.html>`_
        * Hugging Face (`hf://`): Accepts an API key under the `token` parameter: \
          `{'token': '...'}`, or by setting the `HF_TOKEN` environment variable.

        If `storage_options` is not provided, Polars will try to infer the information
        from environment variables.
    credential_provider
        Provide a function that can be called to provide cloud storage
        credentials. The function is expected to return a dictionary of
        credential keys along with an optional credential expiry time.

        .. warning::
            This functionality is considered **unstable**. It may be changed
            at any point without it being considered a breaking change.
    retries
        Number of retries if accessing a cloud instance fails.
    include_file_paths
        Include the path of the source file(s) as a column with this name.
    allow_missing_columns
        When reading a list of parquet files, if a column existing in the first
        file cannot be found in subsequent files, the default behavior is to
        raise an error. However, if `allow_missing_columns` is set to
        `True`, a full-NULL column is returned instead of erroring for the files
        that do not contain the column.
    cast_options
        Configuration for column type-casting during scans. Useful for datasets
        containing files that have differing schemas.

        .. warning::
            This functionality is considered **unstable**. It may be changed
            at any point without it being considered a breaking change.

    See Also
    --------
    read_parquet
    scan_pyarrow_dataset

    Examples
    --------
    Scan a local Parquet file.

    >>> pl.scan_parquet("path/to/file.parquet")  # doctest: +SKIP
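
    Push a filter and a projection into the scan so only the needed data is
    decoded; the column names here are illustrative:

    >>> (
    ...     pl.scan_parquet("path/to/file.parquet")
    ...     .filter(pl.col("id") > 100)
    ...     .select("id", "value")
    ...     .collect()
    ... )  # doctest: +SKIP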

    Scan a file on AWS S3.

    >>> source = "s3://bucket/*.parquet"
    >>> pl.scan_parquet(source)  # doctest: +SKIP
    >>> storage_options = {
    ...     "aws_access_key_id": "<secret>",
    ...     "aws_secret_access_key": "<secret>",
    ...     "aws_region": "us-east-1",
    ... }
    >>> pl.scan_parquet(source, storage_options=storage_options)  # doctest: +SKIP
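
    Provide credentials with a callable instead of static `storage_options`.
    This is a sketch: the provider simply returns the credential mapping and an
    optional expiry time, as described under `credential_provider`; the key
    names shown are illustrative.

    >>> def get_credentials():  # doctest: +SKIP
    ...     return {"aws_access_key_id": "<key>", "aws_secret_access_key": "<secret>"}, None
    >>> pl.scan_parquet(source, credential_provider=get_credentials)  # doctest: +SKIP

    Add a row index and limit the number of rows that are read (illustrative path):

    >>> pl.scan_parquet(
    ...     "path/to/file.parquet",
    ...     row_index_name="row_nr",
    ...     n_rows=100,
    ... ).collect()  # doctest: +SKIP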
    """
    if schema is not None:
        msg = "the `schema` parameter of `scan_parquet` is considered unstable."
        issue_unstable_warning(msg)
    if hive_schema is not None:
        msg = "the `hive_schema` parameter of `scan_parquet` is considered unstable."
        issue_unstable_warning(msg)
    if cast_options is not None:
        msg = "The `cast_options` parameter of `scan_parquet` is considered unstable."
        issue_unstable_warning(msg)

    if isinstance(source, (str, Path)):
        source = normalize_filepath(source, check_not_directory=False)
    elif is_path_or_str_sequence(source):
        source = [
            normalize_filepath(source, check_not_directory=False) for source in source
        ]

    credential_provider_builder = _init_credential_provider_builder(
        credential_provider, source, storage_options, "scan_parquet"
    )

    return _scan_parquet_impl(
        source,
        n_rows=n_rows,
        cache=cache,
        parallel=parallel,
        rechunk=rechunk,
        row_index_name=row_index_name,
        row_index_offset=row_index_offset,
        storage_options=storage_options,
        credential_provider=credential_provider_builder,
        low_memory=low_memory,
        use_statistics=use_statistics,
        hive_partitioning=hive_partitioning,
        schema=schema,
        hive_schema=hive_schema,
        try_parse_hive_dates=try_parse_hive_dates,
        retries=retries,
        glob=glob,
        include_file_paths=include_file_paths,
        allow_missing_columns=allow_missing_columns,
        cast_options=cast_options,
    )


def _scan_parquet_impl(
    source: str | list[str] | list[Path] | IO[str] | IO[bytes],
    *,
    n_rows: int | None = None,
    cache: bool = True,
    parallel: ParallelStrategy = "auto",
    rechunk: bool = True,
    row_index_name: str | None = None,
    row_index_offset: int = 0,
    storage_options: dict[str, object] | None = None,
    credential_provider: CredentialProviderBuilder | None = None,
    low_memory: bool = False,
    use_statistics: bool = True,
    hive_partitioning: bool | None = None,
    glob: bool = True,
    schema: SchemaDict | None = None,
    hive_schema: SchemaDict | None = None,
    try_parse_hive_dates: bool = True,
    retries: int = 2,
    include_file_paths: str | None = None,
    allow_missing_columns: bool = False,
    cast_options: ScanCastOptions | None = None,
) -> LazyFrame:
    if isinstance(source, list):
        sources = source
        source = None  # type: ignore[assignment]
    else:
        sources = []

    if storage_options:
        storage_options = list(storage_options.items())  # type: ignore[assignment]
    else:
        # Handle the empty-dict case.
        storage_options = None

    pylf = PyLazyFrame.new_from_parquet(
        source,
        sources,
        n_rows,
        cache,
        parallel,
        rechunk,
        parse_row_index_args(row_index_name, row_index_offset),
        low_memory,
        cloud_options=storage_options,
        credential_provider=credential_provider,
        use_statistics=use_statistics,
        hive_partitioning=hive_partitioning,
        schema=schema,
        hive_schema=hive_schema,
        try_parse_hive_dates=try_parse_hive_dates,
        retries=retries,
        glob=glob,
        include_file_paths=include_file_paths,
        allow_missing_columns=allow_missing_columns,
        cast_options=cast_options,
    )
    return wrap_ldf(pylf)