
"""Better tokenizing for coverage.py."""

from __future__ import annotations

import ast
import io
import keyword
import re
import sys
import token
import tokenize

from collections.abc import Iterable

from coverage import env
from coverage.types import TLineNo, TSourceTokenLines

TokenInfos = Iterable[tokenize.TokenInfo]


def _phys_tokens(toks: TokenInfos) -> TokenInfos:
    """Return all physical tokens, even line continuations.

    tokenize.generate_tokens() doesn't return a token for the backslash that
    continues lines.  This wrapper provides those tokens so that we can
    re-create a faithful representation of the original source.

    Returns the same values as generate_tokens()

    """
    last_line: str | None = None
    last_lineno = -1
    last_ttext = ""
    for ttype, ttext, (slineno, scol), (elineno, ecol), ltext in toks:
        if last_lineno != elineno:
            if last_line and last_line.endswith("\\\n"):
                # The previous physical line ended with a backslash
                # continuation: inject a fake token for that backslash,
                # unless the backslash is already part of the current token.
                inject_backslash = True
                if last_ttext.endswith("\\"):
                    inject_backslash = False
                elif ttype == token.STRING:
                    if "\n" in ttext and ttext.split("\n", 1)[0].endswith("\\"):
                        # A multi-line string whose first line ends with the
                        # backslash already contains it.
                        inject_backslash = False
                elif env.PYBEHAVIOR.fstring_syntax and ttype == token.FSTRING_MIDDLE:
                    inject_backslash = False
                if inject_backslash:
                    # Figure out what column the backslash is in, and yield a
                    # fake token for it.
                    ccol = len(last_line.split("\n")[-2]) - 1
                    yield tokenize.TokenInfo(
                        99999, "\\\n", (slineno, ccol), (slineno, ccol + 2), last_line,
                    )
            last_line = ltext
        if ttype not in (tokenize.NEWLINE, tokenize.NL):
            last_ttext = ttext
        yield tokenize.TokenInfo(ttype, ttext, (slineno, scol), (elineno, ecol), ltext)
        last_lineno = elineno


def find_soft_key_lines(source: str) -> set[TLineNo]:
    """Helper for finding lines with soft keywords, like match/case lines."""
    soft_key_lines: set[TLineNo] = set()

    for node in ast.walk(ast.parse(source)):
        if sys.version_info >= (3, 10) and isinstance(node, ast.Match):
            soft_key_lines.add(node.lineno)
            for case in node.cases:
                soft_key_lines.add(case.pattern.lineno)
        elif sys.version_info >= (3, 12) and isinstance(node, ast.TypeAlias):
            soft_key_lines.add(node.lineno)

    return soft_key_lines


def source_token_lines(source: str) -> TSourceTokenLines:
    """Generate a series of lines, one for each line in `source`.

    Each line is a list of pairs, each pair is a token::

        [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ]

    Each pair has a token class, and the token text.

    If you concatenate all the token texts, and then join them with newlines,
    you should have your original `source` back, with two differences:
    trailing white space is not preserved, and a final line with no newline
    is indistinguishable from a final line with a newline.

    """
    ws_tokens = {token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL}
    line: list[tuple[str, str]] = []
    col = 0

    source = source.expandtabs(8).replace("\r\n", "\n")
    tokgen = generate_tokens(source)

    if env.PYBEHAVIOR.soft_keywords:
        soft_key_lines = find_soft_key_lines(source)
    else:
        soft_key_lines = set()

    for ttype, ttext, (sline, scol), (_, ecol), _ in _phys_tokens(tokgen):
        mark_start = True
        for part in re.split("(\n)", ttext):
            if part == "\n":
                yield line
                line = []
                col = 0
                mark_end = False
            elif part == "":
                mark_end = False
            elif ttype in ws_tokens:
                mark_end = False
            else:
                if env.PYBEHAVIOR.fstring_syntax and ttype == token.FSTRING_MIDDLE:
                    # Restore the doubled braces that escaped braces have in
                    # the f-string source.
                    part = part.replace("{", "{{").replace("}", "}}")
                    ecol = scol + len(part)
                if mark_start and scol > col:
                    line.append(("ws", " " * (scol - col)))
                    mark_start = False
                tok_class = tokenize.tok_name.get(ttype, "xx").lower()[:3]
                if ttype == token.NAME:
                    if keyword.iskeyword(ttext):
                        # Hard keywords are always keywords.
                        tok_class = "key"
                    elif env.PYBEHAVIOR.soft_keywords and keyword.issoftkeyword(ttext):
                        # Soft keywords only count at the start of their line.
                        if len(line) == 0:
                            is_start_of_line = True
                        elif (len(line) == 1) and line[0][0] == "ws":
                            is_start_of_line = True
                        else:
                            is_start_of_line = False
                        if is_start_of_line and sline in soft_key_lines:
                            tok_class = "key"
                line.append((tok_class, part))
                mark_end = True
        scol = 0
        if mark_end:
            col = ecol

    if line:
        yield line


def generate_tokens(text: str) -> TokenInfos:
    """A helper around `tokenize.generate_tokens`.

    Originally this was used to cache the results, but it didn't seem to make
    reporting go faster, and caused issues with using too much memory.

    """
    readline = io.StringIO(text).readline
    return tokenize.generate_tokens(readline)


def source_encoding(source: bytes) -> str:
    """Determine the encoding for `source`, according to PEP 263.

    `source` is a byte string: the text of the program.

    Returns a string, the name of the encoding.

    """
    readline = iter(source.splitlines(True)).__next__
    return tokenize.detect_encoding(readline)[0]