
    M/PhA                     Z    d Z ddlmZ ddlZddlmZ ddZd Zdd	Z		 ddZ
ddZddZdS )a  extra statistical function and helper functions

contains:

* goodness-of-fit tests
  - powerdiscrepancy
  - gof_chisquare_discrete
  - gof_binning_discrete



Author: Josef Perktold
License : BSD-3

changes
-------
2013-02-25 : add chisquare_power, effectsize and "value"

    )lrangeN)stats        c                    t          j        |           }t          j        |          }t          |t                    s|}n<|dk    rd}n3|dk    rd}n*|dk    rd}n!|dk    rd}n|d	k    rd
}nt	          d          t          j        ||          }|}	|j        dk    rMt          j        |          }|dk    r|j        }	|j	        dk    r!t          j        |          }|dk    r|j        }t          j
        t          j        ||          |dd          r	|d|	z  z  }
nCt          j
        t          j        ||          ddd          r|}
|	|z  }nt	          d          |j        |         }|j        |         |k    rt	          d          |dk    r;d|z  t          j        |d|	z  z  t          j        ||z            z  |          z  }nx|dk    r;d|z  t          j        |d|	z  z  t          j        ||z            z  |          z  }n7d|z  |z  |dz   z  t          j        |d|	z  z  ||z  |z  dz
  z  |          z  }|t          j                            ||dz
  |z
            fS )aR  Calculates power discrepancy, a class of goodness-of-fit tests
    as a measure of discrepancy between observed and expected data.

    This contains several goodness-of-fit tests as special cases, see the
    description of lambd, the exponent of the power discrepancy. The pvalue
    is based on the asymptotic chi-square distribution of the test statistic.

    freeman_tukey:
    D(x|\theta) = \sum_j (\sqrt{x_j} - \sqrt{e_j})^2

    Parameters
    ----------
    o : Iterable
        Observed values
    e : Iterable
        Expected values
    lambd : {float, str}
        * float : exponent `a` for power discrepancy
        * 'loglikeratio': a = 0
        * 'freeman_tukey': a = -0.5
        * 'pearson': a = 1   (standard chisquare test statistic)
        * 'modified_loglikeratio': a = -1
        * 'cressie_read': a = 2/3
        * 'neyman' : a = -2 (Neyman-modified chisquare, reference from a book?)
    axis : int
        axis for observations of one series
    ddof : int
        degrees of freedom correction,

    Returns
    -------
    D_obs : Discrepancy of observed values
    pvalue : pvalue


    References
    ----------
    Cressie, Noel  and Timothy R. C. Read, Multinomial Goodness-of-Fit Tests,
        Journal of the Royal Statistical Society. Series B (Methodological),
        Vol. 46, No. 3 (1984), pp. 440-464

    Campbell B. Read: Freeman-Tukey chi-squared goodness-of-fit statistics,
        Statistics & Probability Letters 18 (1993) 271-278

    Nobuhiro Taneichi, Yuri Sekiya, Akio Suzukawa, Asymptotic Approximations
        for the Distributions of the Multinomial Goodness-of-Fit Statistics
        under Local Alternatives, Journal of Multivariate Analysis 81, 335?359 (2002)
    Steele, M. 1,2, C. Hurst 3 and J. Chaseling, Simulated Power of Discrete
        Goodness-of-Fit Tests for Likert Type Data

    Examples
    --------

    >>> observed = np.array([ 2.,  4.,  2.,  1.,  1.])
    >>> expected = np.array([ 0.2,  0.2,  0.2,  0.2,  0.2])

    for checking correct dimension with multiple series

    >>> powerdiscrepancy(np.column_stack((observed,observed)).T, 10*expected, lambd='freeman_tukey',axis=1)
    (array([[ 2.745166,  2.745166]]), array([[ 0.6013346,  0.6013346]]))
    >>> powerdiscrepancy(np.column_stack((observed,observed)).T, 10*expected,axis=1)
    (array([[ 2.77258872,  2.77258872]]), array([[ 0.59657359,  0.59657359]]))
    >>> powerdiscrepancy(np.column_stack((observed,observed)).T, 10*expected, lambd=0,axis=1)
    (array([[ 2.77258872,  2.77258872]]), array([[ 0.59657359,  0.59657359]]))
    >>> powerdiscrepancy(np.column_stack((observed,observed)).T, 10*expected, lambd=1,axis=1)
    (array([[ 3.,  3.]]), array([[ 0.5578254,  0.5578254]]))
    >>> powerdiscrepancy(np.column_stack((observed,observed)).T, 10*expected, lambd=2/3.0,axis=1)
    (array([[ 2.89714546,  2.89714546]]), array([[ 0.57518277,  0.57518277]]))
    >>> powerdiscrepancy(np.column_stack((observed,observed)).T, expected, lambd=2/3.0,axis=1)
    (array([[ 2.89714546,  2.89714546]]), array([[ 0.57518277,  0.57518277]]))
    >>> powerdiscrepancy(np.column_stack((observed,observed)), expected, lambd=2/3.0, axis=0)
    (array([[ 2.89714546,  2.89714546]]), array([[ 0.57518277,  0.57518277]]))

    each random variable can have different total count/sum

    >>> powerdiscrepancy(np.column_stack((observed,2*observed)), expected, lambd=2/3.0, axis=0)
    (array([[ 2.89714546,  5.79429093]]), array([[ 0.57518277,  0.21504648]]))
    >>> powerdiscrepancy(np.column_stack((observed,2*observed)), expected, lambd=2/3.0, axis=0)
    (array([[ 2.89714546,  5.79429093]]), array([[ 0.57518277,  0.21504648]]))
    >>> powerdiscrepancy(np.column_stack((2*observed,2*observed)), expected, lambd=2/3.0, axis=0)
    (array([[ 5.79429093,  5.79429093]]), array([[ 0.21504648,  0.21504648]]))
    >>> powerdiscrepancy(np.column_stack((2*observed,2*observed)), 20*expected, lambd=2/3.0, axis=0)
    (array([[ 5.79429093,  5.79429093]]), array([[ 0.21504648,  0.21504648]]))
    >>> powerdiscrepancy(np.column_stack((observed,2*observed)), np.column_stack((10*expected,20*expected)), lambd=2/3.0, axis=0)
    (array([[ 2.89714546,  5.79429093]]), array([[ 0.57518277,  0.21504648]]))
    >>> powerdiscrepancy(np.column_stack((observed,2*observed)), np.column_stack((10*expected,20*expected)), lambd=-1, axis=0)
    (array([[ 2.77258872,  5.54517744]]), array([[ 0.59657359,  0.2357868 ]]))
    loglikeratior   freeman_tukeyg      pearson   modified_loglikeratiocressie_readgUUUUUU?znlambd has to be a number or one of loglikeratio, freeman_tukey, pearson, modified_loglikeratio or cressie_read)axis:0yE>)rtolatol      ?zZobserved and expected need to have the same number of observations, or e needs to add to 1z:observed and expected need to have the same number of bins   )nparray
isinstancestr
ValueErrorsumsize
atleast_2dTndimallcloseshapelogr   chi2sf)observedexpectedlambdr   ddofoeanntpkD_obss                U/var/www/html/test/jupyter/venv/lib/python3.11/site-packages/statsmodels/stats/gof.pypowerdiscrepancyr0      s   r 	A
AeS!! FN""AAo%%AAiAA---AAn$$AA E F F F 	qtA	
BvaxxM!199B6Q;;a  AqyyC	{26!$'''A>>> Ks2vJ	RVAD)))14a	@	@	@ KF J K K 	K	Awt} * + + 	+ 	Avv!bfQBZ"&1++5DAAAA	
b!bfQBZ"&1++5DAAAA!AqsbfQBZAaC!8a<%@tLLLL%*--ac$h////    c                 z   t          |          }d}d|z  }t          t          | j        d          t	          | j        d          dz             }d}	t          | j        d          g}
g }|D ]V} | j        |g|R  }||	z
  |dz
  k    r:|
                    |           |                    ||	z
             |}	|d|z
  k    r nW|
d         | j        k     r2|
                    | j                   |                    d|	z
             t          j	        |
          }
t          j	        |          }|
d	z   }| j        |d<   t          j
        ||          \  }} | j        |
g|R  }t          j        t          j	        |          ||z            \  }}||||k    d
|dt          |          dt          |          fS )a  perform chisquare test for random sample of a discrete distribution

    Parameters
    ----------
    distname : str
        name of distribution function
    arg : sequence
        parameters of distribution
    alpha : float
        significance level, threshold for p-value

    Returns
    -------
    result : bool
        0 if test passes, 1 if test fails

    Notes
    -----
    originally written for scipy.stats test suite,
    still needs to be checked for standalone usage, insufficient input checking
    may not run yet (after copy/paste)

    refactor: maybe a class, check returns, or separate binning from
        test results
       r     r
   r   +=r   r   zchisquare - test for z	at arg = z with pval = )lenr   maxr)   minbcdfappendr   r   	histogramr   	chisquarer   )distfnargrvsalphamsgr*   nsuppwsuppdistsupportlastdistsuppdistmassiicurrenthistsuppfreqhsuppcdfschispvals                       r/   gof_chisquare_discreterR      s   : 	CAEIE VXu--s68T/B/BQ/FGGKDFHe$$%HH  &*R%%%%T>U5[((OOBOOGdN+++D!E'""|vx!!!$x!!Hx!!H }H(HQK ,s8,,KD%6:h$$$$D/"(4..8<<KT$u+.33s3xxxxD			(C C Cr1   r3   c                    t          |           }d|z  }t          t          |j        d          t	          |j        d          dz             }d}t          |j        d          g}g }	|D ]V}
 |j        |
g|R  }||z
  |dz
  k    r:|                    |
           |	                    ||z
             |}|d|z
  k    r nW|d         |j        k     r2|                    |j                   |	                    d|z
             t          j	        |          }t          j	        |	          }	|dz   }|j        |d<   t          j
        | |          \  }} |j        |g|R  }t          j	        |          ||	z  |fS )	a  get bins for chisquare type gof tests for a discrete distribution

    Parameters
    ----------
    rvs : ndarray
        sample data
    distname : str
        name of distribution function
    arg : sequence
        parameters of distribution
    nsupp : int
        number of bins. The algorithm tries to find bins with equal weights.
        depending on the distribution, the actual number of bins can be smaller.

    Returns
    -------
    freq : ndarray
        empirical frequencies for sample; not normalized, adds up to sample size
    expfreq : ndarray
        theoretical frequencies according to distribution
    histsupp : ndarray
        bin boundaries for histogram, (added 1e-8 for numerical robustness)

    Notes
    -----
    The results can be used for a chisquare test ::

        (chis,pval) = stats.chisquare(freq, expfreq)

    originally written for scipy.stats test suite,
    still needs to be checked for standalone usage, insufficient input checking
    may not run yet (after copy/paste)

    refactor: maybe a class, check returns, or separate binning from
        test results
    todo :
      optimal number of bins ? (check easyfit),
      recommendation in literature at least 5 expected observations in each bin

    r   r4   r5   r
   r   r6   r   r   )r7   r   r8   r)   r9   r:   r;   r<   r   r   r=   )rA   r?   r@   rD   r*   rE   rF   rG   rH   rI   rJ   rK   rL   rM   rN   rO   s                   r/   gof_binning_discreterT      s   X 	CAIE VXu--s68T/B/BQ/FGGKDFHe$$%HH  &*R%%%%T>U5[((OOBOOGdN+++D!E'""|vx!!!$x!!Hx!!H }H(HQK c(++JD6:h$$$$D8D>>1X:x//r1   Tc                 ,   t          j        |           } t          |           }|                     d          }|?t          j        |t
                    }|                    |t          |          z             t          j        |t
                    }| |z
  dz  |z                      d          }|dk    r't          j        	                    ||dz
  |z
            }n-t          j
        	                    ||dz
  |z
  |dz  |z            }|r||fS ||fS )a  chisquare goodness-of-fit test

    The null hypothesis is that the distance between the expected distribution
    and the observed frequencies is ``value``. The alternative hypothesis is
    that the distance is larger than ``value``. ``value`` is normalized in
    terms of effect size.

    The standard chisquare test has the null hypothesis that ``value=0``, that
    is the distributions are the same.


    Notes
    -----
    The case with value greater than zero is similar to an equivalence test,
    that the exact null hypothesis is replaced by an approximate hypothesis.
    However, TOST "reverses" null and alternative hypothesis, while here the
    alternative hypothesis is that the distance (divergence) is larger than a
    threshold.

    References
    ----------
    McLaren, ...
    Drost,...

    See Also
    --------
    powerdiscrepancy
    scipy.stats.chisquare

    r   Nr   r
   )r   asarrayr7   r   emptyfloatfillr   r!   r"   ncx2)	f_obsf_expvaluer&   return_basicn_binsnobschisqpvalues	            r/   r>   r>   T  s    @ JuEZZF99Q<<D}''

4%--'(((Jue$$Eema%',,Q//Ezzufqj4&788ufqj4&7DII f}f}r1   皙?c                     t           j                            ||dz
  |z
            }t           j                            ||dz
  |z
  | dz  |z            }|S )aq  power of chisquare goodness of fit test

    effect size is sqrt of chisquare statistic divided by nobs

    Parameters
    ----------
    effect_size : float
        This is the deviation from the Null of the normalized chi_square
        statistic. This follows Cohen's definition (sqrt).
    nobs : int or float
        number of observations
    n_bins : int (or float)
        number of bins, or points in the discrete distribution
    alpha : float in (0,1)
        significance level of the test, default alpha=0.05

    Returns
    -------
    power : float
        power of the test at given significance level at effect size

    Notes
    -----
    This function also works vectorized if all arguments broadcast.

    This can also be used to calculate the power for power divergence test.
    However, for the range of more extreme values of the power divergence
    parameter, this power is not a very good approximation for samples of
    small to medium size (Drost et al. 1989)

    References
    ----------
    Drost, ...

    See Also
    --------
    chisquare_effectsize
    statsmodels.stats.GofChisquarePower

    r
   r   )r   r!   isfrZ   r"   )effect_sizer`   r_   rB   r&   critpowers          r/   chisquare_powerri     sQ    R :>>%!d!233DJMM$
T 1;>D3HIIELr1   c                    t          j        | t                    } t          j        |t                    }| |                     |          z  } ||                    |          z  }|| z
  dz  | z                      |          }|D|\  }}|| z
  | z                      |          }t          j        ||z  |z
  |z
  |dz
  z  d          }|rt          j        |          S |S )as  effect size for a chisquare goodness-of-fit test

    Parameters
    ----------
    probs0 : array_like
        probabilities or cell frequencies under the Null hypothesis
    probs1 : array_like
        probabilities or cell frequencies under the Alternative hypothesis
        probs0 and probs1 need to have the same length in the ``axis`` dimension.
        and broadcast in the other dimensions
        Both probs0 and probs1 are normalized to add to one (in the ``axis``
        dimension).
    correction : None or tuple
        If None, then the effect size is the chisquare statistic divide by
        the number of observations.
        If the correction is a tuple (nobs, df), then the effectsize is
        corrected to have less bias and a smaller variance. However, the
        correction can make the effectsize negative. In that case, the
        effectsize is set to zero.
        Pederson and Johnson (1990) as referenced in McLaren et all. (1994)
    cohen : bool
        If True, then the square root is returned as in the definition of the
        effect size by Cohen (1977), If False, then the original effect size
        is returned.
    axis : int
        If the probability arrays broadcast to more than 1 dimension, then
        this is the axis over which the sums are taken.

    Returns
    -------
    effectsize : float
        effect size of chisquare test

    r   Nr   r   )r   rV   rX   r   maximumsqrt)	probs0probs1
correctioncohenr   d2r`   dfdiffs	            r/   chisquare_effectsizert     s    F Z&&FZ&&Ffjj&&&Ffjj&&&FF?Q

'	,	,T	2	2Bb&F*//55ZdT)B.4"9=qAA wr{{	r1   )r   r   r   )r3   )Nr   r   T)rc   r   )NTr   )__doc__statsmodels.compat.pythonr   numpyr   scipyr   r0   rR   rT   r>   ri   rt    r1   r/   <module>rz      s    & - , , , , ,          N0 N0 N0 N0jDC DC DCNP0 P0 P0 P0h3 3 3 3l+ + + +\2 2 2 2 2 2r1   