
from __future__ import annotations

from statsmodels.compat.python import lzip

from functools import reduce
import warnings

import numpy as np
import pandas as pd
from scipy import stats

from statsmodels.base.data import handle_data
from statsmodels.base.optimizer import Optimizer
import statsmodels.base.wrapper as wrap
from statsmodels.formula.formulatools import handle_formula_data
from statsmodels.stats.contrast import (
    ContrastResults,
    WaldTestResults,
    t_test_pairwise,
)
from statsmodels.tools.data import _is_using_pandas
from statsmodels.tools.decorators import (
    cache_readonly,
    cached_data,
    cached_value,
)
from statsmodels.tools.numdiff import approx_fprime
from statsmodels.tools.sm_exceptions import (
    HessianInversionWarning,
    ValueWarning,
)
from statsmodels.tools.tools import nan_dot, recipr
from statsmodels.tools.validation import bool_like

ERROR_INIT_KWARGS = False

_model_params_doc = """Parameters
    ----------
    endog : array_like
        A 1-d endogenous response variable. The dependent variable.
    exog : array_like
        A nobs x k array where `nobs` is the number of observations and `k`
        is the number of regressors. An intercept is not included by default
        and should be added by the user. See
        :func:`statsmodels.tools.add_constant`."""

_missing_param_doc = """missing : str
        Available options are 'none', 'drop', and 'raise'. If 'none', no nan
        checking is done. If 'drop', any observations with nans are dropped.
        If 'raise', an error is raised. Default is 'none'."""

_extra_param_doc = """
    hasconst : None or bool
        Indicates whether the RHS includes a user-supplied constant. If True,
        a constant is not checked for and k_constant is set to 1 and all
        result statistics are calculated as if a constant is present. If
        False, a constant is not checked for and k_constant is set to 0.
    **kwargs
        Extra arguments that are used to set model properties when using the
        formula interface."""


class Model:
    __doc__ = """
    A (predictive) statistical model. Intended to be subclassed not used.

    {params_doc}
    {extra_params_doc}

    Attributes
    ----------
    exog_names
    endog_names

    Notes
    -----
    `endog` and `exog` are references to any data provided.  So if the data is
    already stored in numpy arrays and it is changed then `endog` and `exog`
    will change as well.
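
    Examples
    --------
    A minimal sketch with hypothetical data; any concrete subclass such as
    ``statsmodels.api.OLS`` follows the same pattern:

    >>> import numpy as np
    >>> import statsmodels.api as sm
    >>> exog = sm.add_constant(np.random.rand(100, 2))
    >>> endog = exog @ [1., 2., 3.] + np.random.randn(100)
    >>> results = sm.OLS(endog, exog).fit()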
    """.format(params_doc=_model_params_doc,
               extra_params_doc=_missing_param_doc + _extra_param_doc)

    # Maximum number of endogenous variables when using a formula
    _formula_max_endog = 1
    # kwargs that are generically allowed, checked by _check_kwargs
    _kwargs_allowed = [
        "missing", "missing_idx", "formula", "design_info", "hasconst",
        ]

    def __init__(self, endog, exog=None, **kwargs):
        missing = kwargs.pop('missing', 'none')
        hasconst = kwargs.pop('hasconst', None)
        self.data = self._handle_data(endog, exog, missing, hasconst,
                                      **kwargs)
        self.k_constant = self.data.k_constant
        self.exog = self.data.exog
        self.endog = self.data.endog
        self._data_attr = []
        self._data_attr.extend(['exog', 'endog', 'data.exog', 'data.endog'])
        if 'formula' not in kwargs:  # will not be able to unpickle without these
            self._data_attr.extend(['data.orig_endog', 'data.orig_exog'])
        # store keys for extras if we need to recreate model instance
        self._init_keys = list(kwargs.keys())
        if hasconst is not None:
            self._init_keys.append('hasconst')

    def _get_init_kwds(self):
        """return dictionary with extra keys used in model.__init__
        """
        kwds = {key: getattr(self, key, None)
                for key in self._init_keys}

        return kwds

    def _check_kwargs(self, kwargs, keys_extra=None, error=ERROR_INIT_KWARGS):
        # generically allowed kwargs, subclasses can pass extra keys
        kwargs_allowed = [
            "missing", "missing_idx", "formula", "design_info", "hasconst",
            ]
        if keys_extra:
            kwargs_allowed.extend(keys_extra)

        kwargs_invalid = [i for i in kwargs if i not in kwargs_allowed]
        if kwargs_invalid:
            msg = "unknown kwargs " + repr(kwargs_invalid)
            if error is False:
                warnings.warn(msg, ValueWarning)
            else:
                raise ValueError(msg)

    def _handle_data(self, endog, exog, missing, hasconst, **kwargs):
        data = handle_data(endog, exog, missing, hasconst, **kwargs)
        # kwargs arrays could have changed, easier to just attach here
        for key in kwargs:
            if key in ['design_info', 'formula']:  # leave attached to data
                continue
            # pop so we do not start keeping all these twice or references
            try:
                setattr(self, key, data.__dict__.pop(key))
            except KeyError:  # panel already pops keys in data handling
                pass
        return data

    @classmethod
    def from_formula(cls, formula, data, subset=None, drop_cols=None,
                     *args, **kwargs):
        """
        Create a Model from a formula and dataframe.

        Parameters
        ----------
        formula : str or generic Formula object
            The formula specifying the model.
        data : array_like
            The data for the model. See Notes.
        subset : array_like
            An array-like object of booleans, integers, or index values that
            indicate the subset of df to use in the model. Assumes df is a
            `pandas.DataFrame`.
        drop_cols : array_like
            Columns to drop from the design matrix.  Cannot be used to
            drop terms involving categoricals.
        *args
            Additional positional argument that are passed to the model.
        **kwargs
            These are passed to the model with one exception. The
            ``eval_env`` keyword is passed to patsy. It can be either a
            :class:`patsy:patsy.EvalEnvironment` object or an integer
            indicating the depth of the namespace to use. For example, the
            default ``eval_env=0`` uses the calling namespace. If you wish
            to use a "clean" environment set ``eval_env=-1``.

        Returns
        -------
        model
            The model instance.
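
        Examples
        --------
        A minimal sketch, assuming ``df`` is a pandas DataFrame with columns
        ``y``, ``x1`` and ``x2``; any concrete subclass, e.g.
        ``statsmodels.api.OLS``, is used the same way:

        >>> import statsmodels.api as sm
        >>> mod = sm.OLS.from_formula("y ~ x1 + x2", data=df)
        >>> res = mod.fit()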

        Notes
        -----
        data must define __getitem__ with the keys in the formula terms
        args and kwargs are passed on to the model instantiation. E.g.,
        a numpy structured or rec array, a dictionary, or a pandas DataFrame.
        Neval_env   r   )EvalEnvironmentr   r   dropr#   raise)depthr   zendog has evaluated to an array with multiple columns that has shape {}. This occurs when the variable converted to endog is non-numeric (e.g., bool or str).c                    g | ]}|v|	S r@   r@   )r9   x	drop_colss     r2   rC   z&Model.from_formula.<locals>.<listcomp>   s#    BBB!q	/A/AA/A/A/Ar4   )r   r   r   r    )locr&   patsyrU   
isinstanceintgetr	   _formula_max_endogndimshaperG   formatlencolumnsr,   
term_namesremovesubsetupdater   r(   frame)clsr   r(   ri   r[   argsr1   rR   rU   r   tmpr%   r$   r   r    	max_endogcolscolmods       `              r2   from_formulazModel.from_formula   sK   T 8F#D::j$//HH^^------&r**HH#&& 	MH**Y//fG!$gX*13 3 3471%[*	!
Q5;q>I#=#= 5 6<VEK5H5HJ J J  S^^a%7%7BBBBt|BBBD4yy3t|,,,,DzK233$  CC((((%   )0066k")")&13 3 	4 	4 	4 c%//////
s   E))
E65E6c                    | j         j        S )z0
        Names of endogenous variables.
        """
        return self.data.ynames

    @property
    def exog_names(self) -> list[str] | None:
        """
        Names of exogenous variables.
        """
        return self.data.xnames

    def fit(self):
        """
        Fit a model to data.
        """
        raise NotImplementedError

    def predict(self, params, exog=None, *args, **kwargs):
        """
        After a model has been fit predict returns the fitted values.

        This is a placeholder intended to be overwritten by individual models.
        r   )r0   paramsr$   rm   r1   s        r2   predictzModel.predict   
     "!r4   r7   NN)ry   rz   )__name__
__module____qualname__rd   _model_params_doc_missing_param_doc_extra_param_doc__doc__ra   _kwargs_allowedr3   r=   ERROR_INIT_KWARGSrL   r'   classmethodrs   propertyrw   r}   r   r   r@   r4   r2   r   r   ?   s,         	+.1AA 	 	C 	C! , 
 
 
O/ / / /$   04;L & & & &    U U U [Un     X        X " " "" " " " " "r4   r   c                  d     e Zd ZdZd fd	Zd Zd Zd Zd Zd Z		 	 	 ddZ
	 	 ddZddZ xZS )LikelihoodModelz2
    Likelihood model is a subclass of Model.
    """

    def __init__(self, endog, exog=None, **kwargs):
        super().__init__(endog, exog, **kwargs)
        self.initialize()

    def initialize(self):
        """
        Initialize (possibly re-initialize) a Model instance.

        For example, if the the design matrix of a linear model changes then
        initialized can be used to recompute values using the modified design
        matrix.
        """
        pass

    def loglike(self, params):
        """
        Log-likelihood of model.

        Parameters
        ----------
        params : ndarray
            The model parameters used to compute the log-likelihood.

        Notes
        -----
        Must be overridden by subclasses.
        """
        raise NotImplementedError

    def score(self, params):
        """
        Score vector of model.

        The gradient of logL with respect to each parameter.

        Parameters
        ----------
        params : ndarray
            The parameters to use when evaluating the Hessian.

        Returns
        -------
        ndarray
            The score vector evaluated at the parameters.
        r   r   s     r2   scorezLikelihoodModel.score-  s
        """
        raise NotImplementedError

    def information(self, params):
        """

        Returns -1 * Hessian of the log-likelihood evaluated at params.

        Parameters
        ----------
        params : ndarray
            The model parameters.
        r   r   s     r2   informationzLikelihoodModel.information?  s
        """
        raise NotImplementedError

    def hessian(self, params):
        """

        Parameters
        ----------
        params : ndarray
            The parameters to use when evaluating the Hessian.

        Returns
        -------
        ndarray
            The hessian evaluated at the parameters.
        r   r   s     r2   hessianzLikelihoodModel.hessianL  s
        """
        raise NotImplementedError

    def fit(self, start_params=None, method='newton', maxiter=100,
            full_output=True, disp=True, fargs=(), callback=None,
            retall=False, skip_hessian=False, **kwargs):
        """
        Fit method for likelihood based models

        Parameters
        ----------
        start_params : array_like, optional
            Initial guess of the solution for the loglikelihood maximization.
            The default is an array of zeros.
        method : str, optional
            The `method` determines which solver from `scipy.optimize`
            is used, and it can be chosen from among the following strings:

            - 'newton' for Newton-Raphson, 'nm' for Nelder-Mead
            - 'bfgs' for Broyden-Fletcher-Goldfarb-Shanno (BFGS)
            - 'lbfgs' for limited-memory BFGS with optional box constraints
            - 'powell' for modified Powell's method
            - 'cg' for conjugate gradient
            - 'ncg' for Newton-conjugate gradient
            - 'basinhopping' for global basin-hopping solver
            - 'minimize' for generic wrapper of scipy minimize (BFGS by default)

            The explicit arguments in `fit` are passed to the solver,
            with the exception of the basin-hopping solver. Each
            solver has several optional arguments that are not the same across
            solvers. See the notes section below (or scipy.optimize) for the
            available arguments and for the list of explicit arguments that the
            basin-hopping solver supports.
        maxiter : int, optional
            The maximum number of iterations to perform.
        full_output : bool, optional
            Set to True to have all available output in the Results object's
            mle_retvals attribute. The output is dependent on the solver.
            See LikelihoodModelResults notes section for more information.
        disp : bool, optional
            Set to True to print convergence messages.
        fargs : tuple, optional
            Extra arguments passed to the likelihood function, i.e.,
            loglike(x,*args)
        callback : callable callback(xk), optional
            Called after each iteration, as callback(xk), where xk is the
            current parameter vector.
        retall : bool, optional
            Set to True to return list of solutions at each iteration.
            Available in Results object's mle_retvals attribute.
        skip_hessian : bool, optional
            If False (default), then the negative inverse hessian is calculated
            after the optimization. If True, then the hessian will not be
            calculated. However, it will be available in methods that use the
            hessian in the optimization (currently only with `"newton"`).
        kwargs : keywords
            All kwargs are passed to the chosen solver with one exception. The
            following keyword controls what happens after the fit::

                warn_convergence : bool, optional
                    If True, checks the model for the converged flag. If the
                    converged flag is False, a ConvergenceWarning is issued.

        Notes
        -----
        The 'basinhopping' solver ignores `maxiter`, `retall`, `full_output`
        explicit arguments.

        Optional arguments for solvers (see returned Results.mle_settings)::

            'newton'
                tol : float
                    Relative error in params acceptable for convergence.
            'nm' -- Nelder Mead
                xtol : float
                    Relative error in params acceptable for convergence
                ftol : float
                    Relative error in loglike(params) acceptable for
                    convergence
                maxfun : int
                    Maximum number of function evaluations to make.
            'bfgs'
                gtol : float
                    Stop when norm of gradient is less than gtol.
                norm : float
                    Order of norm (np.inf is max, -np.inf is min)
                epsilon
                    If fprime is approximated, use this value for the step
                    size. Only relevant if LikelihoodModel.score is None.
            'lbfgs'
                m : int
                    This many terms are used for the Hessian approximation.
                factr : float
                    A stop condition that is a variant of relative error.
                pgtol : float
                    A stop condition that uses the projected gradient.
                epsilon
                    If fprime is approximated, use this value for the step
                    size. Only relevant if LikelihoodModel.score is None.
                maxfun : int
                    Maximum number of function evaluations to make.
                bounds : sequence
                    (min, max) pairs for each element in x,
                    defining the bounds on that parameter.
                    Use None for one of min or max when there is no bound
                    in that direction.
            'cg'
                gtol : float
                    Stop when norm of gradient is less than gtol.
                norm : float
                    Order of norm (np.inf is max, -np.inf is min)
                epsilon : float
                    If fprime is approximated, use this value for the step
                    size. Can be scalar or vector.  Only relevant if
                    Likelihoodmodel.score is None.
            'ncg'
                fhess_p : callable f'(x,*args)
                    Function which computes the Hessian of f times an arbitrary
                    vector, p.  Should only be supplied if
                    LikelihoodModel.hessian is None.
                avextol : float
                    Stop when the average relative error in the minimizer
                    falls below this amount.
                epsilon : float or ndarray
                    If fhess is approximated, use this value for the step size.
                    Only relevant if Likelihoodmodel.hessian is None.
            'powell'
                xtol : float
                    Line-search error tolerance
                ftol : float
                    Relative error in loglike(params) for acceptable for
                    convergence.
                maxfun : int
                    Maximum number of function evaluations to make.
                start_direc : ndarray
                    Initial direction set.
            'basinhopping'
                niter : int
                    The number of basin hopping iterations.
                niter_success : int
                    Stop the run if the global minimum candidate remains the
                    same for this number of iterations.
                T : float
                    The "temperature" parameter for the accept or reject
                    criterion. Higher "temperatures" mean that larger jumps
                    in function value will be accepted. For best results
                    `T` should be comparable to the separation (in function
                    value) between local minima.
                stepsize : float
                    Initial step size for use in the random displacement.
                interval : int
                    The interval for how often to update the `stepsize`.
                minimizer : dict
                    Extra keyword arguments to be passed to the minimizer
                    `scipy.optimize.minimize()`, for example 'method' - the
                    minimization method (e.g. 'L-BFGS-B'), or 'tol' - the
                    tolerance for termination. Other arguments are mapped from
                    explicit argument of `fit`:
                      - `args` <- `fargs`
                      - `jac` <- `score`
                      - `hess` <- `hess`
            'minimize'
                min_method : str, optional
                    Name of minimization method to use.
                    Any method specific arguments can be passed directly.
                    For a list of methods and their arguments, see
                    documentation of `scipy.optimize.minimize`.
                    If no method is specified, then BFGS is used.
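
        Examples
        --------
        A minimal sketch, assuming ``mod`` is an instance of any
        ``LikelihoodModel`` subclass that implements ``loglike``:

        >>> res = mod.fit(method="bfgs", maxiter=200, disp=False)
        >>> res.params
        >>> res.mle_retvals["converged"]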
        """
        Hinv = None  # error if full_output=0, Hinv not defined

        if start_params is None:
            if hasattr(self, 'start_params'):
                start_params = self.start_params
            elif self.exog is not None:
                start_params = [0.0] * self.exog.shape[1]
            else:
                raise ValueError("If exog is None, then start_params should "
                                 "be specified")

        nobs = self.endog.shape[0]

        # the optimizers minimize, so use the average negative loglikelihood
        def f(params, *args):
            return -self.loglike(params, *args) / nobs

        if method == 'newton':
            # Newton-Raphson uses score and hessian with positive sign
            def score(params, *args):
                return self.score(params, *args) / nobs

            def hess(params, *args):
                return self.hessian(params, *args) / nobs
        else:
            def score(params, *args):
                return -self.score(params, *args) / nobs

            def hess(params, *args):
                return -self.hessian(params, *args) / nobs

        warn_convergence = kwargs.pop('warn_convergence', True)

        # remove covariance keywords before calling the optimizer and restore
        # them in the settings afterwards
        if 'cov_type' in kwargs:
            cov_kwds = kwargs.get('cov_kwds', {})
            kwds = {'cov_type': kwargs['cov_type'], 'cov_kwds': cov_kwds}
            del kwargs['cov_type']
            if 'cov_kwds' in kwargs:
                del kwargs['cov_kwds']
        else:
            kwds = {}
        if 'use_t' in kwargs:
            kwds['use_t'] = kwargs['use_t']
            del kwargs['use_t']

        optimizer = Optimizer()
        xopt, retvals, optim_settings = optimizer._fit(f, score, start_params,
                                                       fargs, kwargs,
                                                       hessian=hess,
                                                       method=method,
                                                       disp=disp,
                                                       maxiter=maxiter,
                                                       callback=callback,
                                                       retall=retall,
                                                       full_output=full_output)
        optim_settings.update(kwds)
        cov_params_func = kwargs.setdefault('cov_params_func', None)
        if cov_params_func:
            Hinv = cov_params_func(self, xopt, retvals)
        elif method == 'newton' and full_output:
            Hinv = np.linalg.inv(-retvals['Hessian']) / nobs
        elif not skip_hessian:
            H = -1 * self.hessian(xopt)
            invertible = False
            if np.all(np.isfinite(H)):
                eigvals, eigvecs = np.linalg.eigh(H)
                if np.min(eigvals) > 0:
                    invertible = True

            if invertible:
                Hinv = eigvecs.dot(np.diag(1.0 / eigvals)).dot(eigvecs.T)
                Hinv = np.asfortranarray((Hinv + Hinv.T) / 2.0)
            else:
                warnings.warn('Inverting hessian failed, no bse or cov_params '
                              'available', HessianInversionWarning)
                Hinv = None

        mlefit = LikelihoodModelResults(self, xopt, Hinv, scale=1., **kwds)

        mlefit.mle_retvals = retvals
        if isinstance(retvals, dict) and warn_convergence:
            if not retvals['converged']:
                from statsmodels.tools.sm_exceptions import ConvergenceWarning
                warnings.warn("Maximum Likelihood optimization failed to "
                              "converge. Check mle_retvals",
                              ConvergenceWarning)

        mlefit.mle_settings = optim_settings
        return mlefit

    def _fit_zeros(self, keep_index=None, start_params=None,
                   return_auxiliary=False, k_params=None, **fit_kwds):
        """experimental, fit the model subject to zero constraints

        Intended for internal use cases until we know what we need.
        API will need to change to handle models with two exog.
        This is not yet supported by all model subclasses.

        This is essentially a simplified version of `fit_constrained`, and
        does not need to use `offset`.

        The estimation creates a new model with transformed design matrix,
        exog, and converts the results back to the original parameterization.

        Some subclasses could use a more efficient calculation than using a
        new model.

        Parameters
        ----------
        keep_index : array_like (int or bool) or slice
            variables that should be dropped.
        start_params : None or array_like
            starting values for the optimization. `start_params` needs to be
            given in the original parameter space and are internally
            transformed.
        k_params : int or None
            If None, then we try to infer from start_params or model.
        **fit_kwds : keyword arguments
            fit_kwds are used in the optimization of the transformed model.

        Returns
        -------
        results : Results instance
        k_extrar   T)copyr   Nr   nmF)r   r   r   r   r   r   r   r   r   normalized_cov_params.cov_params_defaultr   Mresidfittedvaluessresidbcov_scaledr@   )"r   r   r   arrayr$   rc   arangeconcatenatere   r=   r   r%   r   r8   zerosr   	TypeErrorrG   modelr   _resultsr   r   r   df_residr   r   r   
keep_indexdf_modelk_constrresults_constrained_cacher   )r0   r   r   return_auxiliaryk_paramsfit_kwdskextra_indexkeep_index_p	init_kwds
mod_constr
res_constrparams_fullresr   covs                   r2   
_fit_zeroszLikelihoodModel._fit_zerosf  s   H 4## 	&q(8(8*4888J	"A)Aq4<'788K>:{*CDDLL%L #'3L'AH^$<((H ''))	#T^DJ	!!!Z-0H 1 1&/1 1
#Z^//h//
!
yq)Hi333Hhx((","3J	((11T,1  M MCC :& 	 	 	((**CCC	 :#W-- 	J 4>3C3IICIOcl0:}-- 	>'1'=CL$ :~.. 	@(2(?CL%)&=>> 	82:138X:N1O1OCL..67CL.s3 Xj))
, 	*:aaag+>
+JK&)>>:344 	..0h(7K.L.LCL+- L+Jqqq$w,?,KL:z** 	8$.$7CL!$.$7CL!", * 3 * 3 (+5( 39c"" 	2#G,#N3#H-,%m4CCH3=3IC
111d7#Z/0.1CL+
s   #D? ?%E'&E'+=vIh%<=c                T   | j         }|||                    d          z  z   }t          j                            |d          }t          j        |                                          t          j        |          k     }t          j        |           d         } | j	        dd|i|S )a  experimental, fit of the model without collinear variables

        This currently uses QR to drop variables based on the given
        sequence.
        Options will be added in future, when the supporting functions
        to identify collinear variables become available.
        r   r)moder   r@   )
r$   varr   r   qrabsdiagonalsqrtwherer  )	r0   atolrtolr<   rZ   tolr  maskidx_keeps	            r2   _fit_collinearzLikelihoodModel._fit_collinear  s     ITAEE!HH_$ILLL%%vajjll##bgcll2 8TE??1%t;;(;d;;;r4   r7   )	Nr   r   TTr@   NFF)NNFN)r  r  )r   r   r   r   r3   r   r   r   r   r   r   r  r  __classcell__r   s   @r2   r   r     s                " " "" " "$" " "" " "  ?BINH H H HT 8<48K K K KZ< < < < < < < <r4   r   c                       e Zd ZdZ	 	 d fd	Zd Z fdZd Zd Zd	 Z	d
 Z
d Zd Zd Zd ZddZ	 	 d fd	Z xZS )GenericLikelihoodModela  
    Allows the fitting of any likelihood function via maximum likelihood.

    A subclass needs to specify at least the log-likelihood
    If the log-likelihood is specified for each observation, then results that
    require the Jacobian will be available. (The other case is not tested yet.)

    Notes
    -----
    Optimization methods that require only a likelihood function are 'nm' and
    'powell'

    Optimization methods that require a likelihood function and a
    score/gradient are 'bfgs', 'cg', and 'ncg'. A function to compute the
    Hessian is optional for 'ncg'.

    Optimization method that require a likelihood function, a score/gradient,
    and a Hessian is 'newton'

    If they are not overwritten by a subclass, then numerical gradient,
    Jacobian and Hessian of the log-likelihood are calculated by numerical
    forward differentiation. This might results in some cases in precision
    problems, and the Hessian might not be positive definite. Even if the
    Hessian is not positive definite the covariance matrix of the parameter
    estimates based on the outer product of the Jacobian might still be valid.


    Examples
    --------
    see also subclasses in directory miscmodels

    import statsmodels.api as sm
    data = sm.datasets.spector.load()
    data.exog = sm.add_constant(data.exog)
    # in this dir
    from model import GenericLikelihoodModel
    probit_mod = sm.Probit(data.endog, data.exog)
    probit_res = probit_mod.fit()
    loglike = probit_mod.loglike
    score = probit_mod.score
    mod = GenericLikelihoodModel(data.endog, data.exog, loglike, score)
    res = mod.fit(method="nm", maxiter = 500)
    import numpy as np
    np.allclose(res.params, probit_res.params)
    Nr#   c                h   ||| _         ||| _        ||| _        |                    dd           }	| j                            |            t                      j        ||f||	d| |,t          j	        |          dk    r|j
        d         nd| _        ||                     |           d S d S )Nr!   )r   r!   rS   r   )r   r   r   r&   rO   rj   r   r3   r   rb   rc   nparams_set_extra_params_names)r0   r%   r$   r   r   r   r   extra_params_namesr<   r!   r   s             r2   r3   zGenericLikelihoodModel.__init__6  s     "DLDJ"DL88J--T""" 	4	
!(8	
 	
?C	
 	
 	

 -/WT]]a-?-?DJqMMQDL)(();<<<<< *)r4   c                
   |g| j         | j                            |           n|| j        _        t          |          | _        t          | d          r| xj        | j        z  c_        t          | j                  | _	        d S )Nr   )
r$   r}   r+   r(   r|   re   r   r   r   r  )r0   r  s     r2   r  z.GenericLikelihoodModel._set_extra_params_namesT  s}    )y$&&'9::::#5	 122DLtZ(( .-4?++r4   c                     j         s fd _          j        s	 n j        s	  j        ct          j                             j                  }t          |dz
             _        t           j        j        d         |z
             _	        n"t          j
         _        t          j
         _	        t                                                       dS )z
        Initialize (possibly re-initialize) a Model instance. For
        instance, the design matrix of a linear model may change
        and some things must be recomputed.
        """
        # fall back to numerical derivatives if score is not provided
        if not self.score:  # right now score is not optional
            self.score = lambda x: approx_fprime(x, self.loglike)

        if self.exog is not None:
            # assume constant
            er = np.linalg.matrix_rank(self.exog)
            self.df_model = float(er - 1)
            self.df_resid = float(self.exog.shape[0] - er)
        else:
            self.df_model = np.nan
            self.df_resid = np.nan
        super().initialize()

    def expandparams(self, params):
        """
        expand to full parameter array when some parameters are fixed

        Parameters
        ----------
        params : ndarray
            reduced parameter array

        Returns
        -------
        paramsfull : ndarray
            expanded parameter array where fixed parameters are included

        Notes
        -----
        Calling this requires that self.fixed_params and self.fixed_paramsmask
        are defined.

        *developer notes:*

        This can be used in the log-likelihood to ...

        this could also be replaced by a more general parameter
        transformation.
        )fixed_paramsr   fixed_paramsmask)r0   r   
paramsfulls      r2   expandparamsz#GenericLikelihoodModel.expandparams}  s+    4 &++--
,2
4()r4   c                    || j                  S )zReduce parameters)r(  r   s     r2   reduceparamsz#GenericLikelihoodModel.reduceparams  s    d+,,r4   c                R    |                      |                              d          S )z!Log-likelihood of model at paramsr   
loglikeobssumr   s     r2   r   zGenericLikelihoodModel.loglike  s"    v&&**1---r4   c                T    |                      |                              d           S )z*Negative log-likelihood of model at paramsr   r.  r   s     r2   nloglikezGenericLikelihoodModel.nloglike  s%    ''++A....r4   c                .    |                      |           S )a:  
        Log-likelihood of the model for all observations at params.

        Parameters
        ----------
        params : array_like
            The parameters of the model.

        Returns
        -------
        loglike : array_like
            The log likelihood of the model evaluated at `params`.
        """
        return -self.nloglikeobs(params)

    def score(self, params):
        """
        Gradient of log-likelihood evaluated at params
        """
        kwds = {}
        kwds.setdefault('centered', True)
        return approx_fprime(params, self.loglike, **kwds).ravel()

    def score_obs(self, params, **kwds):
        """
        Jacobian/Gradient of log-likelihood evaluated at params for each
        observation.
        """
        kwds.setdefault('centered', True)
        return approx_fprime(params, self.loglikeobs, **kwds)

    def hessian(self, params):
        """
        Hessian of log-likelihood evaluated at params
        """
        from statsmodels.tools.numdiff import approx_hess

        # need options for hess (epsilon)
        return approx_hess(params, self.loglike)

    def hessian_factor(self, params, scale=None, observed=True):
        """Weights for calculating Hessian

        Parameters
        ----------
        params : ndarray
            parameter at which Hessian is evaluated
        scale : None or float
            If scale is None, then the default scale will be calculated.
            Default scale is defined by `self.scaletype` and set in fit.
            If scale is not None, then it is used as a fixed scale.
        observed : bool
            If True, then the observed Hessian is returned. If false then the
            expected information matrix is returned.

        Returns
        -------
        hessian_factor : ndarray, 1d
            A 1d weight vector used in the calculation of the Hessian.
            The hessian is obtained by `(exog.T * hessian_factor).dot(exog)`
        r   )r0   r   r   observeds       r2   hessian_factorz%GenericLikelihoodModel.hessian_factor  s
        """
        raise NotImplementedError

    def fit(self, start_params=None, method='nm', maxiter=500, full_output=1,
            disp=1, callback=None, retall=0, **kwargs):

        if start_params is None:
            if hasattr(self, 'start_params'):
                start_params = self.start_params
            else:
                start_params = 0.1 * np.ones(self.nparams)

        if 'cov_type' not in kwargs:
            kwargs['cov_type'] = 'nonrobust'

        fit_method = super().fit
        mlefit = fit_method(start_params=start_params,
                            method=method, maxiter=maxiter,
                            full_output=full_output,
                            disp=disp, callback=callback, **kwargs)
        results_class = getattr(self, 'results_class',
                                GenericLikelihoodModelResults)
        genericmlefit = results_class(self, mlefit)

        # amend param names
        exog_names = [] if (self.exog_names is None) else self.exog_names
        k_miss = len(exog_names) - len(mlefit.params)
        if not k_miss == 0:
            if k_miss < 0:
                self._set_extra_params_names(['par%d' % i
                                              for i in range(-k_miss)])
            else:
                # do not want to raise after we have already fit()
                warnings.warn('more exog_names than parameters', ValueWarning)

        return genericmlefit


class Results:
    """
    Class to contain model results

    Parameters
    ----------
    model : class instance
        the previously specified model instance
    params : ndarray
        parameter estimates from the fit model
    c                x    | j                             |            | j        ||fi | g | _        g d| _        d S )N)r   r   wresid)rO   rj   r   r*   _data_in_cache)r0   r   r   kwds       r2   r3   zResults.__init__  sO    S!!!v-----AAAr4   c                ^    || _         || _        t          |d          r|j        | _        dS dS )aA  
        Initialize (possibly re-initialize) a Results instance.

        Parameters
        ----------
        model : Model
            The model instance.
        params : ndarray
            The model parameters.
        **kwargs
            Any additional keyword arguments required to initialize the model.
        r)   N)r   r   r   r)   )r0   r   r   r1   s       r2   r   zResults.initialize   s<     
5,'' 	/#.DOOO	/ 	/r4   Tc                   t          |d           }d }|r0|j        dk    s| j        j        dk    r|j        }n|j        j        g}|rt          | j        d          r|t          | j        dd           p| j        j	        j
        }ddlm} t          |t          j                  rzt          |d          rJt          |j        t                     r0|j        |                                v rt          j        |          }nt          j        |          j        }|j        }t)          |          }t          |t*                    }	  |||d	          }nV# t,          $ rI}	d
                    t!          t!          |	                              }
|	                    |
          d }	~	ww xY w|t)          |          k    r4|s2|t3          j        dt6                     n|                    |          }|j        }|ot;          j        |          }|j        dk    r<| j        j        j        dk    s| j        j        j         d         dk    r|d d d f         }t;          j!        |          }||fS )NrS   r   r   r    r   )dmatrixname	dataframe)return_typezpredict requires that you use a DataFrame when predicting from a model
that was created using the formula api.

The original error message returned by patsy is:
{}znan values have been dropped)"r   rb   r   sizeindexrW  r   r   r8   r(   r    r]   rV  r^   pdSeriesstrdescribe	DataFramer   re   r   	Exceptionrd   r   rE   rF   r   reindexr   asarrayr$   rc   
atleast_2d)r0   r$   	transform	is_pandas
exog_indexr    rV  orig_exog_lenis_dictexcrK   s              r2   _transform_predict_exogzResults._transform_predict_exog2  sp   $T400	
 	/yA~~!1Q!6!6!Z

"jo.
 	$Y77 	$T=M"4:}dCC 7:?6 %%%%%%$	** 	(D&)) 0jC.H.H 0	[%9%9%;%;;;<--DD <--/D!Z
IIM t,,G)w{DkJJJ ) ) ) #F3s3xx==11	 
 mmC((() s4yy(((%M"@,OOOO<<
33DJ:d##DyA~~4:?#71#<#<#':?#8#;q#@#@AAAtG}=&&DZs   E 
F) AF$$F)Nc                   |                      ||          \  }} | j        j        | j        |g|R i |}|Gt	          |d          s7|j        dk    rt          j        ||          S t          j        ||          S |S )a  
        Call self.model.predict with self.params as the first argument.

        Parameters
        ----------
        exog : array_like, optional
            The values for which you want to predict. see Notes below.
        transform : bool, optional
            If the model was fit via a formula, do you want to pass
            exog through the formula. Default is True. E.g., if you fit
            a model y ~ log(x1) + log(x2), and transform is True, then
            you can pass a data structure that contains x1 and x2 in
            their original form. Otherwise, you'd need to log the data
            first.
        *args
            Additional arguments to pass to the model, see the
            predict method of the model for the details.
        **kwargs
            Additional keywords arguments to pass to the model, see the
            predict method of the model for the details.

        Returns
        -------
        array_like
            See self.model.predict.

        Notes
        -----
        The types of exog that are supported depends on whether a formula
        was used in the specification of the model.

        If a formula was used, then exog is processed in the same way as
        the original data. This transformation needs to have key access to the
        same variable names, and can be a pandas DataFrame or a dict like
        object that contains numpy arrays.

        If no formula was used, then the provided exog needs to have the
        same number of columns as the original exog in the model. No
        transformation of the data is performed except converting it to
        a numpy array.

        Row indices as in pandas data frames are supported, and added to the
        returned prediction.
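
        Examples
        --------
        A minimal sketch, assuming ``results`` comes from a formula-based
        model fit on a DataFrame with columns ``x1`` and ``x2``:

        >>> import pandas as pd
        >>> new = pd.DataFrame({"x1": [0.1, 0.2], "x2": [1.0, 2.0]})
        >>> results.predict(new)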
        """
        exog, exog_index = self._transform_predict_exog(exog,
                                                        transform=transform)

        predict_results = self.model.predict(self.params, exog, *args,
                                             **kwargs)

        if exog_index is not None and not hasattr(predict_results,
                                                  'predicted_values'):
            if predict_results.ndim == 1:
                return pd.Series(predict_results, index=exog_index)
            else:
                return pd.DataFrame(predict_results, index=exog_index)
        else:
            return predict_results

    def summary(self):
        """
        Summary

        Not implemented
        """
        raise NotImplementedError


class LikelihoodModelResults(Results):
    """
    Class to contain results from likelihood models

    Parameters
    ----------
    model : LikelihoodModel instance or subclass instance
        LikelihoodModelResults holds a reference to the model that is fit.
    params : 1d array_like
        parameter estimates from estimated model
    normalized_cov_params : 2d array
       Normalized (before scaling) covariance of params. (dot(X.T,X))**-1
    scale : float
        For (some subset of models) scale will typically be the
        mean square error from the estimated model (sigma^2)

    Attributes
    ----------
    mle_retvals : dict
        Contains the values returned from the chosen optimization method if
        full_output is True during the fit.  Available only if the model
        is fit by maximum likelihood.  See notes below for the output from
        the different methods.
    mle_settings : dict
        Contains the arguments passed to the chosen optimization method.
        Available if the model is fit by maximum likelihood.  See
        LikelihoodModel.fit for more information.
    model : model instance
        LikelihoodResults contains a reference to the model that is fit.
    params : ndarray
        The parameters estimated for the model.
    scale : float
        The scaling factor of the model given during instantiation.
    tvalues : ndarray
        The t-values of the standard errors.


    Notes
    -----
    The covariance of params is given by scale times normalized_cov_params.

    Return values by solver if full_output is True during fit:

        'newton'
            fopt : float
                The value of the (negative) loglikelihood at its
                minimum.
            iterations : int
                Number of iterations performed.
            score : ndarray
                The score vector at the optimum.
            Hessian : ndarray
                The Hessian at the optimum.
            warnflag : int
                1 if maxiter is exceeded. 0 if successful convergence.
            converged : bool
                True: converged. False: did not converge.
            allvecs : list
                List of solutions at each iteration.
        'nm'
            fopt : float
                The value of the (negative) loglikelihood at its
                minimum.
            iterations : int
                Number of iterations performed.
            warnflag : int
                1: Maximum number of function evaluations made.
                2: Maximum number of iterations reached.
            converged : bool
                True: converged. False: did not converge.
            allvecs : list
                List of solutions at each iteration.
        'bfgs'
            fopt : float
                Value of the (negative) loglikelihood at its minimum.
            gopt : float
                Value of gradient at minimum, which should be near 0.
            Hinv : ndarray
                value of the inverse Hessian matrix at minimum.  Note
                that this is just an approximation and will often be
                different from the value of the analytic Hessian.
            fcalls : int
                Number of calls to loglike.
            gcalls : int
                Number of calls to gradient/score.
            warnflag : int
                1: Maximum number of iterations exceeded. 2: Gradient
                and/or function calls are not changing.
            converged : bool
                True: converged.  False: did not converge.
            allvecs : list
                Results at each iteration.
        'lbfgs'
            fopt : float
                Value of the (negative) loglikelihood at its minimum.
            gopt : float
                Value of gradient at minimum, which should be near 0.
            fcalls : int
                Number of calls to loglike.
            warnflag : int
                Warning flag:

                - 0 if converged
                - 1 if too many function evaluations or too many iterations
                - 2 if stopped for another reason

            converged : bool
                True: converged.  False: did not converge.
        'powell'
            fopt : float
                Value of the (negative) loglikelihood at its minimum.
            direc : ndarray
                Current direction set.
            iterations : int
                Number of iterations performed.
            fcalls : int
                Number of calls to loglike.
            warnflag : int
                1: Maximum number of function evaluations. 2: Maximum number
                of iterations.
            converged : bool
                True : converged. False: did not converge.
            allvecs : list
                Results at each iteration.
        'cg'
            fopt : float
                Value of the (negative) loglikelihood at its minimum.
            fcalls : int
                Number of calls to loglike.
            gcalls : int
                Number of calls to gradient/score.
            warnflag : int
                1: Maximum number of iterations exceeded. 2: Gradient and/
                or function calls not changing.
            converged : bool
                True: converged. False: did not converge.
            allvecs : list
                Results at each iteration.
        'ncg'
            fopt : float
                Value of the (negative) loglikelihood at its minimum.
            fcalls : int
                Number of calls to loglike.
            gcalls : int
                Number of calls to gradient/score.
            hcalls : int
                Number of calls to hessian.
            warnflag : int
                1: Maximum number of iterations exceeded.
            converged : bool
                True: converged. False: did not converge.
            allvecs : list
                Results at each iteration.
        Nr   c                t   t                                          ||           || _        || _        d| _        d|v r|d         }||nd| _        d|v rd|                    dd          }|                    di           }|dk    rd| _        ddi| _        d S dd	l	m
}	 |i }| j        } |	| f|d
|d| d S d S )NFr   r   rC  r   descriptionWStandard Errors assume that the covariance matrix of the errors is correctly specified.r   get_robustcov_resultsTr   use_selfr   )r   r3   r   r   _use_tr   r`   r   r   statsmodels.base.covtyperv  )r0   r   r   r   r   r1   r   r   r   rv  r   s             r2   r3   zLikelihoodModelResults.__init__J  s   '''%:"
 f7OE"'"3DJzz*k::Hzz*b11H;&& +!. 1. !/ KJJJJJ#!H
%%d ?X,1? ?5=? ? ? ? ?  r4   c                    t           )z"See specific model class docstringr   rv   s    r2   r   z,LikelihoodModelResults.normalized_cov_paramsh  s    !!r4   rC  Tc                    |du rt          d          ddlm} |i }|dk    rd| _        ddi| _        d S  || f|d|d	| d S )
NFz8use_self should have been removed long ago.  See GH#4401r   ru  rC  rs  rt  Trw  )rG   rz  rv  r   r   )r0   r   rx  r   r   rv  s         r2   _get_robustcov_resultsz-LikelihoodModelResults._get_robustcov_resultsl  s    u + , , ,BBBBBBH{""'DM* -* +DMMM
 "!$ ;D(-; ;19; ; ; ; ;r4   c                    | j         S )z?Flag indicating to use the Student's distribution in inference.)ry  rv   s    r2   r   zLikelihoodModelResults.use_t~  s     {r4   c                .    t          |          | _        d S r7   )boolry  )r0   values     r2   r   zLikelihoodModelResults.use_t  s    5kkr4   c                @    | j                             | j                  S )zLog-likelihood of model)r   r   r   rv   s    r2   llfzLikelihoodModelResults.llf  s     z!!$+...r4   c                   t          | d          s?| j        8t          j        t	          | j                            }t          j        |dd<   n}t          j                    5  t          j	        dt                     t          j        t          j        |                                                     }ddd           n# 1 swxY w Y   |S )z/The standard errors of the parameter estimates.r   Nignore)r   r   r   emptyre   r   r$  rE   catch_warningssimplefilterRuntimeWarningr  r   
cov_params)r0   bse_s     r2   bsezLikelihoodModelResults.bse  s     344 	;+38C,,--DfDGG(** ; ;%h???wrwt'8'899::; ; ; ; ; ; ; ; ; ; ; ; ; ; ; s   #ACC	Cc                    t          j                    5  t          j        dt                     | j        | j        z  cddd           S # 1 swxY w Y   dS )zH
        Return the t-statistic for a given parameter estimate.
        r  N)rE   r  r  r  r   r  rv   s    r2   tvalueszLikelihoodModelResults.tvalues  s    
 $&& 	* 	*!(N;;;;)	* 	* 	* 	* 	* 	* 	* 	* 	* 	* 	* 	* 	* 	* 	* 	* 	* 	*s   )A

AAc                   t          j                    5  t          j        dt                     | j        r\t          | d| j                  }t          j        	                    t          j        | j                  |          dz  cddd           S t          j        	                    t          j        | j                            dz  cddd           S # 1 swxY w Y   dS )z6The two-tailed p values for the t-stats of the params.r  df_resid_inferencerS   N)rE   r  r  r  r   r8   r   r   tsfr   r  r  norm)r0   r   s     r2   pvalueszLikelihoodModelResults.pvalues  s    $&& 	? 	?!(N;;;z ?"4)=t}MMwzz"&"6"6AAAE		? 	? 	? 	? 	? 	? 	? 	? z}}RVDL%9%9::Q>	? 	? 	? 	? 	? 	? 	? 	? 	? 	? 	? 	? 	? 	? 	? 	? 	? 	?s   A1C8CCCc           	        t          | d          r| j        d         dv rt          }nt          j        }|&| j        t          | d          st          d          |||t          d          ||t          d          |+t          | d          r| j        }n|| j        }| j        |z  }|=t          j	        |          }|j
        d	k    r
|||f         S ||dddf         |f         S |qt          j	        |          }|j
        d	k    rt          d
          ||}nt          j	        |          } || ||t          j        |                              }|S |S )a  
        Compute the variance/covariance matrix.

        The variance/covariance matrix can be of a linear contrast of the
        estimated parameters or of all params multiplied by `scale`, which
        will usually be an estimate of sigma^2.  Scale is assumed to be a scalar.

        Parameters
        ----------
        r_matrix : array_like
            Can be 1d, or 2d.  Can be used alone or with other.
        column : array_like, optional
            Must be used on its own.  Can be 0d or 1d; see below.
        scale : float, optional
            Can be specified or not.  Default is None, which means that
            the scale argument is taken from the model.
        cov_p : ndarray, optional
            The covariance of the parameters. If not provided, this value is
            read from `self.normalized_cov_params` or
            `self.cov_params_default`.
        other : array_like, optional
            Can be used when r_matrix is specified.

        Returns
        -------
        ndarray
            The covariance matrix of the parameter estimates or of linear
            combination of parameter estimates. See Notes.

        Notes
        -----
        (The below are assumed to be in matrix notation.)

        If no argument is specified, returns the covariance matrix of the model,
        ``(scale)*(X.T X)^(-1)``.

        If `r_matrix` (a contrast) is specified, it pre- and post-multiplies as
        follows: ``(scale) * r_matrix (X.T X)^(-1) r_matrix.T``.

        If `r_matrix` and `other` are specified, returns
        ``(scale) * r_matrix (X.T X)^(-1) other.T``.

        If `column` is specified, returns
        ``(scale) * (X.T X)^(-1)[column, column]`` if `column` is 0d

        OR

        ``(scale) * (X.T X)^(-1)[column][:, column]`` if `column` is 1d
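
        Examples
        --------
        A minimal illustrative sketch (reusing the Longley OLS fit from the
        `t_test` examples in this module; outputs are omitted):

        >>> import statsmodels.api as sm
        >>> data = sm.datasets.longley.load()
        >>> data.exog = sm.add_constant(data.exog)
        >>> results = sm.OLS(data.endog, data.exog).fit()
        >>> cov_all = results.cov_params()         # full covariance matrix
        >>> var_x1 = results.cov_params(column=1)  # variance of one estimate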
        r   r   l1l1_cvxopt_cpNr   zFneed covariance of parameters for computing (unnormalized) covariancesz3Column should be specified without other arguments.z)other can only be specified with r_matrixr@   zr_matrix should be 1d or 2d)r   r   r   r   r   r   rG   r   r   rc  rc   	transpose)r0   r_matrixcolumnr   cov_potherdot_funrn   s           r2   r  z!LikelihoodModelResults.cov_params  s   f D.)) 	!+.2HHHGGfGMd8@D"677 A : ; ; ;8#75;L * + + +!1HIII=t122 ;/= JE2U:Z''F|r!!VV^,,VAAAtG_f455!z(++H~## !>???} 
5))'(GGE2<3F3F$G$GHHCJLr4   c                   ddl m} t          |ddd          }| j        j        dk    rd | j        j        j        D             }n| j        j        j        } ||                              |          }|j	        |j
        }}|j        d         }|j        d         }	|&| j        t          | d
          st          d          | j                            d          }
|	|
j        d         k    rt          d          |t!          j        |          }n(t!          j        |          }|                                }|j        dk    r |j        d         |k    rt          d          |t          | d          o| j        }t!          j        ||
          }|dk    r<t!          j        t!          j        |                     ||                              }n)t!          j        |                     ||                    }||z
  t5          |          z  }t7          | d| j                  }|rt;          ||||          S t;          ||||d          S )av  
        Compute a t-test for each linear hypothesis of the form Rb = q.

        Parameters
        ----------
        r_matrix : {array_like, str, tuple}
            One of:

            - array : If an array is given, a p x k 2d array or length k 1d
              array specifying the linear restrictions. It is assumed
              that the linear combination is equal to zero.
            - str : The full hypotheses to test can be given as a string.
              See the examples.
            - tuple : A tuple of arrays in the form (R, q). If q is given,
              can be either a scalar or a length p row vector.

        cov_p : array_like, optional
            An alternative estimate for the parameter covariance matrix.
            If None is given, self.normalized_cov_params is used.
        use_t : bool, optional
            If use_t is None, then the default of the model is used. If use_t
            is True, then the p-values are based on the t distribution. If
            use_t is False, then the p-values are based on the normal
            distribution.

        Returns
        -------
        ContrastResults
            The results for the test are attributes of this results instance.
            The available results have the same elements as the parameter table
            in `summary()`.

        See Also
        --------
        tvalues : Individual t statistics for the estimated parameters.
        f_test : Perform an F test on model parameters.
        patsy.DesignInfo.linear_constraint : Specify a linear constraint.

        Examples
        --------
        >>> import numpy as np
        >>> import statsmodels.api as sm
        >>> data = sm.datasets.longley.load()
        >>> data.exog = sm.add_constant(data.exog)
        >>> results = sm.OLS(data.endog, data.exog).fit()
        >>> r = np.zeros_like(results.params)
        >>> r[5:] = [1,-1]
        >>> print(r)
        [ 0.  0.  0.  0.  0.  1. -1.]

        ``r`` tests that the coefficients on the 5th and 6th independent
        variables are the same.

        >>> T_test = results.t_test(r)
        >>> print(T_test)
                                     Test for Constraints
        ==============================================================================
                         coef    std err          t      P>|t|      [0.025      0.975]
        ------------------------------------------------------------------------------
        c0         -1829.2026    455.391     -4.017      0.003   -2859.368    -799.037
        ==============================================================================
        >>> T_test.effect
        -1829.2025687192481
        >>> T_test.sd
        455.39079425193762
        >>> T_test.tvalue
        -4.0167754636411717
        >>> T_test.pvalue
        0.0015163772380899498

        Alternatively, you can specify the hypothesis tests using a string

        >>> from statsmodels.formula.api import ols
        >>> dta = sm.datasets.longley.load_pandas().data
        >>> formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
        >>> results = ols(formula, dta).fit()
        >>> hypotheses = 'GNPDEFL = GNP, UNEMP = 2, YEAR/1829 = 1'
        >>> t_test = results.t_test(hypotheses)
        >>> print(t_test)
                                     Test for Constraints
        ==============================================================================
                         coef    std err          t      P>|t|      [0.025      0.975]
        ------------------------------------------------------------------------------
        c0            15.0977     84.937      0.178      0.863    -177.042     207.238
        c1            -2.0202      0.488     -8.231      0.000      -3.125      -0.915
        c2             1.0001      0.249      0.000      1.000       0.437       1.563
        ==============================================================================
        r   
DesignInfor   TstrictoptionalrS   c                6    g | ]}d |d          d|d          S yr   _r   r@   rF  s     r2   rC   z1LikelihoodModelResults.t_test.<locals>.<listcomp>i  @     9 9 9 '1&&!&& 9 9 9r4   r   Nr   z8Need covariance of parameters for computing T statisticsForderz#r_matrix and params are not aligned7r_matrix and q_matrix must have the same number of rowsr  r  r  )effectr  sddf_denomr  )r  	statisticr  r  distribution)r]   r  r   r   rb   r   r(   	cov_nameslinear_constraintcoefs	constantsrc   r   r   rG   r7  r   r   rc  squeezerZ  r   r   r  r   r  r   r8   r   r
   )r0   r  r  r   r  namesLCq_matrix
num_ttests
num_paramsr   _effect_sd_tr   s                  r2   t_testzLikelihoodModelResults.t_test  s   r 	%$$$$$%EEE;q  9 9"jo79 9 9EE JO-EZ00::Xr|(^A&
^A&
Md8@D"677 A , - - -"""--a((BCCCx
++HHz(++H''))H=1~a J..  "2 3 3 3 =T7++:
E&6** >>'"'$//! #2 #0 #0 1 1 2 2CC '$//85/IIJJC F3KK/4!5t}EE 	8"'RC,46 6 6 6 #'RC,4068 8 8 8r4   c                :    |                      |||dd          }|S )a  
        Compute the F-test for a joint linear hypothesis.

        This is a special case of `wald_test` that always uses the F
        distribution.

        Parameters
        ----------
        r_matrix : {array_like, str, tuple}
            One of:

            - array : An r x k array where r is the number of restrictions to
              test and k is the number of regressors. It is assumed
              that the linear combination is equal to zero.
            - str : The full hypotheses to test can be given as a string.
              See the examples.
            - tuple : A tuple of arrays in the form (R, q), ``q`` can be
              either a scalar or a length k row vector.

        cov_p : array_like, optional
            An alternative estimate for the parameter covariance matrix.
            If None is given, self.normalized_cov_params is used.
        invcov : array_like, optional
            A q x q array to specify an inverse covariance matrix based on a
            restrictions matrix.

        Returns
        -------
        ContrastResults
            The results for the test are attributes of this results instance.

        See Also
        --------
        t_test : Perform a single hypothesis test.
        wald_test : Perform a Wald-test using a quadratic form.
        statsmodels.stats.contrast.ContrastResults : Test results.
        patsy.DesignInfo.linear_constraint : Specify a linear constraint.

        Notes
        -----
        The matrix `r_matrix` is assumed to be non-singular. More precisely,

        r_matrix (pX pX.T) r_matrix.T

        is assumed invertible. Here, pX is the generalized inverse of the
        design matrix of the model. There can be problems in non-OLS models
        where the rank of the covariance of the noise is not full.

        Examples
        --------
        >>> import numpy as np
        >>> import statsmodels.api as sm
        >>> data = sm.datasets.longley.load()
        >>> data.exog = sm.add_constant(data.exog)
        >>> results = sm.OLS(data.endog, data.exog).fit()
        >>> A = np.identity(len(results.params))
        >>> A = A[1:,:]

        This tests that each coefficient is jointly statistically
        significantly different from zero.

        >>> print(results.f_test(A))
        <F test: F=array([[ 330.28533923]]), p=4.984030528700946e-10, df_denom=9, df_num=6>

        Compare this to

        >>> results.fvalue
        330.2853392346658
        >>> results.f_pvalue
        4.98403096572e-10

        >>> B = np.array(([0,0,1,-1,0,0,0],[0,0,0,0,0,1,-1]))

        This tests that the coefficient on the 2nd and 3rd regressors are
        equal and jointly that the coefficient on the 5th and 6th regressors
        are equal.

        >>> print(results.f_test(B))
        <F test: F=array([[ 9.74046187]]), p=0.005605288531708235, df_denom=9, df_num=2>

        Alternatively, you can specify the hypothesis tests using a string

        >>> from statsmodels.datasets import longley
        >>> from statsmodels.formula.api import ols
        >>> dta = longley.load_pandas().data
        >>> formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
        >>> results = ols(formula, dta).fit()
        >>> hypotheses = '(GNPDEFL = GNP), (UNEMP = 2), (YEAR/1829 = 1)'
        >>> f_test = results.f_test(hypotheses)
        >>> print(f_test)
        <F test: F=array([[ 144.17976065]]), p=6.322026217355609e-08, df_denom=9, df_num=3>
        T)r  invcovuse_fscalar)	wald_test)r0   r  r  r  r  s        r2   f_testzLikelihoodModelResults.f_test  s&    z nnXU6VZn[[
r4   c                   t          |ddd          }t          |ddd          }|t          | d          o| j        }ddlm} | j        j        d	k    rd
 | j        j        j	        D             }n| j        j        j	        }| j        
                    d          }	 ||                              |          }
|
j        |
j        }}| j        #|!|t          | d          st          d          t!          j        ||	dddf                   }t%          |j        d                   }|t!          j        |          }nt!          j        |          }|j        dk    r,|dddf         }|j        d         |k    rt          d          ||z
  }||                     ||          }t!          j        |                                          rt          d          t           j                            |          }t           j                            |          }||k     r!t9          j        d||fz  t<                     |}||}t          | d          r3| j        d         dv r$tA          tA          |j!        |          |          }n-t!          j        t!          j        |j!        |          |          }tE          | d| j#                  }|t9          j        dtH                     d}|r,|j%        dk    r!t%          t!          j&        |                    }|r||z  }tO          |||          S tO          |||d|f          S )a
  
        Compute a Wald-test for a joint linear hypothesis.

        Parameters
        ----------
        r_matrix : {array_like, str, tuple}
            One of:

            - array : An r x k array where r is the number of restrictions to
              test and k is the number of regressors. It is assumed that the
              linear combination is equal to zero.
            - str : The full hypotheses to test can be given as a string.
              See the examples.
            - tuple : A tuple of arrays in the form (R, q), ``q`` can be
              either a scalar or a length p row vector.

        cov_p : array_like, optional
            An alternative estimate for the parameter covariance matrix.
            If None is given, self.normalized_cov_params is used.
        invcov : array_like, optional
            A q x q array to specify an inverse covariance matrix based on a
            restrictions matrix.
        use_f : bool
            If True, then the F-distribution is used. If False, then the
            asymptotic chi-square distribution is used. If use_f is None, then
            the F distribution is used if the model specifies that use_t is True.
            The test statistic is proportionally adjusted for the distribution
            by the number of constraints in the hypothesis.
        df_constraints : int, optional
            The number of constraints. If not provided the number of
            constraints is determined from r_matrix.
        scalar : bool, optional
            Flag indicating whether the Wald test statistic should be returned
            as a scalar float. The current behavior is to return an array.
            This will switch to a scalar float after 0.14 is released. To
            get the future behavior now, set scalar to True. To silence
            the warning and retain the legacy behavior, set scalar to
            False.

        Returns
        -------
        ContrastResults
            The results for the test are attributes of this results instance.

        See Also
        --------
        f_test : Perform an F test on model parameters.
        t_test : Perform a single hypothesis test.
        statsmodels.stats.contrast.ContrastResults : Test results.
        patsy.DesignInfo.linear_constraint : Specify a linear constraint.

        Notes
        -----
        The matrix `r_matrix` is assumed to be non-singular. More precisely,

        r_matrix (pX pX.T) r_matrix.T

        is assumed invertible. Here, pX is the generalized inverse of the
        design matrix of the model. There can be problems in non-OLS models
        where the rank of the covariance of the noise is not full.
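
        Examples
        --------
        A minimal illustrative sketch (assuming the formula-based OLS fit from
        the `t_test` examples above; output is omitted):

        >>> hypotheses = '(GNPDEFL = GNP), (UNEMP = 2)'
        >>> wald_res = results.wald_test(hypotheses, scalar=True)
        >>> print(wald_res)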
        r  Tr  r  Nr   r   r  rS   c                6    g | ]}d |d          d|d          S r  r@   rF  s     r2   rC   z4LikelihoodModelResults.wald_test.<locals>.<listcomp>C  r  r4   r  r  r   z8need covariance of parameters for computing F statisticsr   r  r  zPr_matrix performs f_test for using dimensions that are asymptotically non-normalzbcovariance of constraints does not have full rank. The number of constraints is %d, but rank is %dr   r   r  r  zThe behavior of wald_test will change after 0.14 to returning scalar test statistic values. To get the future behavior now, set scalar to True. To silence this message while retaining the legacy behavior, set scalar to False.F)r  r  df_numchi2)r  r  r  r  distargs)(r   r   r   r]   r  r   rb   r   r(   r  r7  r  r  r  r   rG   r   r   r#  rc   r   rc  r  isnanmaxr   pinvr"  rE   rF   r   r   r   r   r8   r   FutureWarningrZ  r  r
   )r0   r  r  r  r  df_constraintsr  r  r  r   r  r  cparamsJRbqJ_r  r   s                     r2   r  z LikelihoodModelResults.wald_test  s   ~ %EEE68D4HHH=T7++:
E$$$$$$;q  9 9"jo79 9 9EE JO-E"""--Z00::Xr|(&.5=wt5I'J'J , - - - &6!!!T'?33(.#$$x{{HHz(++H=A4(H~a A%%  "2 3 3 3 >OOXUOCCEx""$$ /  ". / / / Y^^E**F&&u--BAvv +./W56BD D D  %AD.)) 	3!+.2HHHv..44AArvceV,,c22A4!5t}EE>M<    F 	%afkkbjmm$$A 	GFA"Q*+- - - - #A06!G G G Gr4   Fc                   ddl m} | }|g }|g }t          |j        j        dd          }||t          d          t          j        t          |j	                            }g }	 |t                    }
||j        D ]}|                    |          }|                                }||         }|D ]!}||v r|
|                             |           "|j        d         }|r|dk    rm|	                    ||f           g }|D ]1}|                    |t          j        |
|                   f           2nt#          |j        j                  D ]]\  }}t          j        ||                   }|D ]!}||v r|
|                             |           "|rF|	                    ||f           ^g }|D ]1}|                    |t          j        |
|                   f           2|j        }ddg|         }g }g }|	|z   |z   D ]|\  }}|                    ||	          }|j        |j        |j        d         g}|r|                    |j                   |                    |           |                    |           }g d
}|r|                    d           ddlm}  ||||          }t7          d|d|          }|	|z   |z   |_        |S )a1  
        Compute a sequence of Wald tests for terms over multiple columns.

        This computes joined Wald tests for the hypothesis that all
        coefficients corresponding to a `term` are zero.
        `Terms` are defined by the underlying formula or by string matching.

        Parameters
        ----------
        skip_single : bool
            If true, then terms that consist only of a single column and,
            therefore, refer only to a single parameter are skipped.
            If false, then all terms are included.
        extra_constraints : ndarray
            Additional constraints to test. Note that this input has not been
            tested.
        combine_terms : {list[str], None}
            Each string in this list is matched to the name of the terms or
            the name of the exogenous variables. All columns whose name
            includes that string are combined in one joint test.
        scalar : bool, optional
            Flag indicating whether the Wald test statistic should be returned
            as a scalar float. The current behavior is to return an array.
            This will switch to a scalar float after 0.14 is released. To
            get the future behavior now, set scalar to True. To silence
            the warning and retain the legacy behavior, set scalar to
            False.

        Returns
        -------
        WaldTestResults
            The result instance contains `table` which is a pandas DataFrame
            with the test results: test statistic, degrees of freedom and
            pvalues.

        Examples
        --------
        >>> res_ols = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)", data).fit()
        >>> res_ols.wald_test_terms()
        <class 'statsmodels.stats.contrast.WaldTestResults'>
                                                  F                P>F  df constraint  df denom
        Intercept                        279.754525  2.37985521351e-22              1        51
        C(Duration, Sum)                   5.367071    0.0245738436636              1        51
        C(Weight, Sum)                    12.432445  3.99943118767e-05              2        51
        C(Duration, Sum):C(Weight, Sum)    0.176002      0.83912310946              2        51

        >>> res_poi = Poisson.from_formula("Days ~ C(Weight) * C(Duration)",                                            data).fit(cov_type='HC0')
        >>> wt = res_poi.wald_test_terms(skip_single=False,                                          combine_terms=['Duration', 'Weight'])
        >>> print(wt)
                                    chi2             P>chi2  df constraint
        Intercept              15.695625  7.43960374424e-05              1
        C(Weight)              16.132616  0.000313940174705              2
        C(Duration)             1.009147     0.315107378931              1
        C(Weight):C(Duration)   0.216694     0.897315972824              2
        Duration               11.187849     0.010752286833              3
        Weight                 30.263368  4.32586407145e-06              4
        r   )defaultdictNr    zno constraints, nothing to dor   r  r  )r  )r  pvaluedf_constraintr  )r`  )r[  rf   )table)collectionsr  r8   r   r(   rG   r   eyere   r   r,   termsslicerW  r/   rc   vstack	enumerater}   rd  r   r  r  r  r  pandasr`  r   temp)r0   skip_singleextra_constraintscombine_termsr  r  resultr    identityconstraintscombinedtermrp   rW  constraint_matrixcnamek_constraintcombined_constraintsrq   r   r  res_waldr[  
constraintwtrow	col_namesr`  r  r  s                                 r2   wald_test_termsz&LikelihoodModelResults.wald_test_terms  s   | 	,+++++$ " Mfl/EE#4#<<===6#fm,,--;t$$"#) > >"((..yy{{$,TN! + B BE}} ../@AAA06q9 !#q(( ""D*;#<====#% & Q Q$++UBIhuo4N4N,OPPPPQ 'v|'>?? > >	T$&M(3-$@$@! + B BE}} ../@AAA ""D*;#<====#% & Q Q$++UBIhuo4N4N,OPPPP}U+ +.B BEV V 	 	D*!!*V!<<B<J,<Q,?@C (

2;'''OOC   LL =<<	 	)Z((($$$$$$	(%CCCdL$eDDD!558II
r4   hs皙?c                .    t          | ||||          }|S )a 	  
        Perform pairwise t_test with multiple testing corrected p-values.

        This uses the contrast matrix from the formula's design_info encoding
        and should work for all encodings of a main effect.

        Parameters
        ----------
        term_name : str
            The name of the term for which pairwise comparisons are computed.
            Term names for categorical effects are created by patsy and
            correspond to the main part of the exog names.
        method : {str, list[str]}
            The multiple testing p-value correction to apply. The default is
            'hs'. See stats.multipletesting.
        alpha : float
            The significance level for multiple testing reject decision.
        factor_labels : {list[str], None}
            Labels for the factor levels used for pairwise labels. If not
            provided, then the labels from the formula design_info are used.

        Returns
        -------
        MultiCompResult
            The results are stored as attributes, the main attributes are the
            following two. Other attributes are added for debugging purposes
            or as background information.

            - result_frame : pandas DataFrame with t_test results and multiple
              testing corrected p-values.
            - contrasts : matrix of constraints of the null hypothesis in the
              t_test.

        Notes
        -----
        Status: experimental. Currently only checked for treatment coding with
        and without specified reference level.

        Currently there are no multiple testing corrected confidence intervals
        available.

        Examples
        --------
        >>> res = ols("np.log(Days+1) ~ C(Weight) + C(Duration)", data).fit()
        >>> pw = res.t_test_pairwise("C(Weight)")
        >>> pw.result_frame
                 coef   std err         t         P>|t|  Conf. Int. Low
        2-1  0.632315  0.230003  2.749157  8.028083e-03        0.171563
        3-1  1.302555  0.230003  5.663201  5.331513e-07        0.841803
        3-2  0.670240  0.230003  2.914044  5.119126e-03        0.209488
             Conf. Int. Upp.  pvalue-hs reject-hs
        2-1         1.093067   0.010212      True
        3-1         1.763307   0.000002      True
        3-2         1.130992   0.010212      True
        )r   alphafactor_labels)r   )r0   	term_namer   r  r  r  s         r2   r   z&LikelihoodModelResults.t_test_pairwise  s)    r dIfE,9; ; ;
r4   c                d    ddl m} d} ||| j        |                                 ||          }|S )a  Experimental method for nonlinear prediction and tests

        Parameters
        ----------
        func : callable, f(params)
            nonlinear function of the estimation parameters. The return of
            the function can be vector valued, i.e. a 1-D array
        deriv : function or None
            first derivative or Jacobian of func. If deriv is None, then a
            numerical derivative will be used. If func returns a 1-D array,
            then the `deriv` should have rows corresponding to the elements
            of the return of func.

        Returns
        -------
        nl : instance of `NonlinearDeltaCov` with attributes and methods to
            calculate the results for the prediction or tests
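
        Examples
        --------
        A minimal illustrative sketch (the ratio of the first two estimated
        parameters is only a made-up nonlinear function; ``results`` is any
        fitted results instance):

        >>> def ratio(params):
        ...     return params[0] / params[1]
        >>> nl = results._get_wald_nonlinear(ratio)
        >>> nl.summary()   # delta-method standard error, test and conf. int.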

        r   )NonlinearDeltaCovN)deriv	func_args)statsmodels.stats._delta_methodr  r   r  )r0   funcr  r  r  nls         r2   _get_wald_nonlinearz*LikelihoodModelResults._get_wald_nonlinearU  sT    ( 	FEEEEE	tT[$//2C2C%*iA A A 	r4   c                   | j         }| j        r?t          j        }t	          | d| j                  }|                    d|dz  z
  |          }n't          j        }|                    d|dz  z
            }| j        }|||z  z
  }|||z  z   }	|>t          j
        dt                     t          j        |          }||         }|	|         }	t          j        t          ||	                    S )a  
        Construct confidence interval for the fitted parameters.

        Parameters
        ----------
        alpha : float, optional
            The significance level for the confidence interval. The default
            `alpha` = .05 returns a 95% confidence interval.
        cols : array_like, optional
            Specifies which confidence intervals to return.

        .. deprecated:: 0.13

           cols is deprecated and will be removed after 0.14 is released.
           cols only works when inputs are NumPy arrays and will fail
           when using pandas Series or DataFrames as input. You can
           subset the confidence intervals using slices.

        Returns
        -------
        array_like
            Each row contains [lower, upper] limits of the confidence interval
            for the corresponding parameter. The first column contains all
            lower, the second column contains all upper limits.

        Notes
        -----
        The confidence interval is based on the standard normal distribution
        if self.use_t is False. If self.use_t is True, then uses a Student's t
        with self.df_resid_inference (or self.df_resid if df_resid_inference is
        not defined) degrees of freedom.

        Examples
        --------
        >>> import statsmodels.api as sm
        >>> data = sm.datasets.longley.load()
        >>> data.exog = sm.add_constant(data.exog)
        >>> results = sm.OLS(data.endog, data.exog).fit()
        >>> results.conf_int()
        array([[-5496529.48322745, -1467987.78596704],
               [    -177.02903529,      207.15277984],
               [      -0.1115811 ,        0.03994274],
               [      -3.12506664,       -0.91539297],
               [      -1.5179487 ,       -0.54850503],
               [      -0.56251721,        0.460309  ],
               [     798.7875153 ,     2859.51541392]])

        >>> results.conf_int(cols=(2,3))
        array([[-0.1115811 ,  0.03994274],
               [-3.12506664, -0.91539297]])
        r  r   rS   Na  cols is deprecated and will be removed after 0.14 is released. cols only works when inputs are NumPy arrays and will fail when using pandas Series or DataFrames as input. Subsets of confidence intervals can be selected using slices of the full confidence interval array.)r  r   r   r  r8   r   ppfr  r   rE   rF   r  r   rc  r   )
r0   r  rp   r  distr   qr   loweruppers
             r2   conf_intzLikelihoodModelResults.conf_intp  s    h h: 	(7Dt%94=IIHUQY11AA:DUQY''AS S M9
    :d##D$KE$KEz$ue,,---r4   c                V    ddl m} |r|                                   || |           dS )ap  
        Save a pickle of this instance.

        Parameters
        ----------
        fname : {str, handle}
            A string filename or a file handle.
        remove_data : bool
            If False (default), then the instance is pickled without changes.
            If True, then all arrays with length nobs are set to None before
            pickling. See the remove_data method.
            In some cases not all arrays will be set to None.

        Notes
        -----
        If remove_data is true and the model result does not implement a
        remove_data method then this will raise an exception.
        r   )save_pickleN)statsmodels.iolib.smpickler  remove_data)r0   fnamer  r  s       r2   savezLikelihoodModelResults.save  sH    ( 	;::::: 	D%     r4   c                $    ddl m}  ||          S )a  
        Load a pickled results instance

        .. warning::

           Loading pickled models is not secure against erroneous or
           maliciously constructed data. Never unpickle data received from
           an untrusted or unauthenticated source.

        Parameters
        ----------
        fname : {str, handle, pathlib.Path}
            A string filename or a file handle.

        Returns
        -------
        Results
            The unpickled results instance.
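
        Examples
        --------
        A minimal illustrative round trip (``"res.pickle"`` is a placeholder
        file name):

        >>> import statsmodels.api as sm
        >>> results.save("res.pickle")
        >>> loaded = sm.load("res.pickle")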
        r   )load_pickle)r  r  )rl   r  r  s      r2   loadzLikelihoodModelResults.load  s&    , 	;:::::{5!!!r4   c                  
 | j         }i 
t          |          D ]3}	 t                              ||          }|
|<   $# t          $ r Y 0w xY w
fd
D             }|D ]}d| j        |<   d }d t          | dg           D             }d | j        j        D             }| j        |z   |z   D ]}||v r || |           | j	        D ]$}		 d| j        |	<   # t          t          f$ r Y !w xY wdS )a  
        Remove data arrays, all nobs arrays from result and model.

        This reduces the size of the instance, so it can be pickled with less
        memory. Currently tested for use with predict from an unpickled
        results and model instance.

        .. warning::

           Since data and some intermediate results have been removed,
           calculating new statistics that require them will raise exceptions.
           The exception will occur the first time an attribute is accessed
           that has been set to None.

        Not fully tested for time series models (tsa); it might delete too much
        for prediction, or it might not remove everything that could be removed.

        The lists of arrays to delete are maintained as attributes of
        the result and model instance, except for cached values. These
        lists could be changed before calling remove_data.

        The attributes to remove are named in:

        model._data_attr : arrays attached to both the model instance
            and the results instance with the same attribute name.

        result._data_in_cache : arrays that may exist as values in
            result._cache

        result._data_attr_model : arrays attached to the model
            instance but not to the results instance
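
        Examples
        --------
        A minimal illustrative sketch (``exog_new`` is a placeholder for new
        data to predict on; ``sm`` is ``statsmodels.api``):

        >>> results.remove_data()
        >>> results.save("small_res.pickle")      # much smaller pickle
        >>> loaded = sm.load("small_res.pickle")
        >>> loaded.predict(exog_new)              # prediction still works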
        c                J    g | ]}t          |         t                    | S r@   )r^   r   )r9   rZ   	cls_attrss     r2   rC   z6LikelihoodModelResults.remove_data.<locals>.<listcomp>	  sB     @ @ @A#IaL+>>@a @ @ @r4   Nc                    |                     d          }|                    d          }	 t          t          | g|z             }t	          ||          rt          ||d            d S d S # t          $ r Y d S w xY w)N.rT   )splitr&   r   r8   r   rN   AttributeError)objattpatt_obj_s        r2   wipez0LikelihoodModelResults.remove_data.<locals>.wipe$	  s    		#A5599Dguqy114&& .D$-----. .!   s   :A* *
A87A8c                    g | ]}d |z   S zmodel.r@   rF  s     r2   rC   z6LikelihoodModelResults.remove_data.<locals>.<listcomp>/	  s    RRRqhlRRRr4   _data_attr_modelc                    g | ]}d |z   S r!  r@   rF  s     r2   rC   z6LikelihoodModelResults.remove_data.<locals>.<listcomp>0	  s    BBBqhlBBBr4   )r   dirobject__getattribute__r  r   r8   r   r*   rR  rP   )r0   rl   rW  attr
data_attrsr  
model_only
model_attrr  r:   r  s             @r2   r  z"LikelihoodModelResults.remove_data  s   B n 	HH 	' 	'D'..sD99 #'	$ "   @ @ @ @ @ @ @
 	% 	%D $DK		 		 		 SRGD:Lb,Q,QRRR
BBDJ,ABBB
?Z/*< 	 	Cj   DsOOOO& 	 	C#'C  "H-   	 	s!   >
A
A

CC)(C))Nr   )rC  TN)NNNNNr   )FNNN)r  r  Nr7   )r  N)F)r   r   r   r   r3   r   r}  r   r   setterr   r  r  r  r  r  r  r  r  r  r   r  r	  r  r   r  r  r  r  s   @r2   r   r     sH       X Xz? ? ? ? ? ?<" " " EI%); ; ; ;$   X \" " \" / / \/   \ * * \* ? ? \? HL\ \ \ \~L8 L8 L8 L8\^ ^ ^ ^B 6::>LG LG LG LG\ DH37L L L L\ =A&*; ; ; ;z   6M. M. M. M.^! ! ! !6 " " ["0I I I I I I Ir4   r   c                  0    e Zd ZddddddddZeZdddZdS )LikelihoodResultsWrapperrf   rowsr  )r   r  r  r  r   r   r   )r  r	  N)r   r   r   _attrs_wrap_attrs_wrap_methodsr@   r4   r2   r-  r-  ?	  sH        !& F K MMMr4   r-  c                      e Zd Zed             Zed             Zed             Zed             Zed             Zed             Z	ed             Z
ed             Zed	             ZddZd ZdS )ResultMixinc                    t          | j        dd          }t          | d          r?t          | d          r| j        }nt          | d          r| j        }nd}| j        |z   |z   S | j        j        S )zModel WCr   r   r   r)   r!   r   )r8   r   r   r)   r!   r   r   rZ  )r0   r   r!   s      r2   
df_modelwczResultMixin.df_modelwcV	  s    
 $*i334$$ 
	$t\** ?z** = =8+g55;##r4   c                ,    d| j         z  d| j        z  z   S )zAkaike information criterionrS   )r  r5  rv   s    r2   aiczResultMixin.aich	  s     DH}qDO444r4   c                Z    d| j         z  t          j        | j                  | j        z  z   S )zBayesian information criterionr7  )r  r   logr   r5  rv   s    r2   biczResultMixin.bicm	  s(     DH}rvdi00DODDDr4   c                @    | j                             | j                  S )z*cached Jacobian of log-likelihood
        )r   r:  r   rv   s    r2   
score_obsvzResultMixin.score_obsvr	  s     z##DK000r4   c                @    | j                             | j                  S )z)cached Hessian of log-likelihood
        )r   r   r   rv   s    r2   hessvzResultMixin.hessvx	  s     z!!$+...r4   c                ~    | j         }t          j                            t          j        |j        |                    S )zg
        covariance of parameters based on outer product of jacobian of
        log-likelihood
        )r=  r   r   r   r   r   )r0   jacvs     r2   covjaczResultMixin.covjac~	  s-     y}}RVDFD11222r4   c           	         | j         }| j        }t          j                            |          }t          j        |t          j        t          j        |j        |          |                    S )zcovariance of parameters based on HJJH

        dot product of Hessian, Jacobian, Jacobian, Hessian of likelihood

        name should be covhjh
        )r=  r?  r   r   r   r   r   )r0   rA  r?  hessinvs       r2   covjhjzResultMixin.covjhj	  sQ     
)--&&vgrvbfTVT&:&:GDDEEEr4   c                X    t          j        t          j        | j                            S )zBstandard deviation of parameter estimates based on covHJH
        )r   r  r   rE  rv   s    r2   bsejhjzResultMixin.bsejhj	        wrwt{++,,,r4   c                X    t          j        t          j        | j                            S )zBstandard deviation of parameter estimates based on covjac
        )r   r  r   rB  rv   s    r2   bsejaczResultMixin.bsejac	  rH  r4   r   r   r   r   c           
        g }t          | j        d          rdnd}t          |          D ]}t          j                            | j        | j                  }| j        | j        |ddf         }	nd}	| j                                        }
 | j        j	        | j
        |         fd|	i|
}|r3| j        j        D ]&}t          ||t          | j        |                     '|                    ||          }|                    |j                   t          j        |          }|r|| _        |                    d          |                    d          |fS )	a]  simple bootstrap to get mean and variance of estimator

        see notes

        Parameters
        ----------
        nrep : int
            number of bootstrap replications
        method : str
            optimization method to use
        disp : bool
            If true, then optimization prints results
        store : bool
            If true, then parameter estimates for all bootstrap iterations
            are attached in self.bootstrap_results

        Returns
        -------
        mean : ndarray
            mean of parameter estimates over bootstrap replications
        std : ndarray
            standard deviation of parameter estimates over bootstrap
            replications

        Notes
        -----
        This was mainly written to compare estimators of the standard errors of
        the parameter estimates.  It uses independent random sampling from the
        original endog and exog, and therefore is only correct if observations
        are independently distributed.

        This will be moved to apply only to models with independently
        distributed observations.
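
        Examples
        --------
        A minimal illustrative call (``res`` is a fitted results instance of a
        model that supports this mixin; ``nrep`` is kept small only for speed):

        >>> mean, std, all_params = res.bootstrap(nrep=50, store=True)
        >>> std    # bootstrap standard errors; compare with res.bse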
        	cloneattrTF)rZ  Nr$   )r   r   r   )r   r   rI  r   randomrandintr   r$   r=   r   r%   rL  rN   r8   r   r/   r   r   bootstrap_resultsmeanstd)r0   nrepr   r   storeresultshascloneattrrA   rvsindexog_resampr   fitmodr'  fitress                 r2   	bootstrapzResultMixin.bootstrap	  st   F &tz;??JttUt 	* 	*AY&&tyty&AAF y$"i	2"
1133I)TZ)$*V*< I I/:I>GI IF E J0 E EDFD'$*d*C*CDDDDZZvDZ99FNN6=))))(7## 	-%,D"||AA77r4   c                    t           )z<
        get_nlfun

        This is not Implemented
        r   )r0   funs     r2   	get_nlfunzResultMixin.get_nlfun	  s
     "!r4   N)r   r   r   r   )r   r   r   r   r5  r8  r;  r=  r?  rB  rE  rG  rJ  rZ  r]  r@   r4   r2   r3  r3  T	  s/       $ $ ^$" 5 5 ^5 E E ^E 1 1 ^1
 / / ^/
 
3 
3 ^
3 F F ^F - - ^-
 - - ^-
:8 :8 :8 :8x" " " " "r4   r3  c                  d    e Zd ZdZd
dZed             Zed             ZddZed	             Z	dS )	_LLRMixinz4Mixin class for Null model and likelihood ratio
    mcfc                .   |                                 }|                    d          rd| j        | j        z  z
  }nX|                    d          s|dv r0dt	          j        | j        | j        z
  d| j        z  z            z
  }nt          d          |S )zC
        McFadden's pseudo-R-squared. `1 - (llf / llnull)`
        r`  r   cox)cslrrS   z)only McFadden and Cox-Snell are available)r  
startswithr  llnullr   expr   rG   )r0   kindprsqs      r2   pseudo_rsquaredz_LLRMixin.pseudo_rsquared	  s     zz||??5!! 	Jtx$+--DD__U## 	Jt|';';rvt{TX5!di-HIIIDDHIIIr4   c                &    d| j         | j        z
  z  S )zM
        Likelihood ratio chi-squared statistic; `-2*(llnull - llf)`
        r7  )rf  r  rv   s    r2   llrz_LLRMixin.llr	  s    
 4;)**r4   c                    | j         }| j        }| j        }||z
  }|| _        t          j        j                            ||          S )z
        The chi-squared probability of getting a log-likelihood ratio
        statistic greater than llr.  llr has a chi-squared distribution
        with degrees of freedom `df_model`.
        )rl  r   df_resid_null
df_lr_nullr   distributionsr  r  )r0   rl  df_fulldf_restrlrdfs        r2   
llr_pvaluez_LLRMixin.llr_pvalue
  sG     h-%7""'**3555r4   NTc                6   | j                             dd           | j                             dd           | j                             dd           | j                             dd           t          | d          r| `|
|| j         d<   || _        || _        dS )a  
        Set the fit options for the Null (constant-only) model.

        This resets the cache for related attributes which is potentially
        fragile. This only sets the option, the null model is estimated
        when llnull is accessed, if llnull is not yet in cache.

        Parameters
        ----------
        llnull : {None, float}
            If llnull is not None, then the value will be directly assigned to
            the cached attribute "llnull".
        attach_results : bool
            Sets an internal flag whether the results instance of the null
            model should be attached. By default without calling this method,
            the null model results are not attached and only the loglikelihood
            value llnull is stored.
        **kwargs
            Additional keyword arguments used as fit keyword arguments for the
            null model. They override the model default values.

        Notes
        -----
        Modifies attributes of this instance, and so has no return.
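
        Examples
        --------
        A minimal illustrative use (``res`` is a fitted results instance; the
        fit options shown are arbitrary choices, not defaults):

        >>> res.set_null_options(attach_results=True, method="bfgs", maxiter=500)
        >>> res.llnull     # null model is (re)estimated with these options
        >>> res.res_null   # results instance of the null model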
        rf  Nrl  rt  	prsquaredres_null)r   r&   r   rw  _attach_nullmodel_optim_kwds_null)r0   rf  attach_resultsr1   s       r2   set_null_optionsz_LLRMixin.set_null_options
  s    : 	$'''t$$$d+++T***4$$ 	$*DK!!/ &r4   c                   | j         }|                                                                }t          |dg           D ]}||=  |j        |j        t          j        | j                  fi |}t          | di                                           }d|v r|	                    d          }n't          |d          r|                                }nd}t          dddd	
          }|                    |           |r |j        dd|i|}n9|                    |dddd	          }|                    |j        dddd	          }t          | dd          dur|| _        t#          |j                  | _        |j        | _        |j        S )z:
        Value of the constant-only loglikelihood
        _null_drop_keysry  r   _get_start_params_nullNbfgsFi'  r   )r   r   r   r   r   )r   r   r   r   r   rx  r@   )r   r=   r   r8   r   r%   r   rG  r   r&   r   r~  r   rj   r   r   rw  re   k_nullr   rn  r  )	r0   r   r<   r:   mod_null
optim_kwdssp_nullopt_kwdsrw  s	            r2   rf  z_LLRMixin.llnull=
  s   
 
##%%**,,5"3R88 	 	CS		"5?5;	0B0BKKdKK
 T#5r::??AA
Z'' nn^44GGU455 	2244GGGvu     
### 		;#x|EEEHEEHH  ||5:,1 $ ; ;H  ||5:,1 $ ; ;H 4,e44EAA$DM(/**%.|r4   )r`  rM  )
r   r   r   r   rj  r   rl  rt  r{  rf  r@   r4   r2   r_  r_  	  s             + + ^+ 6 6 ^6'' '' '' ''R - - ^- - -r4   r_  c                  4    e Zd ZdZd Z	 	 	 	 	 	 d
dZdd	ZdS )rH  a  
    A results class for the discrete dependent variable models.

    .. warning::

       The following description has not been updated to this version/class.
       Where are AIC, BIC, ....? The docstring looks like a copy from
       discretemod.

    Parameters
    ----------
    model : A DiscreteModel instance
    mlefit : instance of LikelihoodResults
        This contains the numerical optimization results as returned by
        LikelihoodModel.fit(), in a superclass of GenericLikelihoodModel


    Attributes
    ----------
    aic : float
        Akaike information criterion.  -2*(`llf` - p) where p is the number
        of regressors including the intercept.
    bic : float
        Bayesian information criterion. -2*`llf` + ln(`nobs`)*p where p is the
        number of regressors including the intercept.
    bse : ndarray
        The standard errors of the coefficients.
    df_resid : float
        See model definition.
    df_model : float
        See model definition.
    fitted_values : ndarray
        Linear predictor XB.
    llf : float
        Value of the loglikelihood
    llnull : float
        Value of the constant-only loglikelihood
    llr : float
        Likelihood ratio chi-squared statistic; -2*(`llnull` - `llf`)
    llr_pvalue : float
        The chi-squared probability of getting a log-likelihood ratio
        statistic greater than llr.  llr has a chi-squared distribution
        with degrees of freedom `df_model`.
    prsquared : float
        McFadden's pseudo-R-squared. 1 - (`llf`/`llnull`)
    c                z   || _         |j        | _        |j        | _        |j        j        d         | _        t          | j         dd          }t          |d          r&t          j        |j	                  s|j	        | _	        n7t          |j                  | j         j        z
  |z
  }|| _	        || j         _	        t          |d          r&t          j        |j                  s|j        | _        n3| j        j        d         | j	        z
  |z
  | _        | j        | j         _        i | _        | j                            |j                   t          |j                  }| j	        | j         j        z   |z   |k    rt#          j        dt&                     | j        | j        |z
  k    rt#          j        d           d S d S )Nr   r   r   r   z5df_model + k_constant + k_extra differs from k_paramsz%df_resid differs from nobs - k_params)r   r%   r$   rc   r   r8   r   r   r  r   re   r   r)   r   r   rO   rj   rE   rF   UserWarning)r0   r   r   r   r   r   s         r2   r3   z&GenericLikelihoodModelResults.__init__
  s   
[
J	K%a(	 $*i335*%% 	+bhu~.F.F 	+!NDMM6=))DJ,AAGKH$DM"*DJ5*%% 	0bhu~.F.F 	0!NDMM J,Q/$-?'IDM"&-DJV_---v}%% =4:007:hFFM 23>@ @ @ =DI000MABBBBB 10r4   NrP  TFc           
     <    ddl m} |}	 || |||||||	          }
|
S )a  
        Compute prediction results when endpoint transformation is valid.

        Parameters
        ----------
        exog : array_like, optional
            The values for which you want to predict.
        transform : bool, optional
            If the model was fit via a formula, do you want to pass
            exog through the formula. Default is True. E.g., if you fit
            a model y ~ log(x1) + log(x2), and transform is True, then
            you can pass a data structure that contains x1 and x2 in
            their original form. Otherwise, you'd need to log the data
            first.
        which : str
            Which statistic is to be predicted. Default is "mean".
            The available statistics and options depend on the model.
            see the model.predict docstring
        row_labels : list of str or None
            If row_labels are provided, then they will replace the generated
            labels.
        average : bool
            If average is True, then the mean prediction is computed, that is,
            predictions are computed for individual exog and then the average
            over observation is used.
            If average is False, then the results are the predictions for all
            observations, i.e. same length as ``exog``.
        agg_weights : ndarray, optional
            Aggregation weights, only used if average is True.
            The weights are not normalized.
        **kwargs :
            Some models can take additional keyword arguments, such as offset,
            exposure or additional exog in multi-part models like zero inflated
            models.
            See the predict method of the model for the details.

        Returns
        -------
        prediction_results : PredictionResults
            The prediction results instance contains prediction and prediction
            variance and can on demand calculate confidence intervals and
            summary dataframe for the prediction.

        Notes
        -----
        Status: new in 0.14, experimental
        r   )get_prediction)r$   whichre  
row_labelsaverageagg_weights	pred_kwds)&statsmodels.base._prediction_inferencer  )r0   r$   r  re  r  r  r  r1   r  r  r  s              r2   r  z,GenericLikelihoodModelResults.get_prediction
  sR    r 	JIIIII	n!#	 	 	 
r4   r  c                   ddddgfddddd	g}d
dd| j         z  gfdd| j        z  gfg}|| j        j        j        dz   dz   }ddlm}  |            }|                    | |||||           |                    | |||| j	                   |S )a,  Summarize the Regression Results

        Parameters
        ----------
        yname : str, optional
            Default is `y`
        xname : list[str], optional
            Names for the exogenous variables, default is "var_xx".
            Must match the number of parameters in the model
        title : str, optional
            Title for the top table. If not None, then this replaces the
            default title
        alpha : float
            significance level for the confidence intervals

        Returns
        -------
        smry : Summary instance
            this holds the summary tables and text, which can be printed or
            converted to various output formats.

        See Also
        --------
        statsmodels.iolib.summary.Summary : class to hold summary results
        )zDep. Variable:N)zModel:NzMethod:zMaximum Likelihood)zDate:N)zTime:N)zNo. Observations:N)zDf Residuals:N)z	Df Model:N)zLog-Likelihood:NzAIC:z%#8.4gzBIC:N rO  r   )Summary)gleftgrightynamexnametitle)r  r  r  r   )
r8  r;  r   r   r   statsmodels.iolib.summaryr  add_table_2colsadd_table_paramsr   )	r0   r  r  r  r  top_left	top_rightr  smrys	            r2   rp  z%GenericLikelihoodModelResults.summary  s    6 -$!5 67##/+' /x$(234x$(234	
 =J(1C7)CE 	655555wyyT)#(U 	 	D 	D 	Dd%uE$(J 	 	0 	0 	0 r4   )NrP  TNFN)NNNr  )r   r   r   r   r3   r  rp  r@   r4   r2   rH  rH  n
  ss        , ,\#C #C #CN G G G GR5 5 5 5 5 5r4   rH  ):
__future__r   statsmodels.compat.pythonr   	functoolsr   rE   numpyr   r  r\  scipyr   statsmodels.base.datar   statsmodels.base.optimizerr   statsmodels.base.wrapperbasewrapperwrapstatsmodels.formular	   statsmodels.stats.contrastr
   r   r   statsmodels.tools.datar   statsmodels.tools.decoratorsr   r   r   r=  r   r   r   r   statsmodels.tools.toolsr   r   statsmodels.tools.validationr   r   r   r   r   r   r   r  rO  r   ResultsWrapperr-  populate_wrapperr3  r_  rH  r@   r4   r2   <module>r     s   " " " " " " * * * * * *                      - - - - - - 0 0 0 0 0 0 ' ' ' ' ' ' ' ' ' 3 3 3 3 3 3         
 4 3 3 3 3 3         
 4 3 3 3 3 3        4 3 3 3 3 3 3 3 2 2 2 2 2 2 3 > 
 F" F" F" F" F" F" F" F"R|< |< |< |< |<e |< |< |<@C C C C C_ C C CLZ" Z" Z" Z" Z" Z" Z" Z"|P P P P PW P P Pf$    t2   "  .,. . .U" U" U" U" U" U" U" U"p       DR R R R R$:K R R R R Rr4   