
"""Gaussian processes regression."""

import warnings
from numbers import Integral, Real
from operator import itemgetter

import numpy as np
import scipy.optimize
from scipy.linalg import cho_solve, cholesky, solve_triangular

from ..base import BaseEstimator, MultiOutputMixin, RegressorMixin, _fit_context, clone
from ..preprocessing._data import _handle_zeros_in_scale
from ..utils import check_random_state
from ..utils._param_validation import Interval, StrOptions
from ..utils.optimize import _check_optimize_result
from ..utils.validation import validate_data
from .kernels import RBF, Kernel
from .kernels import ConstantKernel as C

GPR_CHOLESKY_LOWER = True


class GaussianProcessRegressor(MultiOutputMixin, RegressorMixin, BaseEstimator):
    """Gaussian process regression (GPR).

    The implementation is based on Algorithm 2.1 of [RW2006]_.

    In addition to the standard scikit-learn estimator API,
    :class:`GaussianProcessRegressor`:

    * allows prediction without prior fitting (based on the GP prior)
    * provides an additional method `sample_y(X)`, which evaluates samples
      drawn from the GPR (prior or posterior) at given inputs
    * exposes a method `log_marginal_likelihood(theta)`, which can be used
      externally for other ways of selecting hyperparameters, e.g., via
      Markov chain Monte Carlo.

    To learn the difference between a point-estimate approach and a more
    Bayesian modelling approach, refer to the example entitled
    :ref:`sphx_glr_auto_examples_gaussian_process_plot_compare_gpr_krr.py`.

    Read more in the :ref:`User Guide <gaussian_process>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    kernel : kernel instance, default=None
        The kernel specifying the covariance function of the GP. If None is
        passed, the kernel ``ConstantKernel(1.0, constant_value_bounds="fixed")
        * RBF(1.0, length_scale_bounds="fixed")`` is used as the default. Note that
        the kernel hyperparameters are optimized during fitting unless the
        bounds are marked as "fixed".

    alpha : float or ndarray of shape (n_samples,), default=1e-10
        Value added to the diagonal of the kernel matrix during fitting.
        This can prevent a potential numerical issue during fitting, by
        ensuring that the calculated values form a positive definite matrix.
        It can also be interpreted as the variance of additional Gaussian
        measurement noise on the training observations. Note that this is
        different from using a `WhiteKernel`. If an array is passed, it must
        have the same number of entries as the data used for fitting and is
        used as a datapoint-dependent noise level. The option to specify the
        noise level directly as a parameter is provided mainly for convenience
        and for consistency with :class:`~sklearn.linear_model.Ridge`.
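
        Equivalently, the matrix that is factorized during fitting is
        ``K(X_train_, X_train_) + alpha * np.eye(n_samples)``.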

    optimizer : "fmin_l_bfgs_b", callable or None, default="fmin_l_bfgs_b"
        Can either be one of the internally supported optimizers for optimizing
        the kernel's parameters, specified by a string, or an externally
        defined optimizer passed as a callable. If a callable is passed, it
        must have the signature::

            def optimizer(obj_func, initial_theta, bounds):
                # * 'obj_func': the objective function to be minimized, which
                #   takes the hyperparameters theta as a parameter and an
                #   optional flag eval_gradient, which determines if the
                #   gradient is returned additionally to the function value
                # * 'initial_theta': the initial value for theta, which can be
                #   used by local optimizers
                # * 'bounds': the bounds on the values of theta
                ....
                # Returned are the best found hyperparameters theta and
                # the corresponding value of the target function.
                return theta_opt, func_min

        Per default, the L-BFGS-B algorithm from `scipy.optimize.minimize`
        is used. If None is passed, the kernel's parameters are kept fixed.
        Available internal optimizers are: `{'fmin_l_bfgs_b'}`.
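
        As an illustration (a sketch, not an additional public API), a
        callable mirroring the built-in optimizer could be written as::

            import scipy.optimize

            def custom_optimizer(obj_func, initial_theta, bounds):
                # obj_func returns the negative log-marginal likelihood
                # (and its gradient, since ``jac=True`` is used below).
                res = scipy.optimize.minimize(
                    obj_func, initial_theta, method="L-BFGS-B", jac=True, bounds=bounds
                )
                return res.x, res.fun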

    n_restarts_optimizer : int, default=0
        The number of restarts of the optimizer for finding the kernel's
        parameters which maximize the log-marginal likelihood. The first run
        of the optimizer is performed from the kernel's initial parameters,
        the remaining ones (if any) from thetas sampled log-uniform randomly
        from the space of allowed theta-values. If greater than 0, all bounds
        must be finite. Note that `n_restarts_optimizer == 0` implies that one
        run is performed.

    normalize_y : bool, default=False
        Whether or not to normalize the target values `y` by removing the mean
        and scaling to unit-variance. This is recommended for cases where
        zero-mean, unit-variance priors are used. Note that, in this
        implementation, the normalization is reversed before the GP predictions
        are reported.

        .. versionchanged:: 0.23

    copy_X_train : bool, default=True
        If True, a persistent copy of the training data is stored in the
        object. Otherwise, just a reference to the training data is stored,
        which might cause predictions to change if the data is modified
        externally.

    n_targets : int, default=None
        The number of dimensions of the target values. Used to decide the number
        of outputs when sampling from the prior distributions (i.e. calling
        :meth:`sample_y` before :meth:`fit`). This parameter is ignored once
        :meth:`fit` has been called.

        .. versionadded:: 1.3

    random_state : int, RandomState instance or None, default=None
        Determines random number generation used to draw the initial kernel
        hyperparameters for the optimizer restarts.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

    Attributes
    ----------
    X_train_ : array-like of shape (n_samples, n_features) or list of object
        Feature vectors or other representations of training data (also
        required for prediction).

    y_train_ : array-like of shape (n_samples,) or (n_samples, n_targets)
        Target values in training data (also required for prediction).

    kernel_ : kernel instance
        The kernel used for prediction. The structure of the kernel is the
        same as the one passed as parameter but with optimized hyperparameters.

    L_ : array-like of shape (n_samples, n_samples)
        Lower-triangular Cholesky decomposition of the kernel in ``X_train_``.

    alpha_ : array-like of shape (n_samples,)
        Dual coefficients of training data points in kernel space.

    log_marginal_likelihood_value_ : float
        The log-marginal-likelihood of ``self.kernel_.theta``.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    GaussianProcessClassifier : Gaussian process classification (GPC)
        based on Laplace approximation.

    References
    ----------
    .. [RW2006] `Carl E. Rasmussen and Christopher K.I. Williams,
       "Gaussian Processes for Machine Learning",
       MIT Press 2006 <https://www.gaussianprocess.org/gpml/chapters/RW.pdf>`_

    Examples
    --------
    >>> from sklearn.datasets import make_friedman2
    >>> from sklearn.gaussian_process import GaussianProcessRegressor
    >>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
    >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
    >>> kernel = DotProduct() + WhiteKernel()
    >>> gpr = GaussianProcessRegressor(kernel=kernel,
    ...         random_state=0).fit(X, y)
    >>> gpr.score(X, y)
    0.3680...
    >>> gpr.predict(X[:2,:], return_std=True)
    (array([653.0..., 592.1...]), array([316.6..., 316.6...]))
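
    Posterior samples can also be drawn from the fitted model without
    refitting; the shape below assumes the single-target data of this example:

    >>> gpr.sample_y(X[:2,:], n_samples=3, random_state=0).shape
    (2, 3)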
    """

    _parameter_constraints: dict = {
        "kernel": [None, Kernel],
        "alpha": [Interval(Real, 0, None, closed="left"), np.ndarray],
        "optimizer": [StrOptions({"fmin_l_bfgs_b"}), callable, None],
        "n_restarts_optimizer": [Interval(Integral, 0, None, closed="left")],
        "normalize_y": ["boolean"],
        "copy_X_train": ["boolean"],
        "n_targets": [Interval(Integral, 1, None, closed="left"), None],
        "random_state": ["random_state"],
    }

    def __init__(
        self,
        kernel=None,
        *,
        alpha=1e-10,
        optimizer="fmin_l_bfgs_b",
        n_restarts_optimizer=0,
        normalize_y=False,
        copy_X_train=True,
        n_targets=None,
        random_state=None,
    ):
        self.kernel = kernel
        self.alpha = alpha
        self.optimizer = optimizer
        self.n_restarts_optimizer = n_restarts_optimizer
        self.normalize_y = normalize_y
        self.copy_X_train = copy_X_train
        self.n_targets = n_targets
        self.random_state = random_state

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y):
        """Fit Gaussian process regression model.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features) or list of object
            Feature vectors or other representations of training data.

        y : array-like of shape (n_samples,) or (n_samples, n_targets)
            Target values.

        Returns
        -------
        self : object
            GaussianProcessRegressor class instance.
        """
        if self.kernel is None:  # Use an RBF kernel as default
            self.kernel_ = C(1.0, constant_value_bounds="fixed") * RBF(
                1.0, length_scale_bounds="fixed"
            )
        else:
            self.kernel_ = clone(self.kernel)

        self._rng = check_random_state(self.random_state)

        if self.kernel_.requires_vector_input:
            dtype, ensure_2d = "numeric", True
        else:
            dtype, ensure_2d = None, False
        X, y = validate_data(
            self,
            X,
            y,
            multi_output=True,
            y_numeric=True,
            ensure_2d=ensure_2d,
            dtype=dtype,
        )

        n_targets_seen = y.shape[1] if y.ndim > 1 else 1
        if self.n_targets is not None and n_targets_seen != self.n_targets:
            raise ValueError(
                "The number of targets seen in `y` is different from the "
                f"parameter `n_targets`. Got {n_targets_seen} != {self.n_targets}."
            )

        # Normalize target value
        if self.normalize_y:
            self._y_train_mean = np.mean(y, axis=0)
            self._y_train_std = _handle_zeros_in_scale(np.std(y, axis=0), copy=False)

            # Remove mean and make unit variance
            y = (y - self._y_train_mean) / self._y_train_std
        else:
            shape_y_stats = (y.shape[1],) if y.ndim == 2 else 1
            self._y_train_mean = np.zeros(shape=shape_y_stats)
            self._y_train_std = np.ones(shape=shape_y_stats)

        if np.iterable(self.alpha) and self.alpha.shape[0] != y.shape[0]:
            if self.alpha.shape[0] == 1:
                self.alpha = self.alpha[0]
            else:
                raise ValueError(
                    "alpha must be a scalar or an array with same number of "
                    f"entries as y. ({self.alpha.shape[0]} != {y.shape[0]})"
                )

        self.X_train_ = np.copy(X) if self.copy_X_train else X
        self.y_train_ = np.copy(y) if self.copy_X_train else y

        if self.optimizer is not None and self.kernel_.n_dims > 0:
            # Choose hyperparameters based on maximizing the log-marginal
            # likelihood (potentially starting from several initial values)
            def obj_func(theta, eval_gradient=True):
                if eval_gradient:
                    lml, grad = self.log_marginal_likelihood(
                        theta, eval_gradient=True, clone_kernel=False
                    )
                    return -lml, -grad
                else:
                    return -self.log_marginal_likelihood(theta, clone_kernel=False)

            # First optimize starting from theta specified in kernel
            optima = [
                self._constrained_optimization(
                    obj_func, self.kernel_.theta, self.kernel_.bounds
                )
            ]

            # Additional runs are performed from log-uniform chosen initial
            # theta
            if self.n_restarts_optimizer > 0:
                if not np.isfinite(self.kernel_.bounds).all():
                    raise ValueError(
                        "Multiple optimizer restarts (n_restarts_optimizer>0) "
                        "requires that all bounds are finite."
                    )
                bounds = self.kernel_.bounds
                for iteration in range(self.n_restarts_optimizer):
                    theta_initial = self._rng.uniform(bounds[:, 0], bounds[:, 1])
                    optima.append(
                        self._constrained_optimization(obj_func, theta_initial, bounds)
                    )
            # Select result from run with minimal (negative) log-marginal
            # likelihood
            lml_values = list(map(itemgetter(1), optima))
            self.kernel_.theta = optima[np.argmin(lml_values)][0]
            self.kernel_._check_bounds_params()

            self.log_marginal_likelihood_value_ = -np.min(lml_values)
        else:
            self.log_marginal_likelihood_value_ = self.log_marginal_likelihood(
                self.kernel_.theta, clone_kernel=False
            )

        # Precompute quantities required for predictions which are independent
        # of actual query points.
        # Alg. 2.1, page 19, line 2 -> L = cholesky(K + sigma^2 I)
        K = self.kernel_(self.X_train_)
        K[np.diag_indices_from(K)] += self.alpha
        try:
            self.L_ = cholesky(K, lower=GPR_CHOLESKY_LOWER, check_finite=False)
        except np.linalg.LinAlgError as exc:
            exc.args = (
                f"The kernel, {self.kernel_}, is not returning a positive "
                "definite matrix. Try gradually increasing the 'alpha' "
                "parameter of your GaussianProcessRegressor estimator.",
            ) + exc.args
            raise
        # Alg 2.1, page 19, line 3 -> alpha = L^T \ (L \ y)
        self.alpha_ = cho_solve(
            (self.L_, GPR_CHOLESKY_LOWER),
            self.y_train_,
            check_finite=False,
        )
        return self

    def predict(self, X, return_std=False, return_cov=False):
        """Predict using the Gaussian process regression model.


        We can also predict based on an unfitted model by using the GP prior.
        In addition to the mean of the predictive distribution, optionally also
        returns its standard deviation (`return_std=True`) or covariance
        (`return_cov=True`). Note that at most one of the two can be requested.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features) or list of object
            Query points where the GP is evaluated.

        return_std : bool, default=False
            If True, the standard-deviation of the predictive distribution at
            the query points is returned along with the mean.

        return_cov : bool, default=False
            If True, the covariance of the joint predictive distribution at
            the query points is returned along with the mean.

        Returns
        -------
        y_mean : ndarray of shape (n_samples,) or (n_samples, n_targets)
            Mean of predictive distribution at query points.

        y_std : ndarray of shape (n_samples,) or (n_samples, n_targets), optional
            Standard deviation of predictive distribution at query points.
            Only returned when `return_std` is True.

        y_cov : ndarray of shape (n_samples, n_samples) or                 (n_samples, n_samples, n_targets), optional
            Covariance of joint predictive distribution at query points.
            Only returned when `return_cov` is True.
        z9At most one of return_std or return_cov can be requested.Nr7   r9   F)r<   r=   resetr`   r1   r2   r3   r5   r   r   rB   )repeatsr@   r?   rN   r	   zij,ji->izAPredicted variances smaller than 0. Setting those variances to 0.g        )!RuntimeErrorr!   rU   r   hasattrrR   r   r'   rX   r]   rC   squeezerepeatexpand_dimsdiagsqrtrS   r`   rv   r\   rZ   rV   r   rr   Trq   outerreshaperA   einsumanywarningswarn)r+   rw   
return_std
return_covr=   r<   r!   r'   y_meany_covy_varK_transVy_var_negatives                 r,   predictz GaussianProcessRegressor.predictl  s   F  	* 	K   ;$+"C.E99*E9$Ye5QQQtZ(( R	{"3g>>>WB B B  *..*D!IXQWQZ$;<<<DDFFF q		q==Iub1192  E u}$ Aq==Iub1192  E rwu~~-- ll1dm44Gt{*F &/$2DDF {Q6<?a#7#7F333 !*<5  A  &Q!#'1 F(91(<==EWu{WTVWWW ;q>Q&&Ju1555Eu}$  ))!,,1133:qsA666 "'6.)) 0M8   -0E.) F(91(<==EWu{WTVWWW ;q>Q&&Ju1555Erwu~~--r.   c                 *   t          |          |                     |d          \  j        dk    r                              j        }n=fdt          j        d                   D             }t          j        |          }|S )aw  Draw samples from Gaussian process and evaluate at X.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Query points where the GP is evaluated.

        n_samples : int, default=1
            Number of samples drawn from the Gaussian process per query point.

        random_state : int, RandomState instance or None, default=0
            Determines random number generation to randomly draw samples.
            Pass an int for reproducible results across multiple function
            calls.
            See :term:`Glossary <random_state>`.

        Returns
        -------
        y_samples : ndarray of shape (n_samples_X, n_samples), or \
            (n_samples_X, n_targets, n_samples)
            Values of n_samples samples drawn from Gaussian process and
            evaluated at query points.
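
        Notes
        -----
        Internally, samples are drawn by calling :meth:`predict` with
        ``return_cov=True`` and passing the resulting mean and covariance to
        the random state's ``multivariate_normal`` method.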
        """
        rng = check_random_state(random_state)

        y_mean, y_cov = self.predict(X, return_cov=True)
        if y_mean.ndim == 1:
            y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T
        else:
            y_samples = [
                rng.multivariate_normal(
                    y_mean[:, target], y_cov[..., target], n_samples
                ).T[:, np.newaxis]
                for target in range(y_mean.shape[1])
            ]
            y_samples = np.hstack(y_samples)
        return y_samples

    def log_marginal_likelihood(
        self, theta=None, eval_gradient=False, clone_kernel=True
    ):
        """Return log-marginal likelihood of theta for training data.

        Parameters
        ----------
        theta : array-like of shape (n_kernel_params,) default=None
            Kernel hyperparameters for which the log-marginal likelihood is
            evaluated. If None, the precomputed log_marginal_likelihood
            of ``self.kernel_.theta`` is returned.

        eval_gradient : bool, default=False
            If True, the gradient of the log-marginal likelihood with respect
            to the kernel hyperparameters at position theta is returned
            additionally. If True, theta must not be None.

        clone_kernel : bool, default=True
            If True, the kernel attribute is copied. If False, the kernel
            attribute is modified, but may result in a performance improvement.

        Returns
        -------
        log_likelihood : float
            Log-marginal likelihood of theta for training data.

        log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional
            Gradient of the log-marginal likelihood with respect to the kernel
            hyperparameters at position theta.
            Only returned when eval_gradient is True.
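
        Notes
        -----
        The value computed here is Eq. (2.30) of [RW2006]_ (with ``alpha``
        playing the role of the noise variance), evaluated per output
        dimension and summed::

            log p(y | X, theta) = - 1/2 * y^T (K + alpha*I)^-1 y
                                  - 1/2 * log det(K + alpha*I)
                                  - n/2 * log(2*pi)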
        """
        if theta is None:
            if eval_gradient:
                raise ValueError("Gradient can only be evaluated for theta!=None")
            return self.log_marginal_likelihood_value_

        if clone_kernel:
            kernel = self.kernel_.clone_with_theta(theta)
        else:
            kernel = self.kernel_
            kernel.theta = theta

        if eval_gradient:
            K, K_gradient = kernel(self.X_train_, eval_gradient=True)
        else:
            K = kernel(self.X_train_)

        # Alg 2.1, page 19, line 2 -> L = cholesky(K + sigma^2 I)
        K[np.diag_indices_from(K)] += self.alpha
        try:
            L = cholesky(K, lower=GPR_CHOLESKY_LOWER, check_finite=False)
        except np.linalg.LinAlgError:
            return (-np.inf, np.zeros_like(theta)) if eval_gradient else -np.inf

        # Support multi-dimensional output of self.y_train_
        y_train = self.y_train_
        if y_train.ndim == 1:
            y_train = y_train[:, np.newaxis]

        # Alg 2.1, page 19, line 3 -> alpha = L^T \ (L \ y)
        alpha = cho_solve((L, GPR_CHOLESKY_LOWER), y_train, check_finite=False)

        # Alg 2.1, page 19, line 7
        # -0.5 . y^T . alpha - sum(log(diag(L))) - n_samples / 2 log(2*pi)
        # The einsum computes y^T . alpha for each output independently.
        log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha)
        log_likelihood_dims -= np.log(np.diag(L)).sum()
        log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi)
        # The log-likelihood is summed up across the outputs
        log_likelihood = log_likelihood_dims.sum(axis=-1)

        if eval_gradient:
            # Eq. 5.9, p. 114, and footnote 5 in p. 114:
            # 0.5 * trace((alpha . alpha^T - K^-1) . K_gradient)
            # With multi-output y, alpha has shape (n_samples, n_outputs), so
            # build a (n_samples, n_samples, n_outputs) stack of the outer
            # products alpha_k . alpha_k^T.
            inner_term = np.einsum("ik,jk->ijk", alpha, alpha)
            # Compute K^-1 of shape (n_samples, n_samples)
            K_inv = cho_solve(
                (L, GPR_CHOLESKY_LOWER), np.eye(K.shape[0]), check_finite=False
            )
            # Create a new axis to broadcast between inner_term and K_inv
            inner_term -= K_inv[..., np.newaxis]
            # Only the trace of inner_term @ K_gradient is needed, so compute
            # it directly with an einsum instead of the full matrix product.
            log_likelihood_gradient_dims = 0.5 * np.einsum(
                "ijl,jik->kl", inner_term, K_gradient
            )
            # The gradient is summed up across the outputs
            log_likelihood_gradient = log_likelihood_gradient_dims.sum(axis=-1)

        if eval_gradient:
            return log_likelihood, log_likelihood_gradient

        return log_likelihood

    def _constrained_optimization(self, obj_func, initial_theta, bounds):
        if self.optimizer == "fmin_l_bfgs_b":
            opt_res = scipy.optimize.minimize(
                obj_func,
                initial_theta,
                method="L-BFGS-B",
                jac=True,
                bounds=bounds,
            )
            _check_optimize_result("lbfgs", opt_res)
            theta_opt, func_min = opt_res.x, opt_res.fun
        elif callable(self.optimizer):
            theta_opt, func_min = self.optimizer(obj_func, initial_theta, bounds=bounds)
        else:
            raise ValueError(f"Unknown optimizer {self.optimizer}.")

        return theta_opt, func_min

    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        tags.requires_fit = False
        return tags