
    0Ph-M                       d Z ddlZddlZddlmZmZ ddlmZ ddlm	Z	 ddl
ZddlmZmZmZ ddlmZmZ dd	lmZ dd
lmZ ddlmZ ddlmZ d Z G d d edd                    Z G d de          Z G d d          Z G d d          Z G d d          Z  G d de          Z! G d de          Z" G d de"          Z# G d  d!e"          Z$ G d" d#e          Z% G d$ d%ee e          Z& G d& d'ee e          Z' G d( d)eee          Z( G d* d+e(          Z) G d, d-eee          Z* G d. d/eee          Z+ G d0 d1e          Z,d6d3Z- G d4 d5e          Z.dS )7zRA set of kernels that can be combined by operators and used in Gaussian processes.    N)ABCMetaabstractmethod)
namedtuple)	signature)cdistpdist
squareform)gammakv   )clone)ConvergenceWarning)pairwise_kernels)_num_samplesc                 h   t          j        |                              t                    }t          j        |          dk    rt          d          t          j        |          dk    rF| j        d         |j        d         k    r*t          d|j        d         | j        d         fz            |S )N   z2length_scale cannot be of dimension greater than 1r   zKAnisotropic kernel must have the same number of dimensions as data (%d!=%d))npsqueezeastypefloatndim
ValueErrorshape)Xlength_scales     `/var/www/html/test/jupyter/venv/lib/python3.11/site-packages/sklearn/gaussian_process/kernels.py_check_length_scaler   (   s    :l++22599L	w|q  MNNN	w|!!agajL4Fq4I&I&I*-9-?-BAGAJ,OP
 
 	
     c                   .     e Zd ZdZdZd fd	Zd Z xZS )HyperparameteraI  A kernel hyperparameter's specification in form of a namedtuple.

    .. versionadded:: 0.18

    Attributes
    ----------
    name : str
        The name of the hyperparameter. Note that a kernel using a
        hyperparameter with name "x" must have the attributes self.x and
        self.x_bounds

    value_type : str
        The type of the hyperparameter. Currently, only "numeric"
        hyperparameters are supported.

    bounds : pair of floats >= 0 or "fixed"
        The lower and upper bound on the parameter. If n_elements>1, a pair
        of 1d array with n_elements each may be given alternatively. If
        the string "fixed" is passed as bounds, the hyperparameter's value
        cannot be changed.

    n_elements : int, default=1
        The number of elements of the hyperparameter value. Defaults to 1,
        which corresponds to a scalar hyperparameter. n_elements > 1
        corresponds to a hyperparameter which is vector-valued,
        such as, e.g., anisotropic length-scales.

    fixed : bool, default=None
        Whether the value of this hyperparameter is fixed, i.e., cannot be
        changed during hyperparameter tuning. If None is passed, the "fixed" is
        derived based on the given bounds.

    Examples
    --------
    >>> from sklearn.gaussian_process.kernels import ConstantKernel
    >>> from sklearn.datasets import make_friedman2
    >>> from sklearn.gaussian_process import GaussianProcessRegressor
    >>> from sklearn.gaussian_process.kernels import Hyperparameter
    >>> X, y = make_friedman2(n_samples=50, noise=0, random_state=0)
    >>> kernel = ConstantKernel(constant_value=1.0,
    ...    constant_value_bounds=(0.0, 10.0))

    We can access each hyperparameter:

    >>> for hyperparameter in kernel.hyperparameters:
    ...    print(hyperparameter)
    Hyperparameter(name='constant_value', value_type='numeric',
    bounds=array([[ 0., 10.]]), n_elements=1, fixed=False)

    >>> params = kernel.get_params()
    >>> for key in sorted(params): print(f"{key} : {params[key]}")
    constant_value : 1.0
    constant_value_bounds : (0.0, 10.0)
     r   Nc                    t          |t                    r|dk    rst          j        |          }|dk    rY|j        d         dk    rt          j        ||d          }n1|j        d         |k    r t          d|||j        d         fz            |t          |t                    o|dk    }t          t          |           	                    | |||||          S )Nfixedr   r   z@Bounds on %s should have either 1 or %d dimensions. Given are %d)

isinstancestrr   
atleast_2dr   repeatr   superr    __new__)clsname
value_typebounds
n_elementsr#   	__class__s         r   r)   zHyperparameter.__new__z   s    &#&& 
	&G*;*;]6**FA~~<?a''Yvz1==FF\!_
22$6V\!_=>   =vs++A'0AE^S))11z6:u
 
 	
r   c                     | j         |j         k    oQ| j        |j        k    oAt          j        | j        |j        k              o| j        |j        k    o| j        |j        k    S N)r+   r,   r   allr-   r.   r#   )selfothers     r   __eq__zHyperparameter.__eq__   sg    I# *5#33*t{el233* 5#33* 
ek)	
r   )r   N)__name__
__module____qualname____doc__	__slots__r)   r5   __classcell__r/   s   @r   r    r    4   s^        
5 5~ I
 
 
 
 
 
*
 
 
 
 
 
 
r   r    )r+   r,   r-   r.   r#   c                   (   e Zd ZdZddZd Zd Zed             Zed             Z	ed             Z
e
j        d	             Z
ed
             Zd Zd Zd Zd Zd Zd Zd Zedd            Zed             Zed             Zed             Zd ZdS )Kernela  Base class for all kernels.

    .. versionadded:: 0.18

    Examples
    --------
    >>> from sklearn.gaussian_process.kernels import Kernel, RBF
    >>> import numpy as np
    >>> class CustomKernel(Kernel):
    ...     def __init__(self, length_scale=1.0):
    ...         self.length_scale = length_scale
    ...     def __call__(self, X, Y=None):
    ...         if Y is None:
    ...             Y = X
    ...         return np.inner(X, X if Y is None else Y) ** 2
    ...     def diag(self, X):
    ...         return np.ones(X.shape[0])
    ...     def is_stationary(self):
    ...         return True
    >>> kernel = CustomKernel(length_scale=2.0)
    >>> X = np.array([[1, 2], [3, 4]])
    >>> print(kernel(X))
    [[ 25 121]
     [121 625]]
    Tc                    t                      }| j        }t          |j        d|j                  }t	          |          }g g }}|j                                        D ]a}|j        |j        k    r%|j	        dk    r|
                    |j	                   |j        |j        k    r|
                    |j	                   bt          |          dk    rt          d|d          |D ]}	t          | |	          ||	<   |S )X  Get parameters of this kernel.

        Parameters
        ----------
        deep : bool, default=True
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : dict
            Parameter names mapped to their values.
        deprecated_originalr3   r   zmscikit-learn kernels should always specify their parameters in the signature of their __init__ (no varargs). z  doesn't follow this convention.)dictr/   getattr__init__r   
parametersvalueskindVAR_KEYWORDr+   appendVAR_POSITIONALlenRuntimeError)
r3   deepparamsr*   init	init_signargsvarargs	parameterargs
             r   
get_paramszKernel.get_params   s     ns|%:CLIIdOO	Bg"-4466 	/ 	/I~!6669>V;S;SIN+++~!999y~...w<<1, :=?    	- 	-C!$,,F3KKr   c                    |s| S |                      d          }|                                D ]\  }}|                    dd          }t          |          dk    r7|\  }}||vrt	          d|d| d          ||         } |j        di ||i e||vr t	          d|d| j        j        d          t          | ||           | S )	a:  Set the parameters of this kernel.

        The method works on simple kernels as well as on nested kernels.
        The latter have parameters of the form ``<component>__<parameter>``
        so that it's possible to update each component of a nested object.

        Returns
        -------
        self
        T)rM   __r   zInvalid parameter z for kernel zK. Check the list of available parameters with `kernel.get_params().keys()`.r!   )	rU   itemssplitrK   r   
set_paramsr/   r6   setattr)	r3   rN   valid_paramskeyvaluerY   r+   sub_name
sub_objects	            r   rZ   zKernel.set_params   s%     	KD11 ,,.. 	* 	*JCIIdA&&E5zzA~~!&h|++$* AEdddL  
 *$/
%
%::5(9:::: l**$* 33 7 7 79   c5))))r   c                 2    t          |           }||_        |S )zReturns a clone of self with given hyperparameters theta.

        Parameters
        ----------
        theta : ndarray of shape (n_dims,)
            The hyperparameters
        )r   theta)r3   rb   cloneds      r   clone_with_thetazKernel.clone_with_theta  s     tr   c                 &    | j         j        d         S )z>Returns the number of non-fixed hyperparameters of the kernel.r   )rb   r   r3   s    r   n_dimszKernel.n_dims  s     z""r   c                 >      fdt                     D             }|S )z4Returns a list of all hyperparameter specifications.c                 Z    g | ]'}|                     d           t          |          (S )hyperparameter_)
startswithrC   ).0attrr3   s     r   
<listcomp>z*Kernel.hyperparameters.<locals>.<listcomp>  sF     
 
 
011
D$
 
 
r   )dir)r3   rs   ` r   hyperparameterszKernel.hyperparameters  s8    
 
 
 
D		
 
 

 r   c                 *   g }|                                  }| j        D ])}|j        s |                    ||j                            *t          |          dk    r&t          j        t          j        |                    S t          j	        g           S )  Returns the (flattened, log-transformed) non-fixed hyperparameters.

        Note that theta are typically the log-transformed values of the
        kernel's hyperparameters as this representation of the search space
        is more amenable for hyperparameter search, as hyperparameters like
        length-scales naturally live on a log-scale.

        Returns
        -------
        theta : ndarray of shape (n_dims,)
            The non-fixed, log-transformed hyperparameters of the kernel
        r   )
rU   rq   r#   rI   r+   rK   r   loghstackarray)r3   rb   rN   hyperparameters       r   rb   zKernel.theta  s     """2 	: 	:N!' :VN$78999u::>>6")E**+++8B<<r   c                    |                                  }d}| j        D ]s}|j        r
|j        dk    r7t	          j        ||||j        z                      ||j        <   ||j        z  }Lt	          j        ||                   ||j        <   |dz  }t|t          |          k    r!t          d|t          |          fz             | j	        di | dS )Sets the (flattened, log-transformed) non-fixed hyperparameters.

        Parameters
        ----------
        theta : ndarray of shape (n_dims,)
            The non-fixed, log-transformed hyperparameters of the kernel
        r   r   zGtheta has not the correct number of entries. Should be %d; given are %dNr!   )
rU   rq   r#   r.   r   expr+   rK   r   rZ   )r3   rb   rN   irw   s        r   rb   zKernel.theta7  s    """2 	 	N# (1,,.0f!a.";;;</ /~*+ ^...0fU1X.>.>~*+QE

??.12CJJ@   	!!&!!!!!r   c                     d | j         D             }t          |          dk    r&t          j        t          j        |                    S t          j        g           S )Returns the log-transformed bounds on the theta.

        Returns
        -------
        bounds : ndarray of shape (n_dims, 2)
            The log-transformed bounds on the kernel's hyperparameters theta
        c                 *    g | ]}|j         	|j        S r!   )r#   r-   rl   rw   s     r   rn   z!Kernel.bounds.<locals>.<listcomp>_  s4     
 
 
!'
!
 
 
r   r   )rq   rK   r   rt   vstackrv   )r3   r-   s     r   r-   zKernel.boundsV  s\    
 
"&"6
 
 

 v;;??6")F++,,,8B<<r   c                     t          |t                    st          | t          |                    S t          | |          S r1   r$   r>   SumConstantKernelr3   bs     r   __add__zKernel.__add__i  s:    !V$$ 	0t^A..///4||r   c                     t          |t                    st          t          |          |           S t          ||           S r1   r   r   s     r   __radd__zKernel.__radd__n  s:    !V$$ 	0~a(($///1d||r   c                     t          |t                    st          | t          |                    S t          | |          S r1   r$   r>   Productr   r   s     r   __mul__zKernel.__mul__s  s<    !V$$ 	44!2!2333tQr   c                     t          |t                    st          t          |          |           S t          ||           S r1   r   r   s     r   __rmul__zKernel.__rmul__x  s<    !V$$ 	4>!,,d333q$r   c                 "    t          | |          S r1   )Exponentiationr   s     r   __pow__zKernel.__pow__}  s    dA&&&r   c                    t          |           t          |          k    rdS |                                 }|                                }t          t          |                                          t          |                                          z             D ]E}t          j        |                    |d           |                    |d           k              r dS FdS )NFT)typerU   setlistkeysr   anyget)r3   r   params_aparams_br]   s        r   r5   zKernel.__eq__  s    ::a  5??$$<<>>tHMMOO,,tHMMOO/D/DDEE 	 	Cvhll3--c41H1HHII uutr   c           	          d                     | j        j        d                    t	          dj         | j                                      S )Nz{0}({1}), {0:.3g})formatr/   r6   joinmaprb   rf   s    r   __repr__zKernel.__repr__  s?      N#TYYs93CTZ/P/P%Q%Q
 
 	
r   NFc                     dS )zEvaluate the kernel.Nr!   )r3   r   Yeval_gradients       r   __call__zKernel.__call__        r   c                     dS )a  Returns the diagonal of the kernel k(X, X).

        The result of this method is identical to np.diag(self(X)); however,
        it can be evaluated more efficiently since only the diagonal is
        evaluated.

        Parameters
        ----------
        X : array-like of shape (n_samples,)
            Left argument of the returned kernel k(X, Y)

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X,)
            Diagonal of kernel k(X, X)
        Nr!   r3   r   s     r   diagzKernel.diag  r   r   c                     dS ))Returns whether the kernel is stationary.Nr!   rf   s    r   is_stationaryzKernel.is_stationary  r   r   c                     dS )zReturns whether the kernel is defined on fixed-length feature
        vectors or generic objects. Defaults to True for backward
        compatibility.Tr!   rf   s    r   requires_vector_inputzKernel.requires_vector_input  s	    
 tr   c                    t          j        | j        t          j        | j                  j                  }d}| j        D ]}|j        r
t          |j	                  D ]}||df         r;t          j        d|d|j        d|j        |         d         dt                     nD||df         r:t          j        d|d|j        d|j        |         d         dt                     |dz  }d	S )
z?Called after fitting to warn if bounds may have been too tight.r   z&The optimal value found for dimension z of parameter z' is close to the specified lower bound zE. Decreasing the bound and calling fit again may find a better value.r   z' is close to the specified upper bound zE. Increasing the bound and calling fit again may find a better value.N)r   iscloser-   r&   rb   Trq   r#   ranger.   warningswarnr+   r   )r3   
list_closeidxhypdims        r   _check_bounds_paramszKernel._check_bounds_params  s0   ZR]4:-F-F-HII
' 	 	Cy S^,,  c1f% MM ,/33#*S/!:L:L:LN +     Q' 	MM ,/33#*S/!:L:L:LN +   q+	 	r   TNF)r6   r7   r8   r9   rU   rZ   rd   propertyrg   rq   rb   setterr-   r   r   r   r   r   r5   r   r   r   r   r   r   r   r!   r   r   r>   r>      s        4& & & &P& & &P
 
 
 # # X#   X     X . \" " \"<     X $  
  
     
     
' ' '  
 
 

 # # # ^#   ^$ 8 8 ^8   X    r   r>   )	metaclassc                       e Zd ZdZd ZdS )NormalizedKernelMixinzSMixin for kernels which are normalized: k(X, X)=1.

    .. versionadded:: 0.18
    c                 @    t          j        |j        d                   S )  Returns the diagonal of the kernel k(X, X).

        The result of this method is identical to np.diag(self(X)); however,
        it can be evaluated more efficiently since only the diagonal is
        evaluated.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X,)
            Diagonal of kernel k(X, X)
        r   )r   onesr   r   s     r   r   zNormalizedKernelMixin.diag  s    " wqwqz"""r   N)r6   r7   r8   r9   r   r!   r   r   r   r     s-         
# # # # #r   r   c                       e Zd ZdZd ZdS )StationaryKernelMixinzYMixin for kernels which are stationary: k(X, Y)= f(X-Y).

    .. versionadded:: 0.18
    c                     dS )r   Tr!   rf   s    r   r   z#StationaryKernelMixin.is_stationary  s    tr   N)r6   r7   r8   r9   r   r!   r   r   r   r     s-         
    r   r   c                   (    e Zd ZdZed             ZdS )GenericKernelMixinzMixin for kernels which operate on generic objects such as variable-
    length sequences, trees, and graphs.

    .. versionadded:: 0.22
    c                     dS )z>Whether the kernel works only on fixed-length feature vectors.Fr!   rf   s    r   r   z(GenericKernelMixin.requires_vector_input  s	     ur   N)r6   r7   r8   r9   r   r   r!   r   r   r   r     s9            X  r   r   c                       e Zd ZdZd ZddZed             Zej        d             Zed             Z	dd
Z
d Zd Zed             Zd ZdS )CompoundKernela  Kernel which is composed of a set of other kernels.

    .. versionadded:: 0.18

    Parameters
    ----------
    kernels : list of Kernels
        The other kernels

    Examples
    --------
    >>> from sklearn.gaussian_process.kernels import WhiteKernel
    >>> from sklearn.gaussian_process.kernels import RBF
    >>> from sklearn.gaussian_process.kernels import CompoundKernel
    >>> kernel = CompoundKernel(
    ...     [WhiteKernel(noise_level=3.0), RBF(length_scale=2.0)])
    >>> print(kernel.bounds)
    [[-11.51292546  11.51292546]
     [-11.51292546  11.51292546]]
    >>> print(kernel.n_dims)
    2
    >>> print(kernel.theta)
    [1.09861229 0.69314718]
    c                     || _         d S r1   kernels)r3   r   s     r   rD   zCompoundKernel.__init__  s    r   Tc                 ,    t          | j                  S )r@   r   )rB   r   )r3   rM   s     r   rU   zCompoundKernel.get_params  s     DL))))r   c                 H    t          j        d | j        D                       S )rs   c                     g | ]	}|j         
S r!   )rb   rl   kernels     r   rn   z(CompoundKernel.theta.<locals>.<listcomp>=  s    BBB6&,BBBr   )r   ru   r   rf   s    r   rb   zCompoundKernel.theta/  s%     yBBT\BBBCCCr   c                     | j         j        }t          | j                  D ]\  }}|||z  |dz   |z           |_        dS )zSets the (flattened, log-transformed) non-fixed hyperparameters.

        Parameters
        ----------
        theta : array of shape (n_dims,)
            The non-fixed, log-transformed hyperparameters of the kernel
        r   N)k1rg   	enumerater   rb   )r3   rb   k_dimsr{   r   s        r   rb   zCompoundKernel.theta?  sX     "4<00 	@ 	@IAv Vq1u.>!>?FLL	@ 	@r   c                 H    t          j        d | j        D                       S )zReturns the log-transformed bounds on the theta.

        Returns
        -------
        bounds : array of shape (n_dims, 2)
            The log-transformed bounds on the kernel's hyperparameters theta
        c                     g | ]	}|j         
S r!   )r-   r   s     r   rn   z)CompoundKernel.bounds.<locals>.<listcomp>U  s    CCCF&-CCCr   )r   r   r   rf   s    r   r-   zCompoundKernel.boundsL  s%     yCCdlCCCDDDr   NFc                 `   rg }g }| j         D ]N} |          \  }}|                    |           |                    |dt          j        f                    Ot          j        |          t          j        |d          fS t          j        fd| j         D                       S )a  Return the kernel k(X, Y) and optionally its gradient.

        Note that this compound kernel returns the results of all simple kernel
        stacked along an additional axis.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object,             default=None
            Left argument of the returned kernel k(X, Y)

        Y : array-like of shape (n_samples_X, n_features) or list of object,             default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of the
            kernel hyperparameter is computed.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y, n_kernels)
            Kernel k(X, Y)

        K_gradient : ndarray of shape                 (n_samples_X, n_samples_X, n_dims, n_kernels), optional
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when `eval_gradient`
            is True.
        .   c                 *    g | ]} |          S r!   r!   )rl   r   r   r   r   s     r   rn   z+CompoundKernel.__call__.<locals>.<listcomp>  s'    UUUfffQ=99UUUr   )r   rI   r   newaxisdstackconcatenate)	r3   r   r   r   KK_gradr   K_singleK_grad_singles	    ```     r   r   zCompoundKernel.__call__W  s    @  		WAF, > >*0&A}*E*E'-"""mCO<====9Q<<!:!:::9UUUUUUUUUVVVr   c                     t                     t                    k    s*t           j                  t          j                  k    rdS t          j         fdt          t           j                            D                       S )NFc                 H    g | ]}j         |         j         |         k    S r!   r   )rl   r{   r   r3   s     r   rn   z)CompoundKernel.__eq__.<locals>.<listcomp>  s*    OOOT\!_	!,OOOr   )r   rK   r   r   r2   r   r   s   ``r   r5   zCompoundKernel.__eq__  s|    ::a  C$5$5QY$G$G5vOOOOOeC<M<M6N6NOOO
 
 	
r   c                 H    t          j        d | j        D                       S )r   c                 6    g | ]}|                                 S r!   )r   r   s     r   rn   z0CompoundKernel.is_stationary.<locals>.<listcomp>  s$    III&v++--IIIr   )r   r2   r   rf   s    r   r   zCompoundKernel.is_stationary  s#    vIIDLIIIJJJr   c                 H    t          j        d | j        D                       S )=Returns whether the kernel is defined on discrete structures.c                     g | ]	}|j         
S r!   )r   r   s     r   rn   z8CompoundKernel.requires_vector_input.<locals>.<listcomp>  s    OOOv3OOOr   )r   r   r   rf   s    r   r   z$CompoundKernel.requires_vector_input  s%     vOO$,OOOPPPr   c                 X    t          j        fd| j        D                       j        S )a  Returns the diagonal of the kernel k(X, X).

        The result of this method is identical to `np.diag(self(X))`; however,
        it can be evaluated more efficiently since only the diagonal is
        evaluated.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Argument to the kernel.

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X, n_kernels)
            Diagonal of kernel k(X, X)
        c                 :    g | ]}|                               S r!   )r   )rl   r   r   s     r   rn   z'CompoundKernel.diag.<locals>.<listcomp>  s#    DDDV&++a..DDDr   )r   r   r   r   r   s    `r   r   zCompoundKernel.diag  s/    " yDDDDt|DDDEEGGr   r   r   )r6   r7   r8   r9   rD   rU   r   rb   r   r-   r   r5   r   r   r   r!   r   r   r   r     s	        2  * * * *  D D XD \
@ 
@ \
@ E E XE)W )W )W )WV
 
 
K K K Q Q XQH H H H Hr   r   c                       e Zd ZdZd ZddZed             Zed             Zej	        d             Zed             Z
d	 Zd
 Zed             ZdS )KernelOperatorzEBase class for all kernel operators.

    .. versionadded:: 0.18
    c                 "    || _         || _        d S r1   r   k2)r3   r   r   s      r   rD   zKernelOperator.__init__  s    r   Tc                 h   t          | j        | j                  }|r| j                                                                        }|                    d |D                        | j                                                                        }|                    d |D                        |S )r@   r   c              3   *   K   | ]\  }}d |z   |fV  dS )k1__Nr!   rl   kvals      r   	<genexpr>z,KernelOperator.get_params.<locals>.<genexpr>  /      EE36A:s+EEEEEEr   c              3   *   K   | ]\  }}d |z   |fV  dS )k2__Nr!   r   s      r   r   z,KernelOperator.get_params.<locals>.<genexpr>  r   r   )rB   r   r   rU   rX   updater3   rM   rN   
deep_itemss       r   rU   zKernelOperator.get_params  s     TW--- 	F++--3355JMMEE*EEEEEE++--3355JMMEE*EEEEEEr   c           	          d | j         j        D             }| j        j        D ]>}|                    t	          d|j        z   |j        |j        |j                             ?|S )%Returns a list of all hyperparameter.c                 `    g | ]+}t          d |j        z   |j        |j        |j                  ,S )r   )r    r+   r,   r-   r.   r   s     r   rn   z2KernelOperator.hyperparameters.<locals>.<listcomp>  sQ     
 
 
  ,,)%)	 
 
 
r   r   )	r   rq   r   rI   r    r+   r,   r-   r.   r3   rp   rw   s      r   rq   zKernelOperator.hyperparameters  s    
 
 #''"9
 
 
 #g5 	 	NHH^00"-")"-	     r   c                 T    t          j        | j        j        | j        j                  S rs   )r   rI   r   rb   r   rf   s    r   rb   zKernelOperator.theta  s     y666r   c                 n    | j         j        }|d|         | j         _        ||d         | j        _        dS ry   N)r   rg   rb   r   )r3   rb   k1_dimss      r   rb   zKernelOperator.theta  s3     '.hwhghhr   c                     | j         j        j        dk    r| j        j        S | j        j        j        dk    r| j         j        S t	          j        | j         j        | j        j        f          S )r}   r   )r   r-   sizer   r   r   rf   s    r   r-   zKernelOperator.bounds  sY     7>!##7>!7>!##7>!y$'.$'.9:::r   c                     t          |           t          |          k    rdS | j        |j        k    r| j        |j        k    p| j        |j        k    o| j        |j        k    S r   )r   r   r   r   s     r   r5   zKernelOperator.__eq__  sX    ::a  5143DGqtO 
GqtO/14	
r   c                 f    | j                                         o| j                                        S r   )r   r   r   rf   s    r   r   zKernelOperator.is_stationary  s)    w$$&&B47+@+@+B+BBr   c                 2    | j         j        p| j        j        S r  )r   r   r   rf   s    r   r   z$KernelOperator.requires_vector_input  s     w,M0MMr   Nr   )r6   r7   r8   r9   rD   rU   r   rq   rb   r   r-   r5   r   r   r!   r   r   r   r     s         
     .   X. 7 7 X7 \
( 
( \
( ; ; X;
 
 
C C C N N XN N Nr   r   c                   &    e Zd ZdZddZd Zd ZdS )r   a+  The `Sum` kernel takes two kernels :math:`k_1` and :math:`k_2`
    and combines them via

    .. math::
        k_{sum}(X, Y) = k_1(X, Y) + k_2(X, Y)

    Note that the `__add__` magic method is overridden, so
    `Sum(RBF(), RBF())` is equivalent to using the + operator
    with `RBF() + RBF()`.


    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    k1 : Kernel
        The first base-kernel of the sum-kernel

    k2 : Kernel
        The second base-kernel of the sum-kernel

    Examples
    --------
    >>> from sklearn.datasets import make_friedman2
    >>> from sklearn.gaussian_process import GaussianProcessRegressor
    >>> from sklearn.gaussian_process.kernels import RBF, Sum, ConstantKernel
    >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
    >>> kernel = Sum(ConstantKernel(2), RBF())
    >>> gpr = GaussianProcessRegressor(kernel=kernel,
    ...         random_state=0).fit(X, y)
    >>> gpr.score(X, y)
    1.0
    >>> kernel
    1.41**2 + RBF(length_scale=1)
    NFc                    |rQ|                      ||d          \  }}|                     ||d          \  }}||z   t          j        ||f          fS |                      ||          |                     ||          z   S )a  Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Left argument of the returned kernel k(X, Y)

        Y : array-like of shape (n_samples_X, n_features) or list of object,                default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of
            the kernel hyperparameter is computed.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),                optional
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when `eval_gradient`
            is True.
        Tr   )r   r   r   r   r3   r   r   r   K1K1_gradientK2K2_gradients           r   r   zSum.__call__C  s    8  	1"gga$g??OB"gga$g??OB7BI{K&@AAAA771a==4771a==00r   c                 l    | j                             |          | j                            |          z   S )a  Returns the diagonal of the kernel k(X, X).

        The result of this method is identical to `np.diag(self(X))`; however,
        it can be evaluated more efficiently since only the diagonal is
        evaluated.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Argument to the kernel.

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X,)
            Diagonal of kernel k(X, X)
        r   r   r   r   s     r   r   zSum.diagf  '    " w||Aa00r   c                 B    d                     | j        | j                  S )Nz	{0} + {1}r   r   r   rf   s    r   r   zSum.__repr__y      !!$'47333r   r   r6   r7   r8   r9   r   r   r   r!   r   r   r   r     sR        $ $L!1 !1 !1 !1F1 1 1&4 4 4 4 4r   r   c                   &    e Zd ZdZddZd Zd ZdS )r   aY  The `Product` kernel takes two kernels :math:`k_1` and :math:`k_2`
    and combines them via

    .. math::
        k_{prod}(X, Y) = k_1(X, Y) * k_2(X, Y)

    Note that the `__mul__` magic method is overridden, so
    `Product(RBF(), RBF())` is equivalent to using the * operator
    with `RBF() * RBF()`.

    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    k1 : Kernel
        The first base-kernel of the product-kernel

    k2 : Kernel
        The second base-kernel of the product-kernel


    Examples
    --------
    >>> from sklearn.datasets import make_friedman2
    >>> from sklearn.gaussian_process import GaussianProcessRegressor
    >>> from sklearn.gaussian_process.kernels import (RBF, Product,
    ...            ConstantKernel)
    >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
    >>> kernel = Product(ConstantKernel(2), RBF())
    >>> gpr = GaussianProcessRegressor(kernel=kernel,
    ...         random_state=0).fit(X, y)
    >>> gpr.score(X, y)
    1.0
    >>> kernel
    1.41**2 * RBF(length_scale=1)
    NFc           	      j   |r|                      ||d          \  }}|                     ||d          \  }}||z  t          j        ||ddddt          j        f         z  ||ddddt          j        f         z  f          fS |                      ||          |                     ||          z  S )a  Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Left argument of the returned kernel k(X, Y)

        Y : array-like of shape (n_samples_Y, n_features) or list of object,            default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of
            the kernel hyperparameter is computed.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),                 optional
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when `eval_gradient`
            is True.
        Tr  N)r   r   r   r   r   r  s           r   r   zProduct.__call__  s    8  	1"gga$g??OB"gga$g??OB7BIr!!!QQQ
"233[2aaaBJFVCW5WX    771a==4771a==00r   c                 l    | j                             |          | j                            |          z  S   Returns the diagonal of the kernel k(X, X).

        The result of this method is identical to np.diag(self(X)); however,
        it can be evaluated more efficiently since only the diagonal is
        evaluated.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Argument to the kernel.

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X,)
            Diagonal of kernel k(X, X)
        r  r   s     r   r   zProduct.diag  r  r   c                 B    d                     | j        | j                  S )Nz	{0} * {1}r  rf   s    r   r   zProduct.__repr__  r  r   r   r  r!   r   r   r   r   }  sR        % %N#1 #1 #1 #1J1 1 1&4 4 4 4 4r   r   c                       e Zd ZdZd ZddZed             Zed             Zej	        d             Zed             Z
d	 ZddZd Zd Zd Zed             Zd
S )r   a  The Exponentiation kernel takes one base kernel and a scalar parameter
    :math:`p` and combines them via

    .. math::
        k_{exp}(X, Y) = k(X, Y) ^p

    Note that the `__pow__` magic method is overridden, so
    `Exponentiation(RBF(), 2)` is equivalent to using the ** operator
    with `RBF() ** 2`.


    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    kernel : Kernel
        The base kernel

    exponent : float
        The exponent for the base kernel


    Examples
    --------
    >>> from sklearn.datasets import make_friedman2
    >>> from sklearn.gaussian_process import GaussianProcessRegressor
    >>> from sklearn.gaussian_process.kernels import (RationalQuadratic,
    ...            Exponentiation)
    >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
    >>> kernel = Exponentiation(RationalQuadratic(), exponent=2)
    >>> gpr = GaussianProcessRegressor(kernel=kernel, alpha=5,
    ...         random_state=0).fit(X, y)
    >>> gpr.score(X, y)
    0.419...
    >>> gpr.predict(X[:1,:], return_std=True)
    (array([635.5...]), array([0.559...]))
    c                 "    || _         || _        d S r1   r   exponent)r3   r   r'  s      r   rD   zExponentiation.__init__
  s     r   Tc                     t          | j        | j                  }|rJ| j                                                                        }|                    d |D                        |S )r@   r&  c              3   *   K   | ]\  }}d |z   |fV  dS )kernel__Nr!   r   s      r   r   z,Exponentiation.get_params.<locals>.<genexpr>  s/      IIFAs:>3/IIIIIIr   )rB   r   r'  rU   rX   r   r   s       r   rU   zExponentiation.get_params  sh     T[4=AAA 	J//117799JMMIIjIIIIIIr   c           	          g }| j         j        D ]>}|                    t          d|j        z   |j        |j        |j                             ?|S )r  r*  )r   rq   rI   r    r+   r,   r-   r.   r  s      r   rq   zExponentiation.hyperparameters"  si     "k9 	 	NHH!44"-")"-	     r   c                     | j         j        S r  r   rb   rf   s    r   rb   zExponentiation.theta1  s     {  r   c                     || j         _        dS r  r-  )r3   rb   s     r   rb   zExponentiation.thetaA  s     "r   c                     | j         j        S )r}   )r   r-   rf   s    r   r-   zExponentiation.boundsL  s     {!!r   c                     t          |           t          |          k    rdS | j        |j        k    o| j        |j        k    S r   )r   r   r'  r   s     r   r5   zExponentiation.__eq__W  s;    ::a  5{ah&F4=AJ+FFr   NFc                     |rV|                      ||d          \  }}|| j        |ddddt          j        f         | j        dz
  z  z  z  }|| j        z  |fS |                      ||d          }|| j        z  S )a  Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Left argument of the returned kernel k(X, Y)

        Y : array-like of shape (n_samples_Y, n_features) or list of object,            default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of
            the kernel hyperparameter is computed.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),                optional
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when `eval_gradient`
            is True.
        Tr  Nr   F)r   r'  r   r   r3   r   r   r   r   
K_gradients         r   r   zExponentiation.__call__\  s    8  	$ KK1DKAAMAz$-!AAAqqq"*,<*=$-RSBS*TTTJdm#Z//Aq66Adm##r   c                 F    | j                             |          | j        z  S r!  )r   r   r'  r   s     r   r   zExponentiation.diag  s!    " {""dm33r   c                 B    d                     | j        | j                  S )Nz
{0} ** {1})r   r   r'  rf   s    r   r   zExponentiation.__repr__  s    ""4;>>>r   c                 4    | j                                         S r  )r   r   rf   s    r   r   zExponentiation.is_stationary  s    {((***r   c                     | j         j        S )r   )r   r   rf   s    r   r   z$Exponentiation.requires_vector_input  s     {00r   r   r   )r6   r7   r8   r9   rD   rU   r   rq   rb   r   r-   r5   r   r   r   r   r   r!   r   r   r   r     s       & &P! ! !   (   X ! ! X! \" " \" " " X"G G G
"$ "$ "$ "$H4 4 4&? ? ?+ + + 1 1 X1 1 1r   r   c                   D    e Zd ZdZddZed             ZddZd	 Zd
 Z	dS )r   a   Constant kernel.

    Can be used as part of a product-kernel where it scales the magnitude of
    the other factor (kernel) or as part of a sum-kernel, where it modifies
    the mean of the Gaussian process.

    .. math::
        k(x_1, x_2) = constant\_value \;\forall\; x_1, x_2

    Adding a constant kernel is equivalent to adding a constant::

            kernel = RBF() + ConstantKernel(constant_value=2)

    is the same as::

            kernel = RBF() + 2


    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    constant_value : float, default=1.0
        The constant value which defines the covariance:
        k(x_1, x_2) = constant_value

    constant_value_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on `constant_value`.
        If set to "fixed", `constant_value` cannot be changed during
        hyperparameter tuning.

    Examples
    --------
    >>> from sklearn.datasets import make_friedman2
    >>> from sklearn.gaussian_process import GaussianProcessRegressor
    >>> from sklearn.gaussian_process.kernels import RBF, ConstantKernel
    >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
    >>> kernel = RBF() + ConstantKernel(constant_value=2)
    >>> gpr = GaussianProcessRegressor(kernel=kernel, alpha=5,
    ...         random_state=0).fit(X, y)
    >>> gpr.score(X, y)
    0.3696...
    >>> gpr.predict(X[:1,:], return_std=True)
    (array([606.1...]), array([0.24...]))
          ?gh㈵>g     j@c                 "    || _         || _        d S r1   )constant_valueconstant_value_bounds)r3   r<  r=  s      r   rD   zConstantKernel.__init__  s    ,%:"""r   c                 .    t          dd| j                  S )Nr<  numeric)r    r=  rf   s    r   hyperparameter_constant_valuez,ConstantKernel.hyperparameter_constant_value  s    .	4;UVVVr   NFc                 
   ||}n|rt          d          t          j        t          |          t          |          f| j        t          j        | j                  j                  }|r| j        j        sW|t          j        t          |          t          |          df| j        t          j        | j                  j                  fS |t          j	        t          |          t          |          df          fS |S )a  Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Left argument of the returned kernel k(X, Y)

        Y : array-like of shape (n_samples_X, n_features) or list of object,             default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of
            the kernel hyperparameter is computed.
            Only supported when Y is None.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),             optional
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when eval_gradient
            is True.
        N.Gradient can only be evaluated when Y is None.dtyper   r   )
r   r   fullr   r<  rv   rD  r@  r#   emptyr3   r   r   r   r   s        r   r   zConstantKernel.__call__  s   : 9AA 	OMNNNG!__l1oo.(4.//5
 
 

  	5; 
JG%a,q//1=+ ht':;;A    "(LOO\!__a#HIIIIHr   c                     t          j        t          |          | j        t          j        | j                  j                  S r"  rC  )r   rE  r   r<  rv   rD  r   s     r   r   zConstantKernel.diag  s?    " wOO(4.//5
 
 
 	
r   c                 Z    d                     t          j        | j                            S )Nz
{0:.3g}**2)r   r   sqrtr<  rf   s    r   r   zConstantKernel.__repr__&  s#    ""274+>#?#?@@@r   r9  r:  r   )
r6   r7   r8   r9   rD   r   r@  r   r   r   r!   r   r   r   r     s        . .`; ; ; ; W W XW4 4 4 4l
 
 
.A A A A Ar   r   c                   D    e Zd ZdZddZed             ZddZd	 Zd
 Z	dS )WhiteKernelab  White kernel.

    The main use-case of this kernel is as part of a sum-kernel where it
    explains the noise of the signal as independently and identically
    normally-distributed. The parameter noise_level equals the variance of this
    noise.

    .. math::
        k(x_1, x_2) = noise\_level \text{ if } x_i == x_j \text{ else } 0


    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    noise_level : float, default=1.0
        Parameter controlling the noise level (variance)

    noise_level_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'noise_level'.
        If set to "fixed", 'noise_level' cannot be changed during
        hyperparameter tuning.

    Examples
    --------
    >>> from sklearn.datasets import make_friedman2
    >>> from sklearn.gaussian_process import GaussianProcessRegressor
    >>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
    >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
    >>> kernel = DotProduct() + WhiteKernel(noise_level=0.5)
    >>> gpr = GaussianProcessRegressor(kernel=kernel,
    ...         random_state=0).fit(X, y)
    >>> gpr.score(X, y)
    0.3680...
    >>> gpr.predict(X[:2,:], return_std=True)
    (array([653.0..., 592.1... ]), array([316.6..., 316.6...]))
    r9  r:  c                 "    || _         || _        d S r1   )noise_levelnoise_level_bounds)r3   rP  rQ  s      r   rD   zWhiteKernel.__init__S  s    &"4r   c                 .    t          dd| j                  S )NrP  r?  )r    rQ  rf   s    r   hyperparameter_noise_levelz&WhiteKernel.hyperparameter_noise_levelW      mY8OPPPr   NFc                    ||rt          d          || j        t          j        t	          |                    z  }|r| j        j        sB|| j        t          j        t	          |                    ddddt          j        f         z  fS |t          j        t	          |          t	          |          df          fS |S t          j	        t	          |          t	          |          f          S )a  Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Left argument of the returned kernel k(X, Y)

        Y : array-like of shape (n_samples_X, n_features) or list of object,            default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of
            the kernel hyperparameter is computed.
            Only supported when Y is None.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),            optional
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when eval_gradient
            is True.
        NrB  r   )
r   rP  r   eyer   rS  r#   r   rF  zerosrG  s        r   r   zWhiteKernel.__call__[  s    : =]=MNNN9 26,q//#:#::A 	6< N(26,q//+B+B111aaaCS+TT 
 bhQa!'LMMMM8\!__l1oo>???r   c                     t          j        t          |          | j        t          j        | j                  j                  S rI  )r   rE  r   rP  rv   rD  r   s     r   r   zWhiteKernel.diag  s=    " wOOT-RXd>N5O5O5U
 
 
 	
r   c                 L    d                     | j        j        | j                  S )Nz{0}(noise_level={1:.3g}))r   r/   r6   rP  rf   s    r   r   zWhiteKernel.__repr__  s'    )00N#T%5
 
 	
r   rL  r   )
r6   r7   r8   r9   rD   r   rS  r   r   r   r!   r   r   rN  rN  *  s        & &P5 5 5 5 Q Q XQ-@ -@ -@ -@^
 
 
*
 
 
 
 
r   rN  c                   T    e Zd ZdZddZed             Zed             Zdd	Zd
 Z	dS )RBFa	  Radial basis function kernel (aka squared-exponential kernel).

    The RBF kernel is a stationary kernel. It is also known as the
    "squared exponential" kernel. It is parameterized by a length scale
    parameter :math:`l>0`, which can either be a scalar (isotropic variant
    of the kernel) or a vector with the same number of dimensions as the inputs
    X (anisotropic variant of the kernel). The kernel is given by:

    .. math::
        k(x_i, x_j) = \exp\left(- \frac{d(x_i, x_j)^2}{2l^2} \right)

    where :math:`l` is the length scale of the kernel and
    :math:`d(\cdot,\cdot)` is the Euclidean distance.
    For advice on how to set the length scale parameter, see e.g. [1]_.

    This kernel is infinitely differentiable, which implies that GPs with this
    kernel as covariance function have mean square derivatives of all orders,
    and are thus very smooth.
    See [2]_, Chapter 4, Section 4.2, for further details of the RBF kernel.

    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    length_scale : float or ndarray of shape (n_features,), default=1.0
        The length scale of the kernel. If a float, an isotropic kernel is
        used. If an array, an anisotropic kernel is used where each dimension
        of l defines the length-scale of the respective feature dimension.

    length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'length_scale'.
        If set to "fixed", 'length_scale' cannot be changed during
        hyperparameter tuning.

    References
    ----------
    .. [1] `David Duvenaud (2014). "The Kernel Cookbook:
        Advice on Covariance functions".
        <https://www.cs.toronto.edu/~duvenaud/cookbook/>`_

    .. [2] `Carl Edward Rasmussen, Christopher K. I. Williams (2006).
        "Gaussian Processes for Machine Learning". The MIT Press.
        <http://www.gaussianprocess.org/gpml/>`_

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.gaussian_process import GaussianProcessClassifier
    >>> from sklearn.gaussian_process.kernels import RBF
    >>> X, y = load_iris(return_X_y=True)
    >>> kernel = 1.0 * RBF(1.0)
    >>> gpc = GaussianProcessClassifier(kernel=kernel,
    ...         random_state=0).fit(X, y)
    >>> gpc.score(X, y)
    0.9866...
    >>> gpc.predict_proba(X[:2,:])
    array([[0.8354..., 0.03228..., 0.1322...],
           [0.7906..., 0.0652..., 0.1441...]])
    r9  r:  c                 "    || _         || _        d S r1   )r   length_scale_bounds)r3   r   r]  s      r   rD   zRBF.__init__  s    (#6   r   c                 d    t          j        | j                  ot          | j                  dk    S )Nr   )r   iterabler   rK   rf   s    r   anisotropiczRBF.anisotropic  s*    {4,--L#d6G2H2H12LLr   c                     | j         r)t          dd| j        t          | j                            S t          dd| j                  S Nr   r?  )r`  r    r]  rK   r   rf   s    r   hyperparameter_length_scalezRBF.hyperparameter_length_scale  sP     	!(D%&&	   ni9QRRRr   NFc                 @   t          j        |          }t          || j                  }|Pt	          ||z  d          }t          j        d|z            }t          |          }t          j        |d           n@|rt          d          t          ||z  ||z  d          }t          j        d|z            }|r| j
        j        r/|t          j        |j        d         |j        d         df          fS | j        r|j        d         dk    r-|t          |          z  ddddt           j        f         }||fS | j        rW|ddt           j        ddf         |t           j        ddddf         z
  dz  |dz  z  }||d	t           j        f         z  }||fS dS |S )
  Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Y : ndarray of shape (n_samples_Y, n_features), default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            if evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of
            the kernel hyperparameter is computed.
            Only supported when Y is None.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),                 optional
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when `eval_gradient`
            is True.
        Nsqeuclideanmetricg      r   rB  r   r   .)r   r&   r   r   r   rz   r	   fill_diagonalr   r   rc  r#   rF  r   r`  r   )r3   r   r   r   r   distsr   r3  s           r   r   zRBF.__call__  s   8 M!*1d.?@@9!l*=AAAEte|$$A1AQ"""" S !QRRR!l*A,<]SSSEte|$$A 	/5 %"(AGAJ
A#>????% 	%);A)>!)C)C*U"3"33QQQ2:5EF
*}$! %2:qqq 01Abj!!!QQQ6F4GGAM !O
 aRZ00
*}$% % Hr   c           	          | j         rKd                    | j        j        d                    t          dj        | j                                      S d                    | j        j        t          j        | j                  d                   S )Nz{0}(length_scale=[{1}])r   r   z{0}(length_scale={1:.3g})r   )	r`  r   r/   r6   r   r   r   r   ravelrf   s    r   r   zRBF.__repr__2  s     	,33'		#i.0ABBCC  
 /55'$2C)D)DQ)G  r   rL  r   )
r6   r7   r8   r9   rD   r   r`  rc  r   r   r!   r   r   r[  r[    s        < <|7 7 7 7 M M XM S S XS9 9 9 9v	 	 	 	 	r   r[  c                   2     e Zd ZdZd
 fd	ZddZd	 Z xZS )Maternu  Matern kernel.

    The class of Matern kernels is a generalization of the :class:`RBF`.
    It has an additional parameter :math:`\nu` which controls the
    smoothness of the resulting function. The smaller :math:`\nu`,
    the less smooth the approximated function is.
    As :math:`\nu\rightarrow\infty`, the kernel becomes equivalent to
    the :class:`RBF` kernel. When :math:`\nu = 1/2`, the Matérn kernel
    becomes identical to the absolute exponential kernel.
    Important intermediate values are
    :math:`\nu=1.5` (once differentiable functions)
    and :math:`\nu=2.5` (twice differentiable functions).

    The kernel is given by:

    .. math::
         k(x_i, x_j) =  \frac{1}{\Gamma(\nu)2^{\nu-1}}\Bigg(
         \frac{\sqrt{2\nu}}{l} d(x_i , x_j )
         \Bigg)^\nu K_\nu\Bigg(
         \frac{\sqrt{2\nu}}{l} d(x_i , x_j )\Bigg)



    where :math:`d(\cdot,\cdot)` is the Euclidean distance,
    :math:`K_{\nu}(\cdot)` is a modified Bessel function and
    :math:`\Gamma(\cdot)` is the gamma function.
    See [1]_, Chapter 4, Section 4.2, for details regarding the different
    variants of the Matern kernel.

    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    length_scale : float or ndarray of shape (n_features,), default=1.0
        The length scale of the kernel. If a float, an isotropic kernel is
        used. If an array, an anisotropic kernel is used where each dimension
        of l defines the length-scale of the respective feature dimension.

    length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'length_scale'.
        If set to "fixed", 'length_scale' cannot be changed during
        hyperparameter tuning.

    nu : float, default=1.5
        The parameter nu controlling the smoothness of the learned function.
        The smaller nu, the less smooth the approximated function is.
        For nu=inf, the kernel becomes equivalent to the RBF kernel and for
        nu=0.5 to the absolute exponential kernel. Important intermediate
        values are nu=1.5 (once differentiable functions) and nu=2.5
        (twice differentiable functions). Note that values of nu not in
        [0.5, 1.5, 2.5, inf] incur a considerably higher computational cost
        (appr. 10 times higher) since they require to evaluate the modified
        Bessel function. Furthermore, in contrast to l, nu is kept fixed to
        its initial value and not optimized.

    References
    ----------
    .. [1] `Carl Edward Rasmussen, Christopher K. I. Williams (2006).
        "Gaussian Processes for Machine Learning". The MIT Press.
        <http://www.gaussianprocess.org/gpml/>`_

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.gaussian_process import GaussianProcessClassifier
    >>> from sklearn.gaussian_process.kernels import Matern
    >>> X, y = load_iris(return_X_y=True)
    >>> kernel = 1.0 * Matern(length_scale=1.0, nu=1.5)
    >>> gpc = GaussianProcessClassifier(kernel=kernel,
    ...         random_state=0).fit(X, y)
    >>> gpc.score(X, y)
    0.9866...
    >>> gpc.predict_proba(X[:2,:])
    array([[0.8513..., 0.0368..., 0.1117...],
            [0.8086..., 0.0693..., 0.1220...]])
    r9  r:        ?c                 Z    t                                          ||           || _        d S r1   )r(   rD   nu)r3   r   r]  rq  r/   s       r   rD   zMatern.__init__  s)    ':;;;r   NFc           	      h	    t          j                  t           j                  }t	          |z  d          }n)|rt          d          t          |z  |z  d          } j        dk    rt          j        |           }nb j        dk    r4|t          j
        d          z  }d|z   t          j        |           z  }n# j        d	k    r<|t          j
        d
          z  }d|z   |dz  dz  z   t          j        |           z  }n܉ j        t           j        k    rt          j        |dz   dz            }n|}||dk    xx         t          j        t                    j        z  cc<   t          j
        d j        z            |z  }|                    dd j        z
  z  t!           j                  z             || j        z  z  }|t#           j        |          z  }$t%          |          }t          j        |d           |r j        j        r1t          j        j        d         j        d         df          }||fS  j        r=ddt           j        ddf         t           j        ddddf         z
  dz  |dz  z  }	n)t%          |dz            ddddt           j        f         }	 j        dk    rt          j
        |	                    d                    ddddt           j        f         }
t          j        |	          }t          j        |	|
||
dk               |dt           j        f         |z  }n j        dk    rVd|	z  t          j        t          j
        d|	                    d          z                       dt           j        f         z  }n j        d	k    r^t          j
        d
|	                    d          z            dt           j        f         }d|	z  |dz   z  t          j        |           z  }nL j        t           j        k    r|	|dt           j        f         z  }n fd}|t;           j        |d          fS  j        s:||ddddf                             d          ddddt           j        f         fS ||fS |S )re  N	euclideanrg  rB  g      ?ro  r   r9  g      @   r   g      @g       @        r   r   )axis)outwhere.g?c                 B                          |                     S r1   )rd   )rb   r   r   r3   s    r   fzMatern.__call__.<locals>.f  s#    7400771===r   绽|=)r   r&   r   r   r   r   r   rq  rz   mathrK  inffinfor   epsfillr
   r   r	   ri  rc  r#   rF  r   r`  r   sum
zeros_likedivide_approx_fprimerb   )r3   r   r   r   r   rj  r   tmpr3  Ddenominatordivide_resultr{  s   ```          r   r   zMatern.__call__  s   8 M!*1d.?@@9!l*;???EE S !QRRR!l*A,<[QQQE7c>>vAAW^^	!$AqBFA2JJ&AAW^^	!$Aq1a4#:%3AAW{S())AAAa3hKKK28E??..KKK)AK((1,CFFA#-(E$'NN:;;;dgADGS!!!A91AQ""" )	/5 %Xqwqz171:q&ABB
*}$  ;qqq"*aaa'(1RZAAA-=+>>1DVWXuax((AAArz)9:w#~~ gaeeemm44QQQ2:5EF "a 0 0	%%*	    sBJ/-?

CURVRWQr]-C-C,C%D%DS"*_%UU

Cga!%%))m,,S"*_=&]cAg6E

BF""3
?!33

> > > > > > > .Q>>>># %*QQQT*..r22111aaa3CDDD*}$Hr   c           	      8   | j         rQd                    | j        j        d                    t          dj        | j                            | j                  S d                    | j        j        t          j	        | j                  d         | j                  S )Nz#{0}(length_scale=[{1}], nu={2:.3g})r   r   z%{0}(length_scale={1:.3g}, nu={2:.3g})r   )
r`  r   r/   r6   r   r   r   rq  r   rl  rf   s    r   r   zMatern.__repr__  s     		8??'		#i.0ABBCC   ;AA'$2C)D)DQ)G  r   )r9  r:  ro  r   )r6   r7   r8   r9   rD   r   r   r;   r<   s   @r   rn  rn  >  sr        M M^     e e e eN
 
 
 
 
 
 
r   rn  c                   \    e Zd ZdZ	 	 	 	 ddZed             Zed             Zdd	Zd
 Z	dS )RationalQuadratica  Rational Quadratic kernel.

    The RationalQuadratic kernel can be seen as a scale mixture (an infinite
    sum) of RBF kernels with different characteristic length scales. It is
    parameterized by a length scale parameter :math:`l>0` and a scale
    mixture parameter :math:`\alpha>0`. Only the isotropic variant
    where length_scale :math:`l` is a scalar is supported at the moment.
    The kernel is given by:

    .. math::
        k(x_i, x_j) = \left(
        1 + \frac{d(x_i, x_j)^2 }{ 2\alpha  l^2}\right)^{-\alpha}

    where :math:`\alpha` is the scale mixture parameter, :math:`l` is
    the length scale of the kernel and :math:`d(\cdot,\cdot)` is the
    Euclidean distance.
    For advice on how to set the parameters, see e.g. [1]_.

    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    length_scale : float > 0, default=1.0
        The length scale of the kernel.

    alpha : float > 0, default=1.0
        Scale mixture parameter

    length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'length_scale'.
        If set to "fixed", 'length_scale' cannot be changed during
        hyperparameter tuning.

    alpha_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'alpha'.
        If set to "fixed", 'alpha' cannot be changed during
        hyperparameter tuning.

    References
    ----------
    .. [1] `David Duvenaud (2014). "The Kernel Cookbook:
        Advice on Covariance functions".
        <https://www.cs.toronto.edu/~duvenaud/cookbook/>`_

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.gaussian_process import GaussianProcessClassifier
    >>> from sklearn.gaussian_process.kernels import RationalQuadratic
    >>> X, y = load_iris(return_X_y=True)
    >>> kernel = RationalQuadratic(length_scale=1.0, alpha=1.5)
    >>> gpc = GaussianProcessClassifier(kernel=kernel,
    ...         random_state=0).fit(X, y)
    >>> gpc.score(X, y)
    0.9733...
    >>> gpc.predict_proba(X[:2,:])
    array([[0.8881..., 0.0566..., 0.05518...],
            [0.8678..., 0.0707... , 0.0614...]])
    r9  r:  c                 >    || _         || _        || _        || _        d S r1   )r   alphar]  alpha_bounds)r3   r   r  r]  r  s        r   rD   zRationalQuadratic.__init__E  s)     )
#6 (r   c                 .    t          dd| j                  S rb  r    r]  rf   s    r   rc  z-RationalQuadratic.hyperparameter_length_scaleQ  s    ni9QRRRr   c                 .    t          dd| j                  S )Nr  r?  )r    r  rf   s    r   hyperparameter_alphaz&RationalQuadratic.hyperparameter_alphaU      gy$2CDDDr   NFc                    t          t          j        | j                            dk    rt	          d          t          j        |          }|\t          t          |d                    }|d| j        z  | j        dz  z  z  }d|z   }|| j         z  }t          j	        |d           nG|rt          d          t          ||d          }d|d| j        z  | j        dz  z  z  z   | j         z  }|r| j        j        s-||z  | j        dz  |z  z  }|ddddt          j        f         }n-t          j        |j        d         |j        d         df          }| j        j        sN|| j         t          j        |          z  |d| j        dz  z  |z  z  z   z  }	|	ddddt          j        f         }	n-t          j        |j        d         |j        d         df          }	|t          j        |	|f          fS |S )	a  Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Y : ndarray of shape (n_samples_Y, n_features), default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            if evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of
            the kernel hyperparameter is computed.
            Only supported when Y is None.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims)
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when eval_gradient
            is True.
        r   zeRationalQuadratic kernel only supports isotropic version, please use a single scalar for length_scaleNrf  rg  r   rB  r   )rK   r   
atleast_1dr   AttributeErrorr&   r	   r   r  ri  r   r   rc  r#   r   rF  r   r  rt   r   )
r3   r   r   r   rj  r  baser   length_scale_gradientalpha_gradients
             r   r   zRationalQuadratic.__call__Y  s1   6 r}T.//00144 >   M!9uQ}===>>E1tz>D,=q,@@ACs7Dtzk!AQ"""" S !QRRR!Q}555EUa$*nt/@!/CCDD$*TA 	39 N(-	T5F5ID5P(Q%(=aaaBJ>N(O%%(*!'!*agaj!1L(M(M% ,2 G!"ZK"&,,.q4#4a#77$>?@" "0111bj0@!A!#171:qwqz1*E!F!Fbi1F GHHHHHr   c                 X    d                     | j        j        | j        | j                  S )Nz({0}(alpha={1:.3g}, length_scale={2:.3g}))r   r/   r6   r  r   rf   s    r   r   zRationalQuadratic.__repr__  s+    9@@N#TZ1B
 
 	
r   r9  r9  r:  r:  r   )
r6   r7   r8   r9   rD   r   rc  r  r   r   r!   r   r   r  r    s        < <@ ' 
) 
) 
) 
) S S XS E E XEA A A AF
 
 
 
 
r   r  c                   \    e Zd ZdZ	 	 	 	 ddZed             Zed             Zdd	Zd
 Z	dS )ExpSineSquareda  Exp-Sine-Squared kernel (aka periodic kernel).

    The ExpSineSquared kernel allows one to model functions which repeat
    themselves exactly. It is parameterized by a length scale
    parameter :math:`l>0` and a periodicity parameter :math:`p>0`.
    Only the isotropic variant where :math:`l` is a scalar is
    supported at the moment. The kernel is given by:

    .. math::
        k(x_i, x_j) = \text{exp}\left(-
        \frac{ 2\sin^2(\pi d(x_i, x_j)/p) }{ l^ 2} \right)

    where :math:`l` is the length scale of the kernel, :math:`p` the
    periodicity of the kernel and :math:`d(\cdot,\cdot)` is the
    Euclidean distance.

    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------

    length_scale : float > 0, default=1.0
        The length scale of the kernel.

    periodicity : float > 0, default=1.0
        The periodicity of the kernel.

    length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'length_scale'.
        If set to "fixed", 'length_scale' cannot be changed during
        hyperparameter tuning.

    periodicity_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'periodicity'.
        If set to "fixed", 'periodicity' cannot be changed during
        hyperparameter tuning.

    Examples
    --------
    >>> from sklearn.datasets import make_friedman2
    >>> from sklearn.gaussian_process import GaussianProcessRegressor
    >>> from sklearn.gaussian_process.kernels import ExpSineSquared
    >>> X, y = make_friedman2(n_samples=50, noise=0, random_state=0)
    >>> kernel = ExpSineSquared(length_scale=1, periodicity=1)
    >>> gpr = GaussianProcessRegressor(kernel=kernel, alpha=5,
    ...         random_state=0).fit(X, y)
    >>> gpr.score(X, y)
    0.0144...
    >>> gpr.predict(X[:2,:], return_std=True)
    (array([425.6..., 457.5...]), array([0.3894..., 0.3467...]))
    r9  r:  c                 >    || _         || _        || _        || _        d S r1   )r   periodicityr]  periodicity_bounds)r3   r   r  r]  r  s        r   rD   zExpSineSquared.__init__  s*     )&#6 "4r   c                 .    t          dd| j                  S )zReturns the length scaler   r?  r  rf   s    r   rc  z*ExpSineSquared.hyperparameter_length_scale  s     ni9QRRRr   c                 .    t          dd| j                  S )Nr  r?  )r    r  rf   s    r   hyperparameter_periodicityz)ExpSineSquared.hyperparameter_periodicity  rT  r   NFc                    t          j        |          }|lt          t          |d                    }t           j        |z  | j        z  }t          j        |          }t          j        d|| j        z  dz  z            }nl|rt          d          t          ||d          }t          j        dt          j        t           j        | j        z  |z            | j        z  dz  z            }|rt          j        |          }| j        j        s0d| j        dz  z  |dz  z  |z  }	|	ddddt           j        f         }	n-t          j        |j        d         |j        d	         df          }	| j        j        s3d|z  | j        dz  z  |z  |z  |z  }
|
ddddt           j        f         }
n-t          j        |j        d         |j        d	         df          }
|t          j        |	|
f          fS |S )
re  Nrs  rg  r   rB     r   r   )r   r&   r	   r   pir  sinrz   r   r   r   cosrc  r#   r   rF  r   r  r   )r3   r   r   r   rj  rT   
sin_of_argr   
cos_of_argr  periodicity_gradients              r   r   zExpSineSquared.__call__  s   8 M!9uQ{;;;<<E%%-$"22CJrZ$*;;AABBAA S !QRRR!Q{333EbfRUT%55=>>ARRWXXX A  	J39 N()D,=q,@(@:q=(PST(T%(=aaaBJ>N(O%%(*!'!*agaj!1L(M(M%28 MGd/22Z?*LqP % (<AAAqqq"*<L'M$$')xQWQZ0K'L'L$bi!68L MNNNNHr   c                 X    d                     | j        j        | j        | j                  S )Nz.{0}(length_scale={1:.3g}, periodicity={2:.3g}))r   r/   r6   r   r  rf   s    r   r   zExpSineSquared.__repr__-  s,    ?FFN#T%68H
 
 	
r   r  r   )
r6   r7   r8   r9   rD   r   rc  r  r   r   r!   r   r   r  r    s        4 4p '&
5 
5 
5 
5 S S XS Q Q XQ= = = =~
 
 
 
 
r   r  c                   J    e Zd ZdZddZed             ZddZd	 Zd
 Z	d Z
dS )
DotProducta\  Dot-Product kernel.

    The DotProduct kernel is non-stationary and can be obtained from linear
    regression by putting :math:`N(0, 1)` priors on the coefficients
    of :math:`x_d (d = 1, . . . , D)` and a prior of :math:`N(0, \sigma_0^2)`
    on the bias. The DotProduct kernel is invariant to a rotation of
    the coordinates about the origin, but not translations.
    It is parameterized by a parameter sigma_0 :math:`\sigma`
    which controls the inhomogenity of the kernel. For :math:`\sigma_0^2 =0`,
    the kernel is called the homogeneous linear kernel, otherwise
    it is inhomogeneous. The kernel is given by

    .. math::
        k(x_i, x_j) = \sigma_0 ^ 2 + x_i \cdot x_j

    The DotProduct kernel is commonly combined with exponentiation.

    See [1]_, Chapter 4, Section 4.2, for further details regarding the
    DotProduct kernel.

    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    sigma_0 : float >= 0, default=1.0
        Parameter controlling the inhomogenity of the kernel. If sigma_0=0,
        the kernel is homogeneous.

    sigma_0_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'sigma_0'.
        If set to "fixed", 'sigma_0' cannot be changed during
        hyperparameter tuning.

    References
    ----------
    .. [1] `Carl Edward Rasmussen, Christopher K. I. Williams (2006).
        "Gaussian Processes for Machine Learning". The MIT Press.
        <http://www.gaussianprocess.org/gpml/>`_

    Examples
    --------
    >>> from sklearn.datasets import make_friedman2
    >>> from sklearn.gaussian_process import GaussianProcessRegressor
    >>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
    >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
    >>> kernel = DotProduct() + WhiteKernel()
    >>> gpr = GaussianProcessRegressor(kernel=kernel,
    ...         random_state=0).fit(X, y)
    >>> gpr.score(X, y)
    0.3680...
    >>> gpr.predict(X[:2,:], return_std=True)
    (array([653.0..., 592.1...]), array([316.6..., 316.6...]))
    r9  r:  c                 "    || _         || _        d S r1   )sigma_0sigma_0_bounds)r3   r  r  s      r   rD   zDotProduct.__init__l  s    ,r   c                 .    t          dd| j                  S )Nr  r?  )r    r  rf   s    r   hyperparameter_sigma_0z!DotProduct.hyperparameter_sigma_0p  s    iD4GHHHr   NFc                    t          j        |          }|!t          j        ||          | j        dz  z   }n1|rt	          d          t          j        ||          | j        dz  z   }|r|| j        j        sAt          j        |j        d         |j        d         df          }d| j        dz  z  |d<   ||fS |t          j        |j        d         |j        d         df          fS |S )  Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Y : ndarray of shape (n_samples_Y, n_features), default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            if evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of
            the kernel hyperparameter is computed.
            Only supported when Y is None.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),                optional
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when `eval_gradient`
            is True.
        Nr   rB  r   r   ).r   )	r   r&   innerr  r   r  r#   rF  r   r2  s         r   r   zDotProduct.__call__t  s    8 M!9Aq0AA S !QRRRAq0A 	.4 @Xqwqz171:q&ABB
%&q%8
6"*}$"(AGAJ
A#>????Hr   c                 D    t          j        d||          | j        dz  z   S )a  Returns the diagonal of the kernel k(X, X).

        The result of this method is identical to np.diag(self(X)); however,
        it can be evaluated more efficiently since only the diagonal is
        evaluated.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y).

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X,)
            Diagonal of kernel k(X, X).
        zij,ij->ir   )r   einsumr  r   s     r   r   zDotProduct.diag  s#    " yQ**T\1_<<r   c                     dS )r   Fr!   rf   s    r   r   zDotProduct.is_stationary  s    ur   c                 L    d                     | j        j        | j                  S )Nz{0}(sigma_0={1:.3g}))r   r/   r6   r  rf   s    r   r   zDotProduct.__repr__  s    %,,T^-DdlSSSr   rL  r   )r6   r7   r8   r9   rD   r   r  r   r   r   r   r!   r   r   r  r  3  s        6 6p- - - - I I XI, , , ,\= = =&  T T T T Tr   r  r!   c                     || f|z    }t          j        |j        d         |j        d         t          |           ft                    }t          j        t          |           ft                    }t          t          |                     D ]3}d||<   ||z  } || |z   f|z    |z
  ||         z  |d d d d |f<   d||<   4|S )Nr   r   r9  ru  )r   rW  r   rK   r   r   )	xkr{  epsilonrQ   f0gradeir   ds	            r   r  r    s    	
bUT\	B8RXa["(1+s2ww7??D	3r77*e	$	$B3r77^^  1bLb1fY-/"4!<QQQ1W1Kr   c                   R    e Zd ZdZ	 	 	 	 ddZed             Zdd	Zd
 Zd Z	d Z
dS )PairwiseKernela	  Wrapper for kernels in sklearn.metrics.pairwise.

    A thin wrapper around the functionality of the kernels in
    sklearn.metrics.pairwise.

    Note: Evaluation of eval_gradient is not analytic but numeric and all
          kernels support only isotropic distances. The parameter gamma is
          considered to be a hyperparameter and may be optimized. The other
          kernel parameters are set directly at initialization and are kept
          fixed.

    .. versionadded:: 0.18

    Parameters
    ----------
    gamma : float, default=1.0
        Parameter gamma of the pairwise kernel specified by metric. It should
        be positive.

    gamma_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'gamma'.
        If set to "fixed", 'gamma' cannot be changed during
        hyperparameter tuning.

    metric : {"linear", "additive_chi2", "chi2", "poly", "polynomial",               "rbf", "laplacian", "sigmoid", "cosine"} or callable,               default="linear"
        The metric to use when calculating kernel between instances in a
        feature array. If metric is a string, it must be one of the metrics
        in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a kernel matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the distance between them.

    pairwise_kernels_kwargs : dict, default=None
        All entries of this dict (if any) are passed as keyword arguments to
        the pairwise kernel function.

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.gaussian_process import GaussianProcessClassifier
    >>> from sklearn.gaussian_process.kernels import PairwiseKernel
    >>> X, y = load_iris(return_X_y=True)
    >>> kernel = PairwiseKernel(metric='rbf')
    >>> gpc = GaussianProcessClassifier(kernel=kernel,
    ...         random_state=0).fit(X, y)
    >>> gpc.score(X, y)
    0.9733...
    >>> gpc.predict_proba(X[:2,:])
    array([[0.8880..., 0.05663..., 0.05532...],
           [0.8676..., 0.07073..., 0.06165...]])
    r9  r:  linearNc                 >    || _         || _        || _        || _        d S r1   )r
   gamma_boundsrh  pairwise_kernels_kwargs)r3   r
   r  rh  r  s        r   rD   zPairwiseKernel.__init__	  s(     
('>$$$r   c                 .    t          dd| j                  S )Nr
   r?  )r    r  rf   s    r   hyperparameter_gammaz#PairwiseKernel.hyperparameter_gamma	  r  r   Fc                 H     j          j         i t          j                  t          f j         j        dd}|r[ j        j        r/|t          j        j	        d         j	        d         df          fS  fd}|t           j        |d          fS |S )r  NTrh  r
   filter_paramsr   c                 V    t          fj        t          j        |           ddS )NTr  )r   rh  r   rz   )r
   r   r   r  r3   s    r   r{  z"PairwiseKernel.__call__.<locals>.fA	  sD    +  ${ fUmm&*  2  r   r|  )r  r   r&   r   rh  r
   r  r#   rF  r   r  rb   )r3   r   r   r   r   r{  r  s   ```   @r   r   zPairwiseKernel.__call__	  s    8 #'">'/&(#M!
 ;*
 
 &
 
  	(. ?"(AGAJ
A#>????        .Q>>>>Hr   c                 R    t          j        | d|                                          S )r   r   )r   apply_along_axisrl  r   s     r   r   zPairwiseKernel.diagO	  s%    $ "4A..44666r   c                     | j         dv S )r   )rbfrg  rf   s    r   r   zPairwiseKernel.is_stationaryc	  s    {g%%r   c                 X    d                     | j        j        | j        | j                  S )Nz{0}(gamma={1}, metric={2}))r   r/   r6   r
   rh  rf   s    r   r   zPairwiseKernel.__repr__g	  s*    +22N#TZ
 
 	
r   )r9  r:  r  Nr   )r6   r7   r8   r9   rD   r   r  r   r   r   r   r!   r   r   r  r    s        6 6t   $
? 
? 
? 
? E E XE: : : :x7 7 7(& & &
 
 
 
 
r   r  )r!   )/r9   r}  r   abcr   r   collectionsr   inspectr   numpyr   scipy.spatial.distancer   r   r	   scipy.specialr
   r   r  r   
exceptionsr   metrics.pairwiser   utils.validationr   r   r    r>   r   r   r   r   r   r   r   r   r   rN  r[  rn  r  r  r  r  r  r!   r   r   <module>r     s   X X.   ' ' ' ' ' ' ' ' " " " " " "           ; ; ; ; ; ; ; ; ; ; # # # # # # # #       + + + + + + / / / / / / + + + + + +	 	 	b
 b
 b
 b
 b
JQ b
 b
 b
Jt t t t tw t t t tn	# # # # # # # #4       
 
 
 
 
 
 
 
aH aH aH aH aHV aH aH aHHsN sN sN sN sNV sN sN sNl^4 ^4 ^4 ^4 ^4. ^4 ^4 ^4Ba4 a4 a4 a4 a4n a4 a4 a4H|1 |1 |1 |1 |1V |1 |1 |1~GA GA GA GA GA*,> GA GA GATx
 x
 x
 x
 x
');V x
 x
 x
vV V V V V
!6 V V VrE E E E ES E E EPY
 Y
 Y
 Y
 Y
-/Df Y
 Y
 Y
xN
 N
 N
 N
 N
*,A6 N
 N
 N
bGT GT GT GT GT GT GT GTV	 	 	 	`
 `
 `
 `
 `
V `
 `
 `
 `
 `
r   