"""Kernel ridge regression."""

from numbers import Real

import numpy as np

from .base import BaseEstimator, MultiOutputMixin, RegressorMixin, _fit_context
from .linear_model._ridge import _solve_cholesky_kernel
from .metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS, pairwise_kernels
from .utils._param_validation import Interval, StrOptions
from .utils.validation import _check_sample_weight, check_is_fitted, validate_data


class KernelRidge(MultiOutputMixin, RegressorMixin, BaseEstimator):
    """Kernel ridge regression.

    Kernel ridge regression (KRR) combines ridge regression (linear least
    squares with l2-norm regularization) with the kernel trick. It thus
    learns a linear function in the space induced by the respective kernel and
    the data. For non-linear kernels, this corresponds to a non-linear
    function in the original space.

    The form of the model learned by KRR is identical to support vector
    regression (SVR). However, different loss functions are used: KRR uses
    squared error loss while support vector regression uses epsilon-insensitive
    loss, both combined with l2 regularization. In contrast to SVR, fitting a
    KRR model can be done in closed-form and is typically faster for
    medium-sized datasets. On the other hand, the learned model is non-sparse
    and thus slower at prediction time than SVR, which learns a sparse model
    for epsilon > 0.

    This estimator has built-in support for multi-variate regression
    (i.e., when y is a 2d-array of shape (n_samples, n_targets)).

    Read more in the :ref:`User Guide <kernel_ridge>`.

    Parameters
    ----------
    alpha : float or array-like of shape (n_targets,), default=1.0
        Regularization strength; must be a positive float. Regularization
        improves the conditioning of the problem and reduces the variance of
        the estimates. Larger values specify stronger regularization.
        Alpha corresponds to ``1 / (2C)`` in other linear models such as
        :class:`~sklearn.linear_model.LogisticRegression` or
        :class:`~sklearn.svm.LinearSVC`. If an array is passed, penalties are
        assumed to be specific to the targets. Hence they must correspond in
        number. See :ref:`ridge_regression` for formula.

    kernel : str or callable, default="linear"
        Kernel mapping used internally. This parameter is directly passed to
        :func:`~sklearn.metrics.pairwise.pairwise_kernels`.
        If `kernel` is a string, it must be one of the metrics
        in `pairwise.PAIRWISE_KERNEL_FUNCTIONS` or "precomputed".
        If `kernel` is "precomputed", X is assumed to be a kernel matrix.
        Alternatively, if `kernel` is a callable function, it is called on
        each pair of instances (rows) and the resulting value recorded. The
        callable should take two rows from X as input and return the
        corresponding kernel value as a single number. This means that
        callables from :mod:`sklearn.metrics.pairwise` are not allowed, as
        they operate on matrices, not single samples. Use the string
        identifying the kernel instead. A custom callable kernel is
        demonstrated in the Examples section below.

    gamma : float, default=None
        Gamma parameter for the RBF, laplacian, polynomial, exponential,
        chi2 and sigmoid kernels. Interpretation of the default value is
        left to the kernel; see the documentation for
        :mod:`sklearn.metrics.pairwise`.
        Ignored by other kernels.

    degree : float, default=3
        Degree of the polynomial kernel. Ignored by other kernels.

    coef0 : float, default=1
        Zero coefficient for polynomial and sigmoid kernels.
        Ignored by other kernels.

    kernel_params : dict, default=None
        Additional parameters (keyword arguments) for kernel function passed
        as callable object.

    Attributes
    ----------
    dual_coef_ : ndarray of shape (n_samples,) or (n_samples, n_targets)
        Representation of weight vector(s) in kernel space.

    X_fit_ : {ndarray, sparse matrix} of shape (n_samples, n_features)
        Training data, which is also required for prediction. If
        ``kernel == "precomputed"``, this is instead the precomputed
        training matrix of shape (n_samples, n_samples).

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    sklearn.gaussian_process.GaussianProcessRegressor : Gaussian
        Process regressor providing automatic kernel hyperparameter
        tuning and prediction uncertainty.
    sklearn.linear_model.Ridge : Linear ridge regression.
    sklearn.linear_model.RidgeCV : Ridge regression with built-in
        cross-validation.
    sklearn.svm.SVR : Support Vector Regression accepting a large variety
        of kernels.

    References
    ----------
    * Kevin P. Murphy
      "Machine Learning: A Probabilistic Perspective", The MIT Press
      chapter 14.4.3, pp. 492-493

    Examples
    --------
    >>> from sklearn.kernel_ridge import KernelRidge
    >>> import numpy as np
    >>> n_samples, n_features = 10, 5
    >>> rng = np.random.RandomState(0)
    >>> y = rng.randn(n_samples)
    >>> X = rng.randn(n_samples, n_features)
    >>> krr = KernelRidge(alpha=1.0)
    >>> krr.fit(X, y)
    KernelRidge(alpha=1.0)
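
    A fitted model can then be used for prediction; continuing the example
    above, only the output shape is shown because the predicted values
    depend on the random data:

    >>> y_pred = krr.predict(X)
    >>> y_pred.shape
    (10,)

    As a minimal sketch of a custom callable kernel, the lambda below simply
    reimplements the linear kernel on a single pair of rows:

    >>> krr_linear = KernelRidge(alpha=1.0, kernel=lambda a, b: np.dot(a, b))
    >>> krr_linear.fit(X, y).predict(X).shape
    (10,)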
    r   Nleft)closedz
array-likeprecomputedneitheralphakernelgammadegreecoef0kernel_params_parameter_constraintsr   linear   )r   r   r   r   r   c                Z    || _         || _        || _        || _        || _        || _        d S Nr   )selfr   r   r   r   r   r   s          T/var/www/html/test/jupyter/venv/lib/python3.11/site-packages/sklearn/kernel_ridge.py__init__zKernelRidge.__init__   s5     


*    c                     t          | j                  r
| j        pi }n| j        | j        | j        d}t          ||f| j        dd|S )N)r   r   r   T)metricfilter_params)callabler   r   r   r   r   r   )r#   XYparamss       r$   _get_kernelzKernelRidge._get_kernel   s[    DK   	W'-2FF#zT[4:VVF1WT[WWPVWWWr&   c                     t                                                      }d|j        _        | j        dk    |j        _        |S )NTr   )super__sklearn_tags__
input_tagssparser   pairwise)r#   tags	__class__s     r$   r1   zKernelRidge.__sklearn_tags__   s8    ww''))!%#';-#? r&   T)prefer_skip_nested_validationc                    t          | ||ddd          \  }}|%t          |t                    st          ||          }|                     |          }t          j        | j                  }d}t          |j	                  dk    r|
                    dd          }d}| j        dk    }t          |||||          | _        |r| j                                        | _        || _        | S )	a  Fit Kernel Ridge regression model.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data. If kernel == "precomputed" this is instead
            a precomputed kernel matrix, of shape (n_samples, n_samples).

        y : array-like of shape (n_samples,) or (n_samples, n_targets)
            Target values.

        sample_weight : float or array-like of shape (n_samples,), default=None
            Individual weights for each sample, ignored if None is passed.

        Returns
        -------
        self : object
            Returns the instance itself.
        csrcscT)accept_sparsemulti_output	y_numericNFr   r   )r   
isinstancefloatr   r.   np
atleast_1dr   lenshapereshaper   r	   
dual_coef_ravelX_fit_)r#   r+   ysample_weightKr   rH   copys           r$   fitzKernelRidge.fit   s    , !Qn4SW
 
 
1 $Zu-M-M$0BBMQdj))qw<<1		"a  AE{m+0AumTRR 	6"o3355DOr&   c                     t          |            t          | |dd          }|                     || j                  }t	          j        || j                  S )a)  Predict using the kernel ridge model.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Samples. If kernel == "precomputed" this is instead a
            precomputed kernel matrix of shape (n_samples, n_samples_fitted),
            where n_samples_fitted is the number of samples used to fit this
            estimator.

        Returns
        -------
        C : ndarray of shape (n_samples,) or (n_samples, n_targets)
            Returns predicted values.
        """
        check_is_fitted(self)
        X = validate_data(self, X, accept_sparse=("csr", "csc"), reset=False)
        K = self._get_kernel(X, self.X_fit_)
        return np.dot(K, self.dual_coef_)