
"""Methods for calibrating predicted probabilities."""

import warnings
from inspect import signature
from math import log
from numbers import Integral, Real

import numpy as np
from scipy.optimize import minimize
from scipy.special import expit

from sklearn.utils import Bunch

from ._loss import HalfBinomialLoss
from .base import (
    BaseEstimator,
    ClassifierMixin,
    MetaEstimatorMixin,
    RegressorMixin,
    _fit_context,
    clone,
)
from .frozen import FrozenEstimator
from .isotonic import IsotonicRegression
from .model_selection import LeaveOneOut, check_cv, cross_val_predict
from .preprocessing import LabelEncoder, label_binarize
from .svm import LinearSVC
from .utils import _safe_indexing, column_or_1d, get_tags, indexable
from .utils._param_validation import (
    HasMethods,
    Hidden,
    Interval,
    StrOptions,
    validate_params,
)
from .utils._plotting import _BinaryClassifierCurveDisplayMixin, _validate_style_kwargs
from .utils._response import _get_response_values, _process_predict_proba
from .utils.metadata_routing import (
    MetadataRouter,
    MethodMapping,
    _routing_enabled,
    process_routing,
)
from .utils.multiclass import check_classification_targets
from .utils.parallel import Parallel, delayed
from .utils.validation import (
    _check_method_params,
    _check_pos_label_consistency,
    _check_response_method,
    _check_sample_weight,
    _num_samples,
    check_consistent_length,
    check_is_fitted,
)


class CalibratedClassifierCV(ClassifierMixin, MetaEstimatorMixin, BaseEstimator):
    """Probability calibration with isotonic regression or logistic regression.

    This class uses cross-validation to both estimate the parameters of a
    classifier and subsequently calibrate a classifier. With default
    `ensemble=True`, for each cv split it
    fits a copy of the base estimator to the training subset, and calibrates it
    using the testing subset. For prediction, predicted probabilities are
    averaged across these individual calibrated classifiers. When
    `ensemble=False`, cross-validation is used to obtain unbiased predictions,
    via :func:`~sklearn.model_selection.cross_val_predict`, which are then
    used for calibration. For prediction, the base estimator, trained using all
    the data, is used. This is the prediction method implemented when
    `probability=True` for :class:`~sklearn.svm.SVC` and :class:`~sklearn.svm.NuSVC`
    estimators (see :ref:`User Guide <scores_probabilities>` for details).

    Already fitted classifiers can be calibrated by wrapping the model in a
    :class:`~sklearn.frozen.FrozenEstimator`. In this case all provided
    data is used for calibration. The user has to take care manually that data
    for model fitting and calibration are disjoint.

    The calibration is based on the :term:`decision_function` method of the
    `estimator` if it exists, else on :term:`predict_proba`.

    Read more in the :ref:`User Guide <calibration>`.
    In order to learn more on the CalibratedClassifierCV class, see the
    following calibration examples:
    :ref:`sphx_glr_auto_examples_calibration_plot_calibration.py`,
    :ref:`sphx_glr_auto_examples_calibration_plot_calibration_curve.py`, and
    :ref:`sphx_glr_auto_examples_calibration_plot_calibration_multiclass.py`.

    Parameters
    ----------
    estimator : estimator instance, default=None
        The classifier whose output needs to be calibrated to provide more
        accurate `predict_proba` outputs. The default classifier is
        a :class:`~sklearn.svm.LinearSVC`.

        .. versionadded:: 1.2

    method : {'sigmoid', 'isotonic'}, default='sigmoid'
        The method to use for calibration. Can be 'sigmoid' which
        corresponds to Platt's method (i.e. a logistic regression model) or
        'isotonic' which is a non-parametric approach. It is not advised to
        use isotonic calibration with too few calibration samples
        ``(<<1000)`` since it tends to overfit.
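
        As a sketch of what each option fits: 'sigmoid' fits the
        two-parameter map ``p(y=1 | f) = 1 / (1 + exp(A * f + B))`` (Platt
        scaling) to the classifier's scores ``f``, while 'isotonic' fits a
        non-decreasing step function with no parametric shape assumption.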

    cv : int, cross-validation generator, or iterable, default=None
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 5-fold cross-validation,
        - integer, to specify the number of folds.
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.

        For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`~sklearn.model_selection.StratifiedKFold` is used. If ``y`` is
        neither binary nor multiclass, :class:`~sklearn.model_selection.KFold`
        is used.

        Refer to the :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

        .. versionchanged:: 0.22
            ``cv`` default value if None changed from 3-fold to 5-fold.

        .. versionchanged:: 1.6
            `"prefit"` is deprecated. Use :class:`~sklearn.frozen.FrozenEstimator`
            instead.

    n_jobs : int, default=None
        Number of jobs to run in parallel.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors.

        Base estimator clones are fitted in parallel across cross-validation
        iterations. Therefore parallelism happens only when `cv != "prefit"`.

        See :term:`Glossary <n_jobs>` for more details.

        .. versionadded:: 0.24

    ensemble : bool, or "auto", default="auto"
        Determines how the calibrator is fitted.

        "auto" will use `False` if the `estimator` is a
        :class:`~sklearn.frozen.FrozenEstimator`, and `True` otherwise.

        If `True`, the `estimator` is fitted using training data, and
        calibrated using testing data, for each `cv` fold. The final estimator
        is an ensemble of `n_cv` fitted classifier and calibrator pairs, where
        `n_cv` is the number of cross-validation folds. The output is the
        average predicted probabilities of all pairs.

        If `False`, `cv` is used to compute unbiased predictions, via
        :func:`~sklearn.model_selection.cross_val_predict`, which are then
        used for calibration. At prediction time, the classifier used is the
        `estimator` trained on all the data.
        Note that this method is also internally implemented in
        :mod:`sklearn.svm` estimators with the `probability=True` parameter.

        .. versionadded:: 0.24

        .. versionchanged:: 1.6
            `"auto"` option is added and is the default.

    Attributes
    ----------
    classes_ : ndarray of shape (n_classes,)
        The class labels.

    n_features_in_ : int
        Number of features seen during :term:`fit`. Only defined if the
        underlying estimator exposes such an attribute when fit.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Only defined if the
        underlying estimator exposes such an attribute when fit.

        .. versionadded:: 1.0

    calibrated_classifiers_ : list (len() equal to cv or 1 if `ensemble=False`)
        The list of classifier and calibrator pairs.

        - When `ensemble=True`, `n_cv` fitted `estimator` and calibrator pairs.
          `n_cv` is the number of cross-validation folds.
        - When `ensemble=False`, the `estimator`, fitted on all the data, and fitted
          calibrator.

        .. versionchanged:: 0.24
            Single calibrated classifier case when `ensemble=False`.

    See Also
    --------
    calibration_curve : Compute true and predicted probabilities
        for a calibration curve.

    References
    ----------
    .. [1] Obtaining calibrated probability estimates from decision trees
           and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001

    .. [2] Transforming Classifier Scores into Accurate Multiclass
           Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)

    .. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
           Regularized Likelihood Methods, J. Platt, (1999)

    .. [4] Predicting Good Probabilities with Supervised Learning,
           A. Niculescu-Mizil & R. Caruana, ICML 2005

    Examples
    --------
    >>> from sklearn.datasets import make_classification
    >>> from sklearn.naive_bayes import GaussianNB
    >>> from sklearn.calibration import CalibratedClassifierCV
    >>> X, y = make_classification(n_samples=100, n_features=2,
    ...                            n_redundant=0, random_state=42)
    >>> base_clf = GaussianNB()
    >>> calibrated_clf = CalibratedClassifierCV(base_clf, cv=3)
    >>> calibrated_clf.fit(X, y)
    CalibratedClassifierCV(...)
    >>> len(calibrated_clf.calibrated_classifiers_)
    3
    >>> calibrated_clf.predict_proba(X)[:5, :]
    array([[0.110..., 0.889...],
           [0.072..., 0.927...],
           [0.928..., 0.071...],
           [0.928..., 0.071...],
           [0.071..., 0.928...]])
    >>> from sklearn.model_selection import train_test_split
    >>> X, y = make_classification(n_samples=100, n_features=2,
    ...                            n_redundant=0, random_state=42)
    >>> X_train, X_calib, y_train, y_calib = train_test_split(
    ...        X, y, random_state=42
    ... )
    >>> base_clf = GaussianNB()
    >>> base_clf.fit(X_train, y_train)
    GaussianNB()
    >>> from sklearn.frozen import FrozenEstimator
    >>> calibrated_clf = CalibratedClassifierCV(FrozenEstimator(base_clf))
    >>> calibrated_clf.fit(X_calib, y_calib)
    CalibratedClassifierCV(...)
    >>> len(calibrated_clf.calibrated_classifiers_)
    1
    >>> calibrated_clf.predict_proba([[-0.5, 0.5]])
    array([[0.936..., 0.063...]])
    """

    _parameter_constraints: dict = {
        "estimator": [
            HasMethods(["fit", "predict_proba"]),
            HasMethods(["fit", "decision_function"]),
            None,
        ],
        "method": [StrOptions({"isotonic", "sigmoid"})],
        "cv": ["cv_object", Hidden(StrOptions({"prefit"}))],
        "n_jobs": [Integral, None],
        "ensemble": ["boolean", StrOptions({"auto"})],
    }

    def __init__(
        self,
        estimator=None,
        *,
        method="sigmoid",
        cv=None,
        n_jobs=None,
        ensemble="auto",
    ):
        self.estimator = estimator
        self.method = method
        self.cv = cv
        self.n_jobs = n_jobs
        self.ensemble = ensemble

    def _get_estimator(self):
        """Resolve which estimator to return (default is LinearSVC)."""
        if self.estimator is None:
            # We want all classifiers that don't expose a random_state to be
            # deterministic (and we don't want to expose this one).
            estimator = LinearSVC(random_state=0)
            if _routing_enabled():
                estimator.set_fit_request(sample_weight=True)
        else:
            estimator = self.estimator

        return estimator

    @_fit_context(
        # CalibratedClassifierCV.estimator is not validated yet
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y, sample_weight=None, **fit_params):
        """Fit the calibrated model.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data.

        y : array-like of shape (n_samples,)
            Target values.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted.

        **fit_params : dict
            Parameters to pass to the `fit` method of the underlying
            classifier.

        Returns
        -------
        self : object
            Returns an instance of self.
        """
        check_classification_targets(y)
        X, y = indexable(X, y)

        estimator = self._get_estimator()

        _ensemble = self.ensemble
        if _ensemble == "auto":
            _ensemble = not isinstance(estimator, FrozenEstimator)

        self.calibrated_classifiers_ = []
        if self.cv == "prefit":
            # TODO(1.8): Remove this code branch and cv='prefit'
            warnings.warn(
                "The `cv='prefit'` option is deprecated in 1.6 and will be removed in"
                " 1.8. You can use CalibratedClassifierCV(FrozenEstimator(estimator))"
                " instead."
            )
            # `classes_` should be consistent with that of the estimator
            check_is_fitted(self.estimator, attributes=["classes_"])
            self.classes_ = self.estimator.classes_

            predictions, _ = _get_response_values(
                estimator,
                X,
                response_method=["decision_function", "predict_proba"],
            )
            if predictions.ndim == 1:
                # Reshape binary output from `(n_samples,)` to `(n_samples, 1)`
                predictions = predictions.reshape(-1, 1)

            calibrated_classifier = _fit_calibrator(
                estimator, predictions, y, self.classes_, self.method, sample_weight
            )
            self.calibrated_classifiers_.append(calibrated_classifier)
        else:
            # Set `classes_` using all `y`
            label_encoder_ = LabelEncoder().fit(y)
            self.classes_ = label_encoder_.classes_

            if _routing_enabled():
                routed_params = process_routing(
                    self, "fit", sample_weight=sample_weight, **fit_params
                )
            else:
                # sample_weight checks
                fit_parameters = signature(estimator.fit).parameters
                supports_sw = "sample_weight" in fit_parameters
                if sample_weight is not None and not supports_sw:
                    estimator_name = type(estimator).__name__
                    warnings.warn(
                        f"Since {estimator_name} does not appear to accept"
                        " sample_weight, sample weights will only be used for the"
                        " calibration itself. This can be caused by a limitation of"
                        " the current scikit-learn API. See the following issue for"
                        " more details:"
                        " https://github.com/scikit-learn/scikit-learn/issues/21134."
                        " Be warned that the result of the calibration is likely to be"
                        " incorrect."
                    )
                routed_params = Bunch()
                routed_params.splitter = Bunch(split={})  # no routing for splitter
                routed_params.estimator = Bunch(fit=fit_params)
                if sample_weight is not None and supports_sw:
                    routed_params.estimator.fit["sample_weight"] = sample_weight

            # Check that each cross-validation fold can have at least one
            # example per class
            if isinstance(self.cv, int):
                n_folds = self.cv
            elif hasattr(self.cv, "n_splits"):
                n_folds = self.cv.n_splits
            else:
                n_folds = None
            if n_folds and np.any(np.unique(y, return_counts=True)[1] < n_folds):
                raise ValueError(
                    f"Requesting {n_folds}-fold cross-validation but provided less"
                    f" than {n_folds} examples for at least one class."
                )
            if isinstance(self.cv, LeaveOneOut):
                raise ValueError(
                    "LeaveOneOut cross-validation does not allow all classes to be"
                    " present in test splits. Please use a cross-validation generator"
                    " that allows all classes to appear in every test and train"
                    " split."
                )
            cv = check_cv(self.cv, y, classifier=True)

            if _ensemble:
                parallel = Parallel(n_jobs=self.n_jobs)
                self.calibrated_classifiers_ = parallel(
                    delayed(_fit_classifier_calibrator_pair)(
                        clone(estimator),
                        X,
                        y,
                        train=train,
                        test=test,
                        method=self.method,
                        classes=self.classes_,
                        sample_weight=sample_weight,
                        fit_params=routed_params.estimator.fit,
                    )
                    for train, test in cv.split(X, y, **routed_params.splitter.split)
                )
            else:
                this_estimator = clone(estimator)
                method_name = _check_response_method(
                    this_estimator, ["decision_function", "predict_proba"]
                ).__name__
                predictions = cross_val_predict(
                    estimator=this_estimator,
                    X=X,
                    y=y,
                    cv=cv,
                    method=method_name,
                    n_jobs=self.n_jobs,
                    params=routed_params.estimator.fit,
                )
                if len(self.classes_) == 2:
                    # Ensure shape (n_samples, 1) in the binary case
                    if method_name == "predict_proba":
                        # Select the probability column of the positive class
                        predictions = _process_predict_proba(
                            y_pred=predictions,
                            target_type="binary",
                            classes=self.classes_,
                            pos_label=self.classes_[1],
                        )
                    predictions = predictions.reshape(-1, 1)

                this_estimator.fit(X, y, **routed_params.estimator.fit)
                calibrated_classifier = _fit_calibrator(
                    this_estimator,
                    predictions,
                    y,
                    self.classes_,
                    self.method,
                    sample_weight,
                )
                self.calibrated_classifiers_.append(calibrated_classifier)

        first_clf = self.calibrated_classifiers_[0].estimator
        if hasattr(first_clf, "n_features_in_"):
            self.n_features_in_ = first_clf.n_features_in_
        if hasattr(first_clf, "feature_names_in_"):
            self.feature_names_in_ = first_clf.feature_names_in_
        return self

    def predict_proba(self, X):
        """Calibrated probabilities of classification.

        This function returns calibrated probabilities of classification
        according to each class on an array of test vectors X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The samples, as accepted by `estimator.predict_proba`.

        Returns
        -------
        C : ndarray of shape (n_samples, n_classes)
            The predicted probas.
        """
        check_is_fitted(self)
        # Compute the arithmetic mean of the predictions of the calibrated
        # classifier/calibrator pairs
        mean_proba = np.zeros((_num_samples(X), len(self.classes_)))
        for calibrated_classifier in self.calibrated_classifiers_:
            proba = calibrated_classifier.predict_proba(X)
            mean_proba += proba

        mean_proba /= len(self.calibrated_classifiers_)

        return mean_proba

    def predict(self, X):
        """Predict the target of new samples.

        The predicted class is the class that has the highest probability,
        and can thus be different from the prediction of the uncalibrated classifier.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The samples, as accepted by `estimator.predict`.

        Returns
        -------
        C : ndarray of shape (n_samples,)
            The predicted class.
        """
        check_is_fitted(self)
        return self.classes_[np.argmax(self.predict_proba(X), axis=1)]

    def get_metadata_routing(self):
        """Get metadata routing of this object.

        Please check :ref:`User Guide <metadata_routing>` on how the routing
        mechanism works.

        Returns
        -------
        routing : MetadataRouter
            A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
            routing information.
        """
        router = (
            MetadataRouter(owner=self.__class__.__name__)
            .add_self_request(self)
            .add(
                estimator=self._get_estimator(),
                method_mapping=MethodMapping().add(caller="fit", callee="fit"),
            )
            .add(
                splitter=self.cv,
                method_mapping=MethodMapping().add(caller="fit", callee="split"),
            )
        )
        return router

    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        tags.input_tags.sparse = get_tags(self._get_estimator()).input_tags.sparse
        return tags


def _fit_classifier_calibrator_pair(
    estimator,
    X,
    y,
    train,
    test,
    method,
    classes,
    sample_weight=None,
    fit_params=None,
):
    """Fit a classifier/calibration pair on a given train/test split.

    Fit the classifier on the train set, compute its predictions on the test
    set and use the predictions as input to fit the calibrator along with the
    test labels.

    Parameters
    ----------
    estimator : estimator instance
        Cloned base estimator.

    X : array-like, shape (n_samples, n_features)
        Sample data.

    y : array-like, shape (n_samples,)
        Targets.

    train : ndarray, shape (n_train_indices,)
        Indices of the training subset.

    test : ndarray, shape (n_test_indices,)
        Indices of the testing subset.

    method : {'sigmoid', 'isotonic'}
        Method to use for calibration.

    classes : ndarray, shape (n_classes,)
        The target classes.

    sample_weight : array-like, default=None
        Sample weights for `X`.

    fit_params : dict, default=None
        Parameters to pass to the `fit` method of the underlying
        classifier.

    Returns
    -------
    calibrated_classifier : _CalibratedClassifier instance
    """
    fit_params_train = _check_method_params(X, params=fit_params, indices=train)
    X_train, y_train = _safe_indexing(X, train), _safe_indexing(y, train)
    X_test, y_test = _safe_indexing(X, test), _safe_indexing(y, test)

    estimator.fit(X_train, y_train, **fit_params_train)

    predictions, _ = _get_response_values(
        estimator, X_test, response_method=["decision_function", "predict_proba"]
    )
    if predictions.ndim == 1:
        # Reshape binary output from `(n_samples,)` to `(n_samples, 1)`
        predictions = predictions.reshape(-1, 1)

    sw_test = None if sample_weight is None else _safe_indexing(sample_weight, test)
    calibrated_classifier = _fit_calibrator(
        estimator, predictions, y_test, classes, method, sample_weight=sw_test
    )
    return calibrated_classifier


def _fit_calibrator(clf, predictions, y, classes, method, sample_weight=None):
    """Fit calibrator(s) and return a `_CalibratedClassifier`
    instance.

    `n_classes` (i.e. `len(clf.classes_)`) calibrators are fitted.
    However, if `n_classes` equals 2, one calibrator is fitted.

    Parameters
    ----------
    clf : estimator instance
        Fitted classifier.

    predictions : array-like, shape (n_samples, n_classes) or (n_samples, 1) \
            when binary.
        Raw predictions returned by the un-calibrated base classifier.

    y : array-like, shape (n_samples,)
        The targets.

    classes : ndarray, shape (n_classes,)
        All the prediction classes.

    method : {'sigmoid', 'isotonic'}
        The method to use for calibration.

    sample_weight : ndarray, shape (n_samples,), default=None
        Sample weights. If None, then samples are equally weighted.

    Returns
    -------
    pipeline : _CalibratedClassifier instance
    """
    Y = label_binarize(y, classes=classes)
    label_encoder = LabelEncoder().fit(classes)
    pos_class_indices = label_encoder.transform(clf.classes_)
    calibrators = []
    for class_idx, this_pred in zip(pos_class_indices, predictions.T):
        if method == "isotonic":
            calibrator = IsotonicRegression(out_of_bounds="clip")
        else:  # "sigmoid"
            calibrator = _SigmoidCalibration()
        calibrator.fit(this_pred, Y[:, class_idx], sample_weight)
        calibrators.append(calibrator)

    pipeline = _CalibratedClassifier(clf, calibrators, method=method, classes=classes)
    return pipeline


class _CalibratedClassifier:
    """Pipeline-like chaining a fitted classifier and its fitted calibrators.

    Parameters
    ----------
    estimator : estimator instance
        Fitted classifier.

    calibrators : list of fitted estimator instances
        List of fitted calibrators (either 'IsotonicRegression' or
        '_SigmoidCalibration'). The number of calibrators equals the number of
        classes. However, if there are 2 classes, the list contains only one
        fitted calibrator.

    classes : array-like of shape (n_classes,)
        All the prediction classes.

    method : {'sigmoid', 'isotonic'}, default='sigmoid'
        The method to use for calibration. Can be 'sigmoid' which
        corresponds to Platt's method or 'isotonic' which is a
        non-parametric approach based on isotonic regression.
    """

    def __init__(self, estimator, calibrators, *, classes, method="sigmoid"):
        self.estimator = estimator
        self.calibrators = calibrators
        self.classes = classes
        self.method = method

    def predict_proba(self, X):
        """Calculate calibrated probabilities.

        Calculates calibrated classification probabilities
        for each class, in a one-vs-all manner, for `X`.

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            The sample data.

        Returns
        -------
        proba : array, shape (n_samples, n_classes)
            The predicted probabilities. Can be exact zeros.
        """
        predictions, _ = _get_response_values(
            self.estimator,
            X,
            response_method=["decision_function", "predict_proba"],
        )
        if predictions.ndim == 1:
            # Reshape binary output from `(n_samples,)` to `(n_samples, 1)`
            predictions = predictions.reshape(-1, 1)

        n_classes = len(self.classes)

        label_encoder = LabelEncoder().fit(self.classes)
        pos_class_indices = label_encoder.transform(self.estimator.classes_)

        proba = np.zeros((_num_samples(X), n_classes))
        for class_idx, this_pred, calibrator in zip(
            pos_class_indices, predictions.T, self.calibrators
        ):
            if n_classes == 2:
                # When binary, `predictions` consists only of predictions for
                # the positive class, while `pos_class_indices` is [0]
                class_idx += 1
            proba[:, class_idx] = calibrator.predict(this_pred)

        # Normalize the probabilities
        if n_classes == 2:
            proba[:, 0] = 1.0 - proba[:, 1]
        else:
            denominator = np.sum(proba, axis=1)[:, np.newaxis]
            # In the edge case where every per-class calibrator returns a null
            # probability for a given sample, use the uniform distribution
            # instead.
            uniform_proba = np.full_like(proba, 1 / n_classes)
            proba = np.divide(
                proba, denominator, out=uniform_proba, where=denominator != 0
            )

        # Deal with cases where the predicted probability minimally exceeds 1.0
        proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0

        return proba


def _sigmoid_calibration(
    predictions, y, sample_weight=None, max_abs_prediction_threshold=30
):
    """Probability Calibration with sigmoid method (Platt 2000)

    Parameters
    ----------
    predictions : ndarray of shape (n_samples,)
        The decision function or predict proba for the samples.

    y : ndarray of shape (n_samples,)
        The targets.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights. If None, then samples are equally weighted.

    Returns
    -------
    a : float
        The slope.

    b : float
        The intercept.
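
    Notes
    -----
    In Platt's notation (followed by the implementation below), the fitted
    calibrator is ``p(y=1 | f) = 1 / (1 + exp(A * f + B))``, where ``A`` and
    ``B`` minimize the log loss against the smoothed targets
    ``(prior1 + 1) / (prior1 + 2)`` for positive samples and
    ``1 / (prior0 + 2)`` for negative ones, ``prior0`` and ``prior1`` being
    the (possibly weighted) class counts.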

    References
    ----------
    Platt, "Probabilistic Outputs for Support Vector Machines"
    r   r   Ndtypeg       @c                 8   | d         z  | d         z                        	j                   }                    |
          \  }}|                                }t	          j        | z  |                                 gt          j                  }||fS )Nr   r
   r   )y_trueraw_predictionrP   )astyper   loss_gradientr   r   asarrayfloat64)ABr   lglossgradFr   bin_lossr   rP   s         rJ   	loss_gradz'_sigmoid_calibration.<locals>.loss_gradA  s    
 a519r!u,44;;L4MMM%%)' & 
 
1
 uuww
 zA26AEEGG8,BJ???TzrL           zL-BFGS-BTgư>@   )gtolftol)rB   jacoptionsr
   )r   r   maxabsr   floatshape
zeros_liker   r   arrayr   r   finfoepsx)r   rh   rP   max_abs_prediction_thresholdscale_constantmax_predictionmask_negative_samplesprior0prior1r   AB0
opt_resultAB_r   r   r   s   ` `          @@@rJ   _sigmoid_calibrationr    s   8 {++KQAANVBF1II&&N 555' 
 F  56;;==!6 67<<>>rv34455f$
a{0111A#.Aa!eHv|$Aa1fI!!H        & (Cfslv|<==>
?
?C%,,
 
	 	 	J ,C
 q6N"CF**rL   c                        e Zd ZdZddZd ZdS )r   zSigmoid regression model.

    Attributes
    ----------
    a_ : float
        The slope.

    b_ : float
        The intercept.
    """

    def fit(self, X, y, sample_weight=None):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : array-like of shape (n_samples,)
            Training data.

        y : array-like of shape (n_samples,)
            Training target.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted.

        Returns
        -------
        self : object
            Returns an instance of self.
        """
        X = column_or_1d(X)
        y = column_or_1d(y)
        X, y = indexable(X, y)

        self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight)
        return self

    def predict(self, T):
        """Predict new data by linear interpolation.

        Parameters
        ----------
        T : array-like of shape (n_samples,)
            Data to predict from.

        Returns
        -------
        T_ : ndarray of shape (n_samples,)
            The predicted data.
        """
        T = column_or_1d(T)
        return expit(-(self.a_ * T + self.b_))


@validate_params(
    {
        "y_true": ["array-like"],
        "y_prob": ["array-like"],
        "pos_label": [Real, str, "boolean", None],
        "n_bins": [Interval(Integral, 1, None, closed="left")],
        "strategy": [StrOptions({"uniform", "quantile"})],
    },
    prefer_skip_nested_validation=True,
)
def calibration_curve(
    y_true,
    y_prob,
    *,
    pos_label=None,
    n_bins=5,
    strategy="uniform",
):
    """Compute true and predicted probabilities for a calibration curve.

    The method assumes the inputs come from a binary classifier, and
    discretizes the [0, 1] interval into bins.

    Calibration curves may also be referred to as reliability diagrams.

    Read more in the :ref:`User Guide <calibration>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        True targets.

    y_prob : array-like of shape (n_samples,)
        Probabilities of the positive class.

    pos_label : int, float, bool or str, default=None
        The label of the positive class.

        .. versionadded:: 1.1

    n_bins : int, default=5
        Number of bins to discretize the [0, 1] interval. A bigger number
        requires more data. Bins with no samples (i.e. without
        corresponding values in `y_prob`) will not be returned, thus the
        returned arrays may have fewer than `n_bins` values.

    strategy : {'uniform', 'quantile'}, default='uniform'
        Strategy used to define the widths of the bins.

        uniform
            The bins have identical widths.
        quantile
            The bins have the same number of samples and depend on `y_prob`.

    Returns
    -------
    prob_true : ndarray of shape (n_bins,) or smaller
        The proportion of samples whose class is the positive class, in each
        bin (fraction of positives).

    prob_pred : ndarray of shape (n_bins,) or smaller
        The mean predicted probability in each bin.
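
    Notes
    -----
    A hand-check of the ``'quantile'`` strategy (a sketch, reusing the data
    from the Examples section below): with nine predictions and ``n_bins=3``,
    each quantile bin holds three samples, giving
    ``prob_true = [0., 2/3, 1.]`` and ``prob_pred = [0.2, 0.5833..., 0.9]``.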

    References
    ----------
    Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good
    Probabilities With Supervised Learning, in Proceedings of the 22nd
    International Conference on Machine Learning (ICML).
    See section 4 (Qualitative Analysis of Predictions).

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.calibration import calibration_curve
    >>> y_true = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1])
    >>> y_pred = np.array([0.1, 0.2, 0.3, 0.4, 0.65, 0.7, 0.8, 0.9,  1.])
    >>> prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=3)
    >>> prob_true
    array([0. , 0.5, 1. ])
    >>> prob_pred
    array([0.2  , 0.525, 0.85 ])
    """
    y_true = column_or_1d(y_true)
    y_prob = column_or_1d(y_prob)
    check_consistent_length(y_true, y_prob)
    pos_label = _check_pos_label_consistency(pos_label, y_true)

    if y_prob.min() < 0 or y_prob.max() > 1:
        raise ValueError("y_prob has values outside [0, 1].")

    labels = np.unique(y_true)
    if len(labels) > 2:
        raise ValueError(
            f"Only binary classification is supported. Provided labels {labels}."
        )
    y_true = y_true == pos_label

    if strategy == "quantile":  # Determine bin edges by distribution of data
        quantiles = np.linspace(0, 1, n_bins + 1)
        bins = np.percentile(y_prob, quantiles * 100)
    elif strategy == "uniform":
        bins = np.linspace(0.0, 1.0, n_bins + 1)
    else:
        raise ValueError(
            "Invalid entry to 'strategy' input. Strategy "
            "must be either 'quantile' or 'uniform'."
        )

    binids = np.searchsorted(bins[1:-1], y_prob)

    bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins))
    bin_true = np.bincount(binids, weights=y_true, minlength=len(bins))
    bin_total = np.bincount(binids, minlength=len(bins))

    nonzero = bin_total != 0
    prob_true = bin_true[nonzero] / bin_total[nonzero]
    prob_pred = bin_sums[nonzero] / bin_total[nonzero]

    return prob_true, prob_pred


class CalibrationDisplay(_BinaryClassifierCurveDisplayMixin):
    """Calibration curve (also known as reliability diagram) visualization.

    It is recommended to use
    :func:`~sklearn.calibration.CalibrationDisplay.from_estimator` or
    :func:`~sklearn.calibration.CalibrationDisplay.from_predictions`
    to create a `CalibrationDisplay`. All parameters are stored as attributes.

    Read more about calibration in the :ref:`User Guide <calibration>` and
    more about the scikit-learn visualization API in :ref:`visualizations`.

    For an example on how to use the visualization, see
    :ref:`sphx_glr_auto_examples_calibration_plot_calibration_curve.py`.

    .. versionadded:: 1.0

    Parameters
    ----------
    prob_true : ndarray of shape (n_bins,)
        The proportion of samples whose class is the positive class (fraction
        of positives), in each bin.

    prob_pred : ndarray of shape (n_bins,)
        The mean predicted probability in each bin.

    y_prob : ndarray of shape (n_samples,)
        Probability estimates for the positive class, for each sample.

    estimator_name : str, default=None
        Name of estimator. If None, the estimator name is not shown.

    pos_label : int, float, bool or str, default=None
        The positive class when computing the calibration curve.
        By default, `pos_label` is set to `estimator.classes_[1]` when using
        `from_estimator` and set to 1 when using `from_predictions`.

        .. versionadded:: 1.1

    Attributes
    ----------
    line_ : matplotlib Artist
        Calibration curve.

    ax_ : matplotlib Axes
        Axes with calibration curve.

    figure_ : matplotlib Figure
        Figure containing the curve.

    See Also
    --------
    calibration_curve : Compute true and predicted probabilities for a
        calibration curve.
    CalibrationDisplay.from_predictions : Plot calibration curve using true
        and predicted labels.
    CalibrationDisplay.from_estimator : Plot calibration curve using an
        estimator and data.

    Examples
    --------
    >>> from sklearn.datasets import make_classification
    >>> from sklearn.model_selection import train_test_split
    >>> from sklearn.linear_model import LogisticRegression
    >>> from sklearn.calibration import calibration_curve, CalibrationDisplay
    >>> X, y = make_classification(random_state=0)
    >>> X_train, X_test, y_train, y_test = train_test_split(
    ...     X, y, random_state=0)
    >>> clf = LogisticRegression(random_state=0)
    >>> clf.fit(X_train, y_train)
    LogisticRegression(random_state=0)
    >>> y_prob = clf.predict_proba(X_test)[:, 1]
    >>> prob_true, prob_pred = calibration_curve(y_test, y_prob, n_bins=10)
    >>> disp = CalibrationDisplay(prob_true, prob_pred, y_prob)
    >>> disp.plot()
    <...>
    """

    def __init__(
        self, prob_true, prob_pred, y_prob, *, estimator_name=None, pos_label=None
    ):
        self.prob_true = prob_true
        self.prob_pred = prob_pred
        self.y_prob = y_prob
        self.estimator_name = estimator_name
        self.pos_label = pos_label

    def plot(self, *, ax=None, name=None, ref_line=True, **kwargs):
        """Plot visualization.

        Extra keyword arguments will be passed to
        :func:`matplotlib.pyplot.plot`.

        Parameters
        ----------
        ax : Matplotlib Axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is
            created.

        name : str, default=None
            Name for labeling curve. If `None`, use `estimator_name` if
            not `None`, otherwise no labeling is shown.

        ref_line : bool, default=True
            If `True`, plots a reference line representing a perfectly
            calibrated classifier.

        **kwargs : dict
            Keyword arguments to be passed to :func:`matplotlib.pyplot.plot`.

        Returns
        -------
        display : :class:`~sklearn.calibration.CalibrationDisplay`
            Object that stores computed values.
        """
        self.ax_, self.figure_, name = self._validate_plot_params(ax=ax, name=name)

        info_pos_label = (
            f"(Positive class: {self.pos_label})" if self.pos_label is not None else ""
        )

        default_line_kwargs = {"marker": "s", "linestyle": "-"}
        if name is not None:
            default_line_kwargs["label"] = name
        line_kwargs = _validate_style_kwargs(default_line_kwargs, kwargs)

        ref_line_label = "Perfectly calibrated"
        existing_ref_line = ref_line_label in self.ax_.get_legend_handles_labels()[1]
        if ref_line and not existing_ref_line:
            self.ax_.plot([0, 1], [0, 1], "k:", label=ref_line_label)
        self.line_ = self.ax_.plot(self.prob_pred, self.prob_true, **line_kwargs)[0]

        # We always have to show the legend for at least the reference line
        self.ax_.legend(loc="lower right")

        xlabel = f"Mean predicted probability {info_pos_label}"
        ylabel = f"Fraction of positives {info_pos_label}"
        self.ax_.set(xlabel=xlabel, ylabel=ylabel)

        return self

    @classmethod
    def from_estimator(
        cls,
        estimator,
        X,
        y,
        *,
        n_bins=5,
        strategy="uniform",
        pos_label=None,
        name=None,
        ref_line=True,
        ax=None,
        **kwargs,
    ):
        """Plot calibration curve using a binary classifier and data.

        A calibration curve, also known as a reliability diagram, uses inputs
        from a binary classifier and plots the average predicted probability
        for each bin against the fraction of positive classes, on the
        y-axis.

        Extra keyword arguments will be passed to
        :func:`matplotlib.pyplot.plot`.

        Read more about calibration in the :ref:`User Guide <calibration>` and
        more about the scikit-learn visualization API in :ref:`visualizations`.

        .. versionadded:: 1.0

        Parameters
        ----------
        estimator : estimator instance
            Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`
            in which the last estimator is a classifier. The classifier must
            have a :term:`predict_proba` method.

        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Input values.

        y : array-like of shape (n_samples,)
            Binary target values.

        n_bins : int, default=5
            Number of bins to discretize the [0, 1] interval into when
            calculating the calibration curve. A bigger number requires more
            data.

        strategy : {'uniform', 'quantile'}, default='uniform'
            Strategy used to define the widths of the bins.

            - `'uniform'`: The bins have identical widths.
            - `'quantile'`: The bins have the same number of samples and depend
              on predicted probabilities.

        pos_label : int, float, bool or str, default=None
            The positive class when computing the calibration curve.
            By default, `estimators.classes_[1]` is considered as the
            positive class.

            .. versionadded:: 1.1

        name : str, default=None
            Name for labeling curve. If `None`, the name of the estimator is
            used.

        ref_line : bool, default=True
            If `True`, plots a reference line representing a perfectly
            calibrated classifier.

        ax : matplotlib axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is
            created.

        **kwargs : dict
            Keyword arguments to be passed to :func:`matplotlib.pyplot.plot`.

        Returns
        -------
        display : :class:`~sklearn.calibration.CalibrationDisplay`.
            Object that stores computed values.

        See Also
        --------
        CalibrationDisplay.from_predictions : Plot calibration curve using true
            and predicted labels.

        Examples
        --------
        >>> import matplotlib.pyplot as plt
        >>> from sklearn.datasets import make_classification
        >>> from sklearn.model_selection import train_test_split
        >>> from sklearn.linear_model import LogisticRegression
        >>> from sklearn.calibration import CalibrationDisplay
        >>> X, y = make_classification(random_state=0)
        >>> X_train, X_test, y_train, y_test = train_test_split(
        ...     X, y, random_state=0)
        >>> clf = LogisticRegression(random_state=0)
        >>> clf.fit(X_train, y_train)
        LogisticRegression(random_state=0)
        >>> disp = CalibrationDisplay.from_estimator(clf, X_test, y_test)
        >>> plt.show()
        """
        y_prob, pos_label, name = cls._validate_and_get_response_values(
            estimator,
            X,
            y,
            response_method="predict_proba",
            pos_label=pos_label,
            name=name,
        )

        return cls.from_predictions(
            y,
            y_prob,
            n_bins=n_bins,
            strategy=strategy,
            pos_label=pos_label,
            name=name,
            ref_line=ref_line,
            ax=ax,
            **kwargs,
        )

    @classmethod
    def from_predictions(
        cls,
        y_true,
        y_prob,
        *,
        n_bins=5,
        strategy="uniform",
        pos_label=None,
        name=None,
        ref_line=True,
        ax=None,
        **kwargs,
    ):
        """Plot calibration curve using true labels and predicted probabilities.

        Calibration curve, also known as reliability diagram, uses inputs
        from a binary classifier and plots the average predicted probability
        for each bin against the fraction of positive classes, on the
        y-axis.

        Extra keyword arguments will be passed to
        :func:`matplotlib.pyplot.plot`.

        Read more about calibration in the :ref:`User Guide <calibration>` and
        more about the scikit-learn visualization API in :ref:`visualizations`.

        .. versionadded:: 1.0

        Parameters
        ----------
        y_true : array-like of shape (n_samples,)
            True labels.

        y_prob : array-like of shape (n_samples,)
            The predicted probabilities of the positive class.

        n_bins : int, default=5
            Number of bins to discretize the [0, 1] interval into when
            calculating the calibration curve. A bigger number requires more
            data.

        strategy : {'uniform', 'quantile'}, default='uniform'
            Strategy used to define the widths of the bins.

            - `'uniform'`: The bins have identical widths.
            - `'quantile'`: The bins have the same number of samples and depend
              on predicted probabilities.

        pos_label : int, float, bool or str, default=None
            The positive class when computing the calibration curve.
            By default `pos_label` is set to 1.

            .. versionadded:: 1.1

        name : str, default=None
            Name for labeling curve.

        ref_line : bool, default=True
            If `True`, plots a reference line representing a perfectly
            calibrated classifier.

        ax : matplotlib axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is
            created.

        **kwargs : dict
            Keyword arguments to be passed to :func:`matplotlib.pyplot.plot`.

        Returns
        -------
        display : :class:`~sklearn.calibration.CalibrationDisplay`.
            Object that stores computed values.

        See Also
        --------
        CalibrationDisplay.from_estimator : Plot calibration curve using an
            estimator and data.

        Examples
        --------
        >>> import matplotlib.pyplot as plt
        >>> from sklearn.datasets import make_classification
        >>> from sklearn.model_selection import train_test_split
        >>> from sklearn.linear_model import LogisticRegression
        >>> from sklearn.calibration import CalibrationDisplay
        >>> X, y = make_classification(random_state=0)
        >>> X_train, X_test, y_train, y_test = train_test_split(
        ...     X, y, random_state=0)
        >>> clf = LogisticRegression(random_state=0)
        >>> clf.fit(X_train, y_train)
        LogisticRegression(random_state=0)
        >>> y_prob = clf.predict_proba(X_test)[:, 1]
        >>> disp = CalibrationDisplay.from_predictions(y_test, y_prob)
        >>> plt.show()
        """
        pos_label_validated, name = cls._validate_from_predictions_params(
            y_true, y_prob, sample_weight=None, pos_label=pos_label, name=name
        )

        prob_true, prob_pred = calibration_curve(
            y_true, y_prob, n_bins=n_bins, strategy=strategy, pos_label=pos_label
        )

        disp = cls(
            prob_true=prob_true,
            prob_pred=prob_pred,
            y_prob=y_prob,
            estimator_name=name,
            pos_label=pos_label_validated,
        )
        return disp.plot(ax=ax, ref_line=ref_line, **kwargs)