
    0PhD                     J    d Z ddlmZ ddlZddlmZmZ ddlm	Z	 d
dZ
dd	ZdS )z
Common code for all metrics.

    )combinationsN   )check_arraycheck_consistent_length)type_of_targetc           	      6   d}||vr"t          d                    |                    t          |          }|dvr"t          d                    |                    |dk    r | |||          S t          |||           t	          |          }t	          |          }d}|}d}	|d	k    rK| t          j        ||j        d                   }|                                }|                                }n|d
k    r|=t          j	        t          j
        |t          j        |d                    d          }	nt          j	        |d          }	t          j        |		                                d          rdS n|dk    r|}	d}d}|j        dk    r|                    d          }|j        dk    r|                    d          }|j        |         }
t          j        |
f          }t          |
          D ]g}|                    |g|                                          }|                    |g|                                          } | |||          ||<   h|5|	t          j        |	          }	d||	dk    <   t          j        ||	          S |S )aM  Average a binary metric for multilabel classification.

    Parameters
    ----------
    y_true : array, shape = [n_samples] or [n_samples, n_classes]
        True binary labels in binary label indicators.

    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.

    average : {None, 'micro', 'macro', 'samples', 'weighted'}, default='macro'
        If ``None``, the scores for each class are returned. Otherwise,
        this determines the type of averaging performed on the data:

        ``'micro'``:
            Calculate metrics globally by considering each element of the label
            indicator matrix as a label.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean.  This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label).
        ``'samples'``:
            Calculate metrics for each instance, and find their average.

        Will be ignored when ``y_true`` is binary.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    binary_metric : callable, returns shape [n_classes]
        The binary metric function to use.

    Returns
    -------
    score : float or array of shape [n_classes]
        If not ``None``, average the score, else return the score for each
        classes.

    )Nmicromacroweightedsampleszaverage has to be one of {0})binaryzmultilabel-indicatorz{0} format is not supportedr   )sample_weight   Nr	   r   )r   r   )axisg        r   weights)
ValueErrorformatr   r   r   nprepeatshaperavelsummultiplyreshapeisclosendimzerosrangetakeasarrayaverage)binary_metricy_truey_scorer#   r   average_optionsy_typenot_average_axisscore_weightaverage_weight	n_classesscorecy_true_c	y_score_cs                  U/var/www/html/test/jupyter/venv/lib/python3.11/site-packages/sklearn/metrics/_base.py_average_binary_scorer2      s   V FOo%%7>>OOPPPF##F7776==fEEFFF}VWMJJJJFG];;;  F'""G LN'#9\6<?CCL--//	J		#VFBJ|W$E$EFFQ  NN  VF333N:n((**C00 	1	 
I		%{a((|q//'**./IHi\""E9 R R;;s)9;::@@BBLL!+;L<<BBDD	 =9LQQQa %  Z77N)*E.A%&z%8888    r
   c                 P   t          ||           t          j        |          }|j        d         }||dz
  z  dz  }t          j        |          }|dk    }|rt          j        |          nd}	t          t          |d                    D ]\  }
\  }}||k    }||k    }t          j        ||          }|rt          j        |          |	|
<   ||         }||         } | ||||f                   } | ||||f                   }||z   dz  ||
<   t          j        ||	          S )aL  Average one-versus-one scores for multiclass classification.

    Uses the binary metric for one-vs-one multiclass classification,
    where the score is computed according to the Hand & Till (2001) algorithm.

    Parameters
    ----------
    binary_metric : callable
        The binary metric function to use that accepts the following as input:
            y_true_target : array, shape = [n_samples_target]
                Some sub-array of y_true for a pair of classes designated
                positive and negative in the one-vs-one scheme.
            y_score_target : array, shape = [n_samples_target]
                Scores corresponding to the probability estimates
                of a sample belonging to the designated positive class label

    y_true : array-like of shape (n_samples,)
        True multiclass labels.

    y_score : array-like of shape (n_samples, n_classes)
        Target scores corresponding to probability estimates of a sample
        belonging to a particular class.

    average : {'macro', 'weighted'}, default='macro'
        Determines the type of averaging performed on the pairwise binary
        metric scores:
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean. This does not take label imbalance into account. Classes
            are assumed to be uniformly distributed.
        ``'weighted'``:
            Calculate metrics for each label, taking into account the
            prevalence of the classes.

    Returns
    -------
    score : float
        Average of the pairwise binary metric scores.
    r   r   r   r   Nr   )	r   r   uniquer   empty	enumerater   
logical_orr#   )r$   r%   r&   r#   y_true_uniquer,   n_pairspair_scoresis_weighted
prevalenceixaba_maskb_maskab_maska_trueb_truea_true_scoreb_true_scores                       r1   _average_multiclass_ovo_scorerH   ~   sO   P FG,,,If%%M#A&I9q=)Q.G(7##KZ'K&1;'"""tJ  ]A > >?? < <
FQ11-// 	1Z00JrN$}VWWaZ-@AA$}VWWaZ-@AA',6!;B:k:6666r3   )N)r
   )__doc__	itertoolsr   numpyr   utilsr   r   utils.multiclassr   r2   rH    r3   r1   <module>rO      s     # " " " " "     8 8 8 8 8 8 8 8 - - - - - -j j j jZC7 C7 C7 C7 C7 C7r3   