
"""Metrics to assess performance on a classification task given class
predictions. The available metrics are complementary to the metrics available
in scikit-learn.

Functions named as ``*_score`` return a scalar value to maximize: the higher
the better

Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""

import functools
import numbers
import warnings
from inspect import signature

import numpy as np
import scipy as sp
from sklearn.metrics import mean_absolute_error, precision_recall_fscore_support
from sklearn.metrics._classification import _check_targets, _prf_divide
from sklearn.preprocessing import LabelEncoder
from sklearn.utils._param_validation import Interval, StrOptions
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.validation import check_consistent_length, column_or_1d

from ..utils._sklearn_compat import validate_params


@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "labels": ["array-like", None],
        "pos_label": [str, numbers.Integral, None],
        "average": [
            None,
            StrOptions({"binary", "macro", "micro", "samples", "weighted"}),
        ],
        "warn_for": ["array-like"],
        "sample_weight": ["array-like", None],
    },
    prefer_skip_nested_validation=True,
)
def sensitivity_specificity_support(
    y_true,
    y_pred,
    *,
    labels=None,
    pos_label=1,
    average=None,
    warn_for=("sensitivity", "specificity"),
    sample_weight=None,
):
    """Compute sensitivity, specificity, and support for each class.

    The sensitivity is the ratio ``tp / (tp + fn)`` where ``tp`` is the number
    of true positives and ``fn`` the number of false negatives. The sensitivity
    quantifies the ability to avoid false negatives [1]_.

    The specificity is the ratio ``tn / (tn + fp)`` where ``tn`` is the number
    of true negatives and ``fp`` the number of false positives. The specificity
    quantifies the ability to avoid false positives [1]_.

    The support is the number of occurrences of each class in ``y_true``.

    If ``pos_label is None`` and in binary classification, this function
    returns the average sensitivity and specificity if ``average``
    is ``'weighted'``.

    Read more in the :ref:`User Guide <sensitivity_specificity>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,)
        Estimated targets as returned by a classifier.

    labels : array-like, default=None
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``. Labels present in the data can be
        excluded, for example to calculate a multiclass average ignoring a
        majority negative class, while labels not present in the data will
        result in 0 components in a macro average. For multilabel targets,
        labels are column indices. By default, all labels in ``y_true`` and
        ``y_pred`` are used in sorted order.

    pos_label : str, int or None, default=1
        The class to report if ``average='binary'`` and the data is binary.
        If ``pos_label is None`` and in binary classification, this function
        returns the average sensitivity and specificity if ``average``
        is ``'weighted'``.
        If the data are multiclass, this will be ignored;
        setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
        scores for that label only.

    average : str, default=None
        If ``None``, the scores for each class are returned. Otherwise, this
        determines the type of averaging performed on the data:

        ``'binary'``:
            Only report results for the class specified by ``pos_label``.
            This is applicable only if targets (``y_{true,pred}``) are binary.
        ``'micro'``:
            Calculate metrics globally by counting the total true positives,
            false negatives and false positives.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean.  This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label). This
            alters 'macro' to account for label imbalance; it can result in an
            F-score that is not between precision and recall.
        ``'samples'``:
            Calculate metrics for each instance, and find their average (only
            meaningful for multilabel classification where this differs from
            :func:`accuracy_score`).

    warn_for : tuple or set of {"sensitivity", "specificity"}, for internal use
        This determines which warnings will be made in the case that this
        function is being used to return only one of its metrics.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    Returns
    -------
    sensitivity : float (if ``average`` is not None) or ndarray of shape (n_unique_labels,)
        The sensitivity metric.

    specificity : float (if ``average`` is not None) or ndarray of shape (n_unique_labels,)
        The specificity metric.

    support : ndarray of shape (n_unique_labels,) or None (if ``average`` is not None)
        The number of occurrences of each label in ``y_true``.

    References
    ----------
    .. [1] `Wikipedia entry for the Sensitivity and specificity
           <https://en.wikipedia.org/wiki/Sensitivity_and_specificity>`_

    Examples
    --------
    >>> import numpy as np
    >>> from imblearn.metrics import sensitivity_specificity_support
    >>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
    >>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
    >>> sensitivity_specificity_support(y_true, y_pred, average='macro')
    (0.33..., 0.66..., None)
    >>> sensitivity_specificity_support(y_true, y_pred, average='micro')
    (0.33..., 0.66..., None)
    >>> sensitivity_specificity_support(y_true, y_pred, average='weighted')
    (0.33..., 0.66..., None)
    """
    average_options = (None, "micro", "macro", "weighted", "samples")
    if average not in average_options and average != "binary":
        raise ValueError("average has to be one of " + str(average_options))

    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    present_labels = unique_labels(y_true, y_pred)

    if average == "binary":
        if y_type == "binary":
            if pos_label not in present_labels:
                if len(present_labels) < 2:
                    # only negative labels are present
                    return (0.0, 0.0, 0)
                else:
                    raise ValueError(
                        f"pos_label={pos_label} is not a valid label: "
                        f"{present_labels}"
                    )
            labels = [pos_label]
        else:
            raise ValueError(
                "Target is %s but average='binary'. Please "
                "choose another average setting." % y_type
            )
    elif pos_label not in (None, 1):
        warnings.warn(
            f"Note that pos_label (set to {pos_label}) is ignored when "
            f"average != 'binary' (got {average}). You may use "
            "labels=[pos_label] to specify a single positive class.",
            UserWarning,
        )

    if labels is None:
        labels = present_labels
        n_labels = None
    else:
        n_labels = len(labels)
        labels = np.hstack(
            [labels, np.setdiff1d(present_labels, labels, assume_unique=True)]
        )

    # compute tp_sum, pred_sum, true_sum
    if y_type.startswith("multilabel"):
        raise ValueError("imblearn does not support multilabel")
    elif average == "samples":
        raise ValueError(
            "Sample-based precision, recall, fscore is not meaningful outside "
            "multilabel classification. See the accuracy_score instead."
        )

    le = LabelEncoder()
    le.fit(labels)
    y_true = le.transform(y_true)
    y_pred = le.transform(y_pred)
    sorted_labels = le.classes_

    # labels are now from 0 to len(labels) - 1 -> use bincount
    tp = y_true == y_pred
    tp_bins = y_true[tp]
    if sample_weight is not None:
        tp_bins_weights = np.asarray(sample_weight)[tp]
    else:
        tp_bins_weights = None

    if len(tp_bins):
        tp_sum = np.bincount(tp_bins, weights=tp_bins_weights, minlength=len(labels))
    else:
        # pathological case: no true positive at all
        true_sum = pred_sum = tp_sum = np.zeros(len(labels))
    if len(y_pred):
        pred_sum = np.bincount(y_pred, weights=sample_weight, minlength=len(labels))
    if len(y_true):
        true_sum = np.bincount(y_true, weights=sample_weight, minlength=len(labels))

    # true negatives are deduced from the three other sufficient statistics
    tn_sum = y_true.size - (pred_sum + true_sum - tp_sum)

    # retain only the labels that were requested
    indices = np.searchsorted(sorted_labels, labels[:n_labels])
    tp_sum = tp_sum[indices]
    true_sum = true_sum[indices]
    pred_sum = pred_sum[indices]
    tn_sum = tn_sum[indices]

    if average == "micro":
        tp_sum = np.array([tp_sum.sum()])
        pred_sum = np.array([pred_sum.sum()])
        true_sum = np.array([true_sum.sum()])
        tn_sum = np.array([tn_sum.sum()])

    # divide and, on zero division, set the score to 0 and warn
    with np.errstate(divide="ignore", invalid="ignore"):
        specificity = _prf_divide(
            tn_sum,
            tn_sum + pred_sum - tp_sum,
            "specificity",
            "predicted",
            average,
            warn_for,
        )
        sensitivity = _prf_divide(
            tp_sum, true_sum, "sensitivity", "true", average, warn_for
        )

    # average the results
    if average == "weighted":
        weights = true_sum
        if weights.sum() == 0:
            return 0, 0, None
    elif average == "samples":
        weights = sample_weight
    else:
        weights = None

    if average is not None:
        assert average != "binary" or len(sensitivity) == 1
        sensitivity = np.average(sensitivity, weights=weights)
        specificity = np.average(specificity, weights=weights)
        true_sum = None  # no support is reported when averaging

    return sensitivity, specificity, true_sum


@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "labels": ["array-like", None],
        "pos_label": [str, numbers.Integral, None],
        "average": [
            None,
            StrOptions({"binary", "macro", "micro", "samples", "weighted"}),
        ],
        "sample_weight": ["array-like", None],
    },
    prefer_skip_nested_validation=True,
)
def sensitivity_score(
    y_true, y_pred, *, labels=None, pos_label=1, average=None, sample_weight=None
):
    """Compute the sensitivity.

    The sensitivity is the ratio ``tp / (tp + fn)`` where ``tp`` is the number
    of true positives and ``fn`` the number of false negatives. The sensitivity
    quantifies the ability to avoid false negatives.

    The best value is 1 and the worst value is 0.

    Read more in the :ref:`User Guide <sensitivity_specificity>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,)
        Estimated targets as returned by a classifier.

    labels : array-like, default=None
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``. Labels present in the data can be
        excluded, for example to calculate a multiclass average ignoring a
        majority negative class, while labels not present in the data will
        result in 0 components in a macro average.

    pos_label : str, int or None, default=1
        The class to report if ``average='binary'`` and the data is binary.
        If ``pos_label is None`` and in binary classification, this function
        returns the average sensitivity if ``average`` is ``'weighted'``.
        If the data are multiclass, this will be ignored;
        setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
        scores for that label only.

    average : str, default=None
        If ``None``, the scores for each class are returned. Otherwise, this
        determines the type of averaging performed on the data:

        ``'binary'``:
            Only report results for the class specified by ``pos_label``.
            This is applicable only if targets (``y_{true,pred}``) are binary.
        ``'micro'``:
            Calculate metrics globally by counting the total true positives,
            false negatives and false positives.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean.  This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label). This
            alters 'macro' to account for label imbalance; it can result in an
            F-score that is not between precision and recall.
        ``'samples'``:
            Calculate metrics for each instance, and find their average (only
            meaningful for multilabel classification where this differs from
            :func:`accuracy_score`).

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    Returns
    -------
    sensitivity : float (if ``average`` is not None) or ndarray of shape (n_unique_labels,)
        The sensitivity metric.

    Examples
    --------
    >>> import numpy as np
    >>> from imblearn.metrics import sensitivity_score
    >>> y_true = [0, 1, 2, 0, 1, 2]
    >>> y_pred = [0, 2, 1, 0, 0, 1]
    >>> sensitivity_score(y_true, y_pred, average='macro')
    0.33...
    >>> sensitivity_score(y_true, y_pred, average='micro')
    0.33...
    >>> sensitivity_score(y_true, y_pred, average='weighted')
    0.33...
    >>> sensitivity_score(y_true, y_pred, average=None)
    array([1., 0., 0.])
    """
    s, _, _ = sensitivity_specificity_support(
        y_true,
        y_pred,
        labels=labels,
        pos_label=pos_label,
        average=average,
        warn_for=("sensitivity",),
        sample_weight=sample_weight,
    )

    return s


@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "labels": ["array-like", None],
        "pos_label": [str, numbers.Integral, None],
        "average": [
            None,
            StrOptions({"binary", "macro", "micro", "samples", "weighted"}),
        ],
        "sample_weight": ["array-like", None],
    },
    prefer_skip_nested_validation=True,
)
def specificity_score(
    y_true, y_pred, *, labels=None, pos_label=1, average=None, sample_weight=None
):
    """Compute the specificity.

    The specificity is the ratio ``tn / (tn + fp)`` where ``tn`` is the number
    of true negatives and ``fp`` the number of false positives. The specificity
    quantifies the ability to avoid false positives.

    The best value is 1 and the worst value is 0.

    Read more in the :ref:`User Guide <sensitivity_specificity>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,)
        Estimated targets as returned by a classifier.

    labels : array-like, default=None
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``. Labels present in the data can be
        excluded, for example to calculate a multiclass average ignoring a
        majority negative class, while labels not present in the data will
        result in 0 components in a macro average.

    pos_label : str, int or None, default=1
        The class to report if ``average='binary'`` and the data is binary.
        If ``pos_label is None`` and in binary classification, this function
        returns the average specificity if ``average`` is ``'weighted'``.
        If the data are multiclass, this will be ignored;
        setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
        scores for that label only.

    average : str, default=None
        If ``None``, the scores for each class are returned. Otherwise, this
        determines the type of averaging performed on the data:

        ``'binary'``:
            Only report results for the class specified by ``pos_label``.
            This is applicable only if targets (``y_{true,pred}``) are binary.
        ``'micro'``:
            Calculate metrics globally by counting the total true positives,
            false negatives and false positives.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean.  This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label). This
            alters 'macro' to account for label imbalance; it can result in an
            F-score that is not between precision and recall.
        ``'samples'``:
            Calculate metrics for each instance, and find their average (only
            meaningful for multilabel classification where this differs from
            :func:`accuracy_score`).

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    Returns
    -------
    specificity : float (if ``average`` is not None) or ndarray of shape (n_unique_labels,)
        The specificity metric.

    Examples
    --------
    >>> import numpy as np
    >>> from imblearn.metrics import specificity_score
    >>> y_true = [0, 1, 2, 0, 1, 2]
    >>> y_pred = [0, 2, 1, 0, 0, 1]
    >>> specificity_score(y_true, y_pred, average='macro')
    0.66...
    >>> specificity_score(y_true, y_pred, average='micro')
    0.66...
    >>> specificity_score(y_true, y_pred, average='weighted')
    0.66...
    >>> specificity_score(y_true, y_pred, average=None)
    array([0.75, 0.5 , 0.75])
    """
    _, s, _ = sensitivity_specificity_support(
        y_true,
        y_pred,
        labels=labels,
        pos_label=pos_label,
        average=average,
        warn_for=("specificity",),
        sample_weight=sample_weight,
    )

    return s


@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "labels": ["array-like", None],
        "pos_label": [str, numbers.Integral, None],
        "average": [
            None,
            StrOptions(
                {"binary", "macro", "micro", "multiclass", "samples", "weighted"}
            ),
        ],
        "sample_weight": ["array-like", None],
        "correction": [Interval(numbers.Real, 0, None, closed="left")],
    },
    prefer_skip_nested_validation=True,
)
def geometric_mean_score(
    y_true,
    y_pred,
    *,
    labels=None,
    pos_label=1,
    average="multiclass",
    sample_weight=None,
    correction=0.0,
):
    """Compute the geometric mean.

    The geometric mean (G-mean) is the root of the product of class-wise
    sensitivity. This measure tries to maximize the accuracy on each of the
    classes while keeping these accuracies balanced. For binary classification
    G-mean is the square root of the product of the sensitivity
    and specificity. For multi-class problems it is a higher root of the
    product of sensitivity for each class.

    For compatibility with other imbalance performance measures, G-mean can be
    calculated for each class separately on a one-vs-rest basis when
    ``average != 'multiclass'``.

    The best value is 1 and the worst value is 0. Traditionally if at least one
    class is unrecognized by the classifier, G-mean resolves to zero. To
    alleviate this property, for highly multi-class problems the sensitivity of
    unrecognized classes can be "corrected" to be a user-specified value
    (instead of zero). This option works only if ``average == 'multiclass'``.

    Read more in the :ref:`User Guide <imbalanced_metrics>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,)
        Estimated targets as returned by a classifier.

    labels : array-like, default=None
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``. Labels present in the data can be
        excluded, for example to calculate a multiclass average ignoring a
        majority negative class, while labels not present in the data will
        result in 0 components in a macro average.

    pos_label : str, int or None, default=1
        The class to report if ``average='binary'`` and the data is binary.
        If ``pos_label is None`` and in binary classification, this function
        returns the average geometric mean if ``average`` is
        ``'weighted'``.
        If the data are multiclass, this will be ignored;
        setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
        scores for that label only.

    average : str or None, default='multiclass'
        If ``None``, the scores for each class are returned. Otherwise, this
        determines the type of averaging performed on the data:

        ``'binary'``:
            Only report results for the class specified by ``pos_label``.
            This is applicable only if targets (``y_{true,pred}``) are binary.
        ``'micro'``:
            Calculate metrics globally by counting the total true positives,
            false negatives and false positives.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean.  This does not take label imbalance into account.
        ``'multiclass'``:
            No average is taken.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label). This
            alters 'macro' to account for label imbalance; it can result in an
            F-score that is not between precision and recall.
        ``'samples'``:
            Calculate metrics for each instance, and find their average (only
            meaningful for multilabel classification where this differs from
            :func:`accuracy_score`).

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    correction : float, default=0.0
        Substitutes sensitivity of unrecognized classes from zero to a given
        value.

    Returns
    -------
    geometric_mean : float
        Returns the geometric mean.

    Notes
    -----
    See :ref:`sphx_glr_auto_examples_evaluation_plot_metrics.py`.

    References
    ----------
    .. [1] Kubat, M. and Matwin, S. "Addressing the curse of
       imbalanced training sets: one-sided selection" ICML (1997)

    .. [2] Barandela, R., Sánchez, J. S., García, V., & Rangel, E. "Strategies
       for learning in class imbalance problems", Pattern Recognition,
       36(3), (2003), pp 849-851.

    Examples
    --------
    >>> from imblearn.metrics import geometric_mean_score
    >>> y_true = [0, 1, 2, 0, 1, 2]
    >>> y_pred = [0, 2, 1, 0, 0, 1]
    >>> geometric_mean_score(y_true, y_pred)
    0.0
    >>> geometric_mean_score(y_true, y_pred, correction=0.001)
    0.010...
    >>> geometric_mean_score(y_true, y_pred, average='macro')
    0.471...
    >>> geometric_mean_score(y_true, y_pred, average='micro')
    0.471...
    >>> geometric_mean_score(y_true, y_pred, average='weighted')
    0.471...
    >>> geometric_mean_score(y_true, y_pred, average=None)
    array([0.866...,  0.       ,  0.       ])
    """
    if average is None or average != "multiclass":
        sen, spe, _ = sensitivity_specificity_support(
            y_true,
            y_pred,
            labels=labels,
            pos_label=pos_label,
            average=average,
            warn_for=("specificity",),
            sample_weight=sample_weight,
        )

        return np.sqrt(sen * spe)
    else:
        present_labels = unique_labels(y_true, y_pred)

        if labels is None:
            labels = present_labels
            n_labels = None
        else:
            n_labels = len(labels)
            labels = np.hstack(
                [labels, np.setdiff1d(present_labels, labels, assume_unique=True)]
            )

        le = LabelEncoder()
        le.fit(labels)
        y_true = le.transform(y_true)
        y_pred = le.transform(y_pred)
        sorted_labels = le.classes_

        # labels are now from 0 to len(labels) - 1 -> use bincount
        tp = y_true == y_pred
        tp_bins = y_true[tp]

        if sample_weight is not None:
            tp_bins_weights = np.asarray(sample_weight)[tp]
        else:
            tp_bins_weights = None

        if len(tp_bins):
            tp_sum = np.bincount(
                tp_bins, weights=tp_bins_weights, minlength=len(labels)
            )
        else:
            # pathological case: no true positive at all
            true_sum = tp_sum = np.zeros(len(labels))
        if len(y_true):
            true_sum = np.bincount(
                y_true, weights=sample_weight, minlength=len(labels)
            )

        # retain only the labels that were requested
        indices = np.searchsorted(sorted_labels, labels[:n_labels])
        tp_sum = tp_sum[indices]
        true_sum = true_sum[indices]

        with np.errstate(divide="ignore", invalid="ignore"):
            recall = _prf_divide(tp_sum, true_sum, "recall", "true", None, "recall")
        # substitute the sensitivity of unrecognized classes
        recall[recall == 0] = correction

        with np.errstate(divide="ignore", invalid="ignore"):
            gmean = sp.stats.gmean(recall)
        # old versions of scipy return a MaskedConstant instead of 0.0
        if isinstance(gmean, np.ma.core.MaskedConstant):
            return 0.0
        return gmean


@validate_params(
    {"alpha": [numbers.Real], "squared": ["boolean"]},
    prefer_skip_nested_validation=True,
)
def make_index_balanced_accuracy(*, alpha=0.1, squared=True):
    """Balance any scoring function using the index balanced accuracy.

    This factory function wraps a scoring function to express it as the
    index balanced accuracy (IBA). You need to use this function to
    decorate any scoring function.

    Only metrics requiring ``y_pred`` can be corrected with the index
    balanced accuracy. ``y_score`` cannot be used since the dominance
    cannot be computed.

    Read more in the :ref:`User Guide <imbalanced_metrics>`.

    Parameters
    ----------
    alpha : float, default=0.1
        Weighting factor.

    squared : bool, default=True
        If ``squared`` is True, then the metric computed will be squared
        before to be weighted.

    Returns
    -------
    iba_scoring_func : callable,
        Returns the scoring metric decorated which will automatically compute
        the index balanced accuracy.

    Notes
    -----
    See :ref:`sphx_glr_auto_examples_evaluation_plot_metrics.py`.

    References
    ----------
    .. [1] García, Vicente, Javier Salvador Sánchez, and Ramón Alberto
       Mollineda. "On the effectiveness of preprocessing methods when dealing
       with different levels of class imbalance." Knowledge-Based Systems 25.1
       (2012): 13-21.

    Examples
    --------
    >>> from imblearn.metrics import geometric_mean_score as gmean
    >>> from imblearn.metrics import make_index_balanced_accuracy as iba
    >>> gmean = iba(alpha=0.1, squared=True)(gmean)
    >>> y_true = [1, 0, 0, 1, 0, 1]
    >>> y_pred = [0, 0, 1, 1, 0, 1]
    >>> print(gmean(y_true, y_pred, average=None))
    [0.44...  0.44...]
    """

    def decorate(scoring_func):
        @functools.wraps(scoring_func)
        def compute_score(*args, **kwargs):
            signature_scoring_func = signature(scoring_func)
            params_scoring_func = set(signature_scoring_func.parameters.keys())

            # IBA is only defined for metrics computed from ``y_pred``:
            # reject scoring functions that expect a score or a probability.
            prohibited_y_pred = {"y_score", "y_prob", "y2"}
            if prohibited_y_pred.intersection(params_scoring_func):
                raise AttributeError(
                    f"The function {scoring_func.__name__} has an unsupported"
                    " attribute. Only metrics taking `y_pred` are supported."
                )

            args_scoring_func = signature_scoring_func.bind(*args, **kwargs)
            args_scoring_func.apply_defaults()
            _score = scoring_func(*args_scoring_func.args, **args_scoring_func.kwargs)
            if squared:
                _score = np.power(_score, 2)

            # compute the dominance from the parameters shared with
            # sensitivity_specificity_support
            signature_sens_spec = signature(sensitivity_specificity_support)
            params_sens_spec = set(signature_sens_spec.parameters.keys())
            common_params = params_sens_spec.intersection(
                set(args_scoring_func.arguments.keys())
            )
            args_sens_spec = {
                k: args_scoring_func.arguments[k] for k in common_params
            }

            if scoring_func.__name__ == "geometric_mean_score":
                if "average" in args_sens_spec:
                    if args_sens_spec["average"] == "multiclass":
                        args_sens_spec["average"] = "macro"
            elif (
                scoring_func.__name__ == "accuracy_score"
                or scoring_func.__name__ == "jaccard_score"
            ):
                # multilabel is not supported: the only supported average
                # is binary
                args_sens_spec["average"] = "binary"

            sensitivity, specificity, _ = sensitivity_specificity_support(
                **args_sens_spec
            )

            dominance = sensitivity - specificity
            return (1.0 + alpha * dominance) * _score

        return compute_score

    return decorate


@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "labels": ["array-like", None],
        "target_names": ["array-like", None],
        "sample_weight": ["array-like", None],
        "digits": [numbers.Integral],
        "alpha": [numbers.Real],
        "output_dict": ["boolean"],
        "zero_division": [
            StrOptions({"warn"}),
            Interval(numbers.Integral, 0, 1, closed="both"),
        ],
    },
    prefer_skip_nested_validation=True,
)
def classification_report_imbalanced(
    y_true,
    y_pred,
    *,
    labels=None,
    target_names=None,
    sample_weight=None,
    digits=2,
    alpha=0.1,
    output_dict=False,
    zero_division="warn",
):
    """Build a classification report based on metrics used with imbalanced dataset.

    Specific metrics have been proposed to evaluate the classification
    performed on imbalanced dataset. This report compiles the
    state-of-the-art metrics: precision/recall/specificity, geometric
    mean, and index balanced accuracy of the
    geometric mean.

    Read more in the :ref:`User Guide <classification_report>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.

    labels : array-like of shape (n_labels,), default=None
        Optional list of label indices to include in the report.

    target_names : list of str of shape (n_labels,), default=None
        Optional display names matching the labels (same order).

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    digits : int, default=2
        Number of digits for formatting output floating point values.
        When ``output_dict`` is ``True``, this will be ignored and the
        returned values will not be rounded.

    alpha : float, default=0.1
        Weighting factor.

    output_dict : bool, default=False
        If True, return output as dict.

        .. versionadded:: 0.8

    zero_division : "warn" or {0, 1}, default="warn"
        Sets the value to return when there is a zero division. If set to
        "warn", this acts as 0, but warnings are also raised.

        .. versionadded:: 0.8

    Returns
    -------
    report : string / dict
        Text summary of the precision, recall, specificity, geometric mean,
        and index balanced accuracy.
        Dictionary returned if output_dict is True. Dictionary has the
        following structure::

            {'label 1': {'pre':0.5,
                         'rec':1.0,
                         ...
                        },
             'label 2': { ... },
              ...
            }

    Examples
    --------
    >>> import numpy as np
    >>> from imblearn.metrics import classification_report_imbalanced
    >>> y_true = [0, 1, 2, 2, 2]
    >>> y_pred = [0, 0, 2, 2, 1]
    >>> target_names = ['class 0', 'class 1', 'class 2']
    >>> print(classification_report_imbalanced(y_true, y_pred, target_names=target_names))
                       pre       rec       spe        f1       geo       iba       sup
    <BLANKLINE>
        class 0       0.50      1.00      0.75      0.67      0.87      0.77         1
        class 1       0.00      0.00      0.75      0.00      0.00      0.00         1
        class 2       1.00      0.67      1.00      0.80      0.82      0.64         3
    <BLANKLINE>
    avg / total       0.70      0.60      0.90      0.61      0.66      0.54         5
    <BLANKLINE>
    """
    if labels is None:
        labels = unique_labels(y_true, y_pred)
    else:
        labels = np.asarray(labels)

    last_line_heading = "avg / total"

    if target_names is None:
        target_names = [f"{label}" for label in labels]
    name_width = max(len(cn) for cn in target_names)
    width = max(name_width, len(last_line_heading), digits)

    headers = ["pre", "rec", "spe", "f1", "geo", "iba", "sup"]
    fmt = "%% %ds" % width  # first column: class name
    fmt += "  "
    fmt += " ".join(["% 9s" for _ in headers])
    fmt += "\n"

    headers = [""] + headers
    report = fmt % tuple(headers)
    report += "\n"

    # compute the different metrics
    # precision/recall/f1
    precision, recall, f1, support = precision_recall_fscore_support(
        y_true,
        y_pred,
        labels=labels,
        average=None,
        sample_weight=sample_weight,
        zero_division=zero_division,
    )
    # specificity
    specificity = specificity_score(
        y_true, y_pred, labels=labels, average=None, sample_weight=sample_weight
    )
    # geometric mean
    geo_mean = geometric_mean_score(
        y_true, y_pred, labels=labels, average=None, sample_weight=sample_weight
    )
    # index balanced accuracy of the geometric mean
    iba_gmean = make_index_balanced_accuracy(alpha=alpha, squared=True)(
        geometric_mean_score
    )
    iba = iba_gmean(
        y_true, y_pred, labels=labels, average=None, sample_weight=sample_weight
    )

    report_dict = {}
    for i, label in enumerate(labels):
        report_dict_label = {}
        values = [target_names[i]]
        for score_name, score_value in zip(
            headers[1:-1],
            [precision[i], recall[i], specificity[i], f1[i], geo_mean[i], iba[i]],
        ):
            values += ["{0:0.{1}f}".format(score_value, digits)]
            report_dict_label[score_name] = score_value
        values += [f"{support[i]}"]
        report_dict_label[headers[-1]] = support[i]
        report += fmt % tuple(values)

        report_dict[target_names[i]] = report_dict_label

    report += "\n"

    # compute the averages
    values = [last_line_heading]
    for score_name, score_value in zip(
        headers[1:-1],
        [
            np.average(precision, weights=support),
            np.average(recall, weights=support),
            np.average(specificity, weights=support),
            np.average(f1, weights=support),
            np.average(geo_mean, weights=support),
            np.average(iba, weights=support),
        ],
    ):
        values += ["{0:0.{1}f}".format(score_value, digits)]
        report_dict[f"avg_{score_name}"] = score_value
    values += [f"{np.sum(support)}"]
    report += fmt % tuple(values)
    report_dict["total_support"] = np.sum(support)

    if output_dict:
        return report_dict
    return report


@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "sample_weight": ["array-like", None],
    },
    prefer_skip_nested_validation=True,
)
def macro_averaged_mean_absolute_error(y_true, y_pred, *, sample_weight=None):
    """Compute Macro-Averaged MAE for imbalanced ordinal classification.

    This function computes the MAE for each class and averages them,
    giving an equal weight to each class.

    Read more in the :ref:`User Guide <macro_averaged_mean_absolute_error>`.

    .. versionadded:: 0.8

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated targets as returned by a classifier.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    Returns
    -------
    loss : float or ndarray of floats
        Macro-Averaged MAE output is non-negative floating point.
        The best value is 0.0.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import mean_absolute_error
    >>> from imblearn.metrics import macro_averaged_mean_absolute_error
    >>> y_true_balanced = [1, 1, 2, 2]
    >>> y_true_imbalanced = [1, 2, 2, 2]
    >>> y_pred = [1, 2, 1, 2]
    >>> mean_absolute_error(y_true_balanced, y_pred)
    0.5
    >>> mean_absolute_error(y_true_imbalanced, y_pred)
    0.25
    >>> macro_averaged_mean_absolute_error(y_true_balanced, y_pred)
    0.5
    >>> macro_averaged_mean_absolute_error(y_true_imbalanced, y_pred)
    0.16...
    """
    _, y_true, y_pred = _check_targets(y_true, y_pred)
    if sample_weight is not None:
        sample_weight = column_or_1d(sample_weight)
    else:
        sample_weight = np.ones(y_true.shape)
    check_consistent_length(y_true, y_pred, sample_weight)
    labels = unique_labels(y_true, y_pred)
    mae = []
    for possible_class in labels:
        indices = np.flatnonzero(y_true == possible_class)

        mae.append(
            mean_absolute_error(
                y_true[indices],
                y_pred[indices],
                sample_weight=sample_weight[indices],
            )
        )

    return np.sum(mae) / len(mae)