
import warnings
from abc import ABCMeta, abstractmethod
from numbers import Integral, Real

import numpy as np
import scipy.sparse as sp

from ..base import BaseEstimator, ClassifierMixin, _fit_context
from ..exceptions import ConvergenceWarning, NotFittedError
from ..preprocessing import LabelEncoder
from ..utils import (
    check_array,
    check_random_state,
    column_or_1d,
    compute_class_weight,
)
from ..utils._param_validation import Interval, StrOptions
from ..utils.extmath import safe_sparse_dot
from ..utils.metaestimators import available_if
from ..utils.multiclass import _ovr_decision_function, check_classification_targets
from ..utils.validation import (
    _check_large_sparse,
    _check_sample_weight,
    _num_samples,
    check_consistent_length,
    check_is_fitted,
    validate_data,
)
from . import _liblinear as liblinear  # type: ignore
from . import _libsvm as libsvm  # type: ignore
from . import _libsvm_sparse as libsvm_sparse  # type: ignore

LIBSVM_IMPL = ["c_svc", "nu_svc", "one_class", "epsilon_svr", "nu_svr"]


def _one_vs_one_coef(dual_coef, n_support, support_vectors):
    """Generate primal coefficients from dual coefficients
    for the one-vs-one multi class LibSVM in the case
    of a linear kernel."""
    # Get 1vs1 weights for all n_class * (n_class - 1) / 2 classifiers.
    # The shape of dual_coef_ is (n_class - 1, n_SV).
    n_class = dual_coef.shape[0] + 1

    coef = []
    sv_locs = np.cumsum(np.hstack([[0], n_support]))
    for class1 in range(n_class):
        # support vectors for class1:
        sv1 = support_vectors[sv_locs[class1] : sv_locs[class1 + 1], :]
        for class2 in range(class1 + 1, n_class):
            # support vectors for class2:
            sv2 = support_vectors[sv_locs[class2] : sv_locs[class2 + 1], :]

            # dual coefficients for class1 SVs:
            alpha1 = dual_coef[class2 - 1, sv_locs[class1] : sv_locs[class1 + 1]]
            # dual coefficients for class2 SVs:
            alpha2 = dual_coef[class1, sv_locs[class2] : sv_locs[class2 + 1]]
            # build the primal weight vector for class1 vs class2
            coef.append(safe_sparse_dot(alpha1, sv1) + safe_sparse_dot(alpha2, sv2))
    return coef
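
# Illustrative sketch, not part of the scikit-learn source: a toy 3-class call
# to `_one_vs_one_coef`. The numbers are hypothetical; the point is the shapes.
# `dual_coef` has shape (n_classes - 1, n_SV), `n_support` gives per-class
# support-vector counts, and the helper returns one primal weight vector per
# class pair, i.e. n_classes * (n_classes - 1) / 2 vectors.
def _demo_one_vs_one_coef():
    rng = np.random.RandomState(0)
    n_support = np.array([2, 1, 2])           # SVs per class, total n_SV = 5
    support_vectors = rng.rand(5, 4)          # 5 support vectors, 4 features
    dual_coef = rng.rand(2, 5)                # shape (n_classes - 1, n_SV)
    coef = _one_vs_one_coef(dual_coef, n_support, support_vectors)
    assert len(coef) == 3                     # 3 * (3 - 1) / 2 pairwise models
    assert coef[0].shape == (4,)              # one weight vector per pair
    return coef
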
class BaseLibSVM(BaseEstimator, metaclass=ABCMeta):
    """Base class for estimators that use libsvm as backing library.

    This implements support vector machine classification and regression.

    Parameter documentation is in the derived `SVC` class.
    """

    _parameter_constraints: dict = {
        "kernel": [
            StrOptions({"linear", "poly", "rbf", "sigmoid", "precomputed"}),
            callable,
        ],
        "degree": [Interval(Integral, 0, None, closed="left")],
        "gamma": [
            StrOptions({"scale", "auto"}),
            Interval(Real, 0.0, None, closed="left"),
        ],
        "coef0": [Interval(Real, None, None, closed="neither")],
        "tol": [Interval(Real, 0.0, None, closed="neither")],
        "C": [Interval(Real, 0.0, None, closed="neither")],
        "nu": [Interval(Real, 0.0, 1.0, closed="right")],
        "epsilon": [Interval(Real, 0.0, None, closed="left")],
        "shrinking": ["boolean"],
        "probability": ["boolean"],
        "cache_size": [Interval(Real, 0, None, closed="neither")],
        "class_weight": [StrOptions({"balanced"}), dict, None],
        "verbose": ["verbose"],
        "max_iter": [Interval(Integral, -1, None, closed="left")],
        "random_state": ["random_state"],
    }

    # The order of these must match the integer values in LibSVM.
    _sparse_kernels = ["linear", "poly", "rbf", "sigmoid", "precomputed"]

    @abstractmethod
    def __init__(
        self,
        kernel,
        degree,
        gamma,
        coef0,
        tol,
        C,
        nu,
        epsilon,
        shrinking,
        probability,
        cache_size,
        class_weight,
        verbose,
        max_iter,
        random_state,
    ):
        if self._impl not in LIBSVM_IMPL:
            raise ValueError(
                "impl should be one of %s, %s was given" % (LIBSVM_IMPL, self._impl)
            )

        self.kernel = kernel
        self.degree = degree
        self.gamma = gamma
        self.coef0 = coef0
        self.tol = tol
        self.C = C
        self.nu = nu
        self.epsilon = epsilon
        self.shrinking = shrinking
        self.probability = probability
        self.cache_size = cache_size
        self.class_weight = class_weight
        self.verbose = verbose
        self.max_iter = max_iter
        self.random_state = random_state

    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        tags.input_tags.pairwise = self.kernel == "precomputed"
        tags.input_tags.sparse = self.kernel != "precomputed"
        return tags

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y, sample_weight=None):
        """Fit the SVM model according to the given training data.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features) \
                or (n_samples, n_samples)
            Training vectors, where `n_samples` is the number of samples
            and `n_features` is the number of features.
            For kernel="precomputed", the expected shape of X is
            (n_samples, n_samples).

        y : array-like of shape (n_samples,)
            Target values (class labels in classification, real numbers in
            regression).

        sample_weight : array-like of shape (n_samples,), default=None
            Per-sample weights. Rescale C per sample. Higher weights
            force the classifier to put more emphasis on these points.

        Returns
        -------
        self : object
            Fitted estimator.

        Notes
        -----
        If X and y are not C-ordered and contiguous arrays of np.float64 and
        X is not a scipy.sparse.csr_matrix, X and/or y may be copied.

        If X is a dense array, then the other methods will not support sparse
        matrices as input.
        """
        rnd = check_random_state(self.random_state)

        sparse = sp.issparse(X)
        if sparse and self.kernel == "precomputed":
            raise TypeError("Sparse precomputed kernels are not supported.")
        self._sparse = sparse and not callable(self.kernel)

        if callable(self.kernel):
            check_consistent_length(X, y)
        else:
            X, y = validate_data(
                self,
                X,
                y,
                dtype=np.float64,
                order="C",
                accept_sparse="csr",
                accept_large_sparse=False,
            )

        y = self._validate_targets(y)

        sample_weight = np.asarray(
            [] if sample_weight is None else sample_weight, dtype=np.float64
        )
        solver_type = LIBSVM_IMPL.index(self._impl)

        # input validation
        n_samples = _num_samples(X)
        if solver_type != 2 and n_samples != y.shape[0]:
            raise ValueError(
                "X and y have incompatible shapes.\n"
                + "X has %s samples, but y has %s." % (n_samples, y.shape[0])
            )

        if self.kernel == "precomputed" and n_samples != X.shape[1]:
            raise ValueError(
                "Precomputed matrix must be a square matrix."
                " Input is a {}x{} matrix.".format(X.shape[0], X.shape[1])
            )

        if sample_weight.shape[0] > 0 and sample_weight.shape[0] != n_samples:
            raise ValueError(
                "sample_weight and X have incompatible shapes: "
                "%r vs %r\n"
                "Note: Sparse matrices cannot be indexed w/"
                "boolean masks (use `indices=True` in CV)."
                % (sample_weight.shape, X.shape)
            )

        kernel = "precomputed" if callable(self.kernel) else self.kernel

        if kernel == "precomputed":
            # unused but needs to be a float for the cython code, which
            # ignores it anyway
            self._gamma = 0.0
        elif isinstance(self.gamma, str):
            if self.gamma == "scale":
                # var = E[X^2] - E[X]^2 if sparse
                X_var = (X.multiply(X)).mean() - (X.mean()) ** 2 if sparse else X.var()
                self._gamma = 1.0 / (X.shape[1] * X_var) if X_var != 0 else 1.0
            elif self.gamma == "auto":
                self._gamma = 1.0 / X.shape[1]
        elif isinstance(self.gamma, Real):
            self._gamma = self.gamma

        fit = self._sparse_fit if self._sparse else self._dense_fit
        if self.verbose:
            print("[LibSVM]", end="")

        seed = rnd.randint(np.iinfo("i").max)
        fit(X, y, sample_weight, solver_type, kernel, random_seed=seed)

        self.shape_fit_ = X.shape if hasattr(X, "shape") else (n_samples,)

        # In the binary case, we need to flip the sign of coef, intercept and
        # decision function. Use self._intercept_ and self._dual_coef_
        # internally.
        self._intercept_ = self.intercept_.copy()
        self._dual_coef_ = self.dual_coef_
        if self._impl in ["c_svc", "nu_svc"] and len(self.classes_) == 2:
            self.intercept_ *= -1
            self.dual_coef_ = -self.dual_coef_

        dual_coef = self._dual_coef_.data if self._sparse else self._dual_coef_
        intercept_finiteness = np.isfinite(self._intercept_).all()
        dual_coef_finiteness = np.isfinite(dual_coef).all()
        if not (intercept_finiteness and dual_coef_finiteness):
            raise ValueError(
                "The dual coefficients or intercepts are not finite."
                " The input data may contain large values and need to be"
                " preprocessed."
            )

        # For SVC and NuSVC, libSVM may optimize several models, so `n_iter_`
        # stores an ndarray; for the other models a single fit is performed
        # and `n_iter_` stores an integer.
        if self._impl in ["c_svc", "nu_svc"]:
            self.n_iter_ = self._num_iter
        else:
            self.n_iter_ = self._num_iter.item()

        return self

    def _validate_targets(self, y):
        """Validation of y and class_weight.

        Default implementation for SVR and one-class; overridden in BaseSVC.
        """
        return column_or_1d(y, warn=True).astype(np.float64, copy=False)

    def _warn_from_fit_status(self):
        assert self.fit_status_ in (0, 1)
        if self.fit_status_ == 1:
            warnings.warn(
                "Solver terminated early (max_iter=%i)."
                "  Consider pre-processing your data with"
                " StandardScaler or MinMaxScaler." % self.max_iter,
                ConvergenceWarning,
            )

    def _dense_fit(self, X, y, sample_weight, solver_type, kernel, random_seed):
        if callable(self.kernel):
            # a reference to X is kept to compute the kernel in predict
            self.__Xfit = X
            X = self._compute_kernel(X)

            if X.shape[0] != X.shape[1]:
                raise ValueError("X.shape[0] should be equal to X.shape[1]")

        libsvm.set_verbosity_wrap(self.verbose)

        # we don't pass **self.get_params() to allow subclasses to add other
        # parameters to __init__
        (
            self.support_,
            self.support_vectors_,
            self._n_support,
            self.dual_coef_,
            self.intercept_,
            self._probA,
            self._probB,
            self.fit_status_,
            self._num_iter,
        ) = libsvm.fit(
            X,
            y,
            svm_type=solver_type,
            sample_weight=sample_weight,
            class_weight=getattr(self, "class_weight_", np.empty(0)),
            kernel=kernel,
            C=self.C,
            nu=self.nu,
            probability=self.probability,
            degree=self.degree,
            shrinking=self.shrinking,
            tol=self.tol,
            cache_size=self.cache_size,
            coef0=self.coef0,
            gamma=self._gamma,
            epsilon=self.epsilon,
            max_iter=self.max_iter,
            random_seed=random_seed,
        )

        self._warn_from_fit_status()

    def _sparse_fit(self, X, y, sample_weight, solver_type, kernel, random_seed):
        X.data = np.asarray(X.data, dtype=np.float64, order="C")
        X.sort_indices()

        kernel_type = self._sparse_kernels.index(kernel)

        libsvm_sparse.set_verbosity_wrap(self.verbose)

        (
            self.support_,
            self.support_vectors_,
            dual_coef_data,
            self.intercept_,
            self._n_support,
            self._probA,
            self._probB,
            self.fit_status_,
            self._num_iter,
        ) = libsvm_sparse.libsvm_sparse_train(
            X.shape[1],
            X.data,
            X.indices,
            X.indptr,
            y,
            solver_type,
            kernel_type,
            self.degree,
            self._gamma,
            self.coef0,
            self.tol,
            self.C,
            getattr(self, "class_weight_", np.empty(0)),
            sample_weight,
            self.nu,
            self.cache_size,
            self.epsilon,
            int(self.shrinking),
            int(self.probability),
            self.max_iter,
            random_seed,
        )

        self._warn_from_fit_status()

        if hasattr(self, "classes_"):
            n_class = len(self.classes_) - 1
        else:  # regression
            n_class = 1
        n_SV = self.support_vectors_.shape[0]

        dual_coef_indices = np.tile(np.arange(n_SV), n_class)
        if not n_SV:
            self.dual_coef_ = sp.csr_matrix([])
        else:
            dual_coef_indptr = np.arange(
                0, dual_coef_indices.size + 1, dual_coef_indices.size / n_class
            )
            self.dual_coef_ = sp.csr_matrix(
                (dual_coef_data, dual_coef_indices, dual_coef_indptr),
                (n_class, n_SV),
            )

    def predict(self, X):
        """Perform regression on samples in X.

        For a one-class model, +1 (inlier) or -1 (outlier) is returned.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            For kernel="precomputed", the expected shape of X is
            (n_samples_test, n_samples_train).

        Returns
        -------
        y_pred : ndarray of shape (n_samples,)
            The predicted values.
        """
        X = self._validate_for_predict(X)
        predict = self._sparse_predict if self._sparse else self._dense_predict
        return predict(X)

    def _dense_predict(self, X):
        X = self._compute_kernel(X)
        if X.ndim == 1:
            X = check_array(X, order="C", accept_large_sparse=False)

        kernel = self.kernel
        if callable(self.kernel):
            kernel = "precomputed"
            if X.shape[1] != self.shape_fit_[0]:
                raise ValueError(
                    "X.shape[1] = %d should be equal to %d, "
                    "the number of samples at training time"
                    % (X.shape[1], self.shape_fit_[0])
                )

        svm_type = LIBSVM_IMPL.index(self._impl)

        return libsvm.predict(
            X,
            self.support_,
            self.support_vectors_,
            self._n_support,
            self._dual_coef_,
            self._intercept_,
            self._probA,
            self._probB,
            svm_type=svm_type,
            kernel=kernel,
            degree=self.degree,
            coef0=self.coef0,
            gamma=self._gamma,
            cache_size=self.cache_size,
        )

    def _sparse_predict(self, X):
        # Precondition: X is a csr_matrix of dtype np.float64.
        kernel = self.kernel
        if callable(kernel):
            kernel = "precomputed"

        kernel_type = self._sparse_kernels.index(kernel)

        C = 0.0  # C is not useful here

        return libsvm_sparse.libsvm_sparse_predict(
            X.data,
            X.indices,
            X.indptr,
            self.support_vectors_.data,
            self.support_vectors_.indices,
            self.support_vectors_.indptr,
            self._dual_coef_.data,
            self._intercept_,
            LIBSVM_IMPL.index(self._impl),
            kernel_type,
            self.degree,
            self._gamma,
            self.coef0,
            self.tol,
            C,
            getattr(self, "class_weight_", np.empty(0)),
            self.nu,
            self.epsilon,
            self.shrinking,
            self.probability,
            self._n_support,
            self._probA,
            self._probB,
        )

    def _compute_kernel(self, X):
        """Return the data transformed by a callable kernel"""
        if callable(self.kernel):
            # in the case of a precomputed kernel given as a function, we
            # have to compute explicitly the kernel matrix
            kernel = self.kernel(X, self.__Xfit)
            if sp.issparse(kernel):
                kernel = kernel.toarray()
            X = np.asarray(kernel, dtype=np.float64, order="C")
        return X

    def _decision_function(self, X):
        """Evaluates the decision function for the samples in X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)

        Returns
        -------
        X : array-like of shape (n_samples, n_class * (n_class-1) / 2)
            Returns the decision function of the sample for each class
            in the model.
        """
        # NOTE: _validate_for_predict contains the is_fitted check, hence it
        # must be called before accessing any fitted attribute.
        X = self._validate_for_predict(X)
        X = self._compute_kernel(X)

        if self._sparse:
            dec_func = self._sparse_decision_function(X)
        else:
            dec_func = self._dense_decision_function(X)

        # In the binary case, we need to flip the sign of the decision
        # function.
        if self._impl in ["c_svc", "nu_svc"] and len(self.classes_) == 2:
            return -dec_func.ravel()

        return dec_func

    def _dense_decision_function(self, X):
        X = check_array(X, dtype=np.float64, order="C", accept_large_sparse=False)

        kernel = self.kernel
        if callable(kernel):
            kernel = "precomputed"

        return libsvm.decision_function(
            X,
            self.support_,
            self.support_vectors_,
            self._n_support,
            self._dual_coef_,
            self._intercept_,
            self._probA,
            self._probB,
            svm_type=LIBSVM_IMPL.index(self._impl),
            kernel=kernel,
            degree=self.degree,
            cache_size=self.cache_size,
            coef0=self.coef0,
            gamma=self._gamma,
        )

    def _sparse_decision_function(self, X):
        X.data = np.asarray(X.data, dtype=np.float64, order="C")

        kernel = self.kernel
        if hasattr(kernel, "__call__"):
            kernel = "precomputed"

        kernel_type = self._sparse_kernels.index(kernel)

        return libsvm_sparse.libsvm_sparse_decision_function(
            X.data,
            X.indices,
            X.indptr,
            self.support_vectors_.data,
            self.support_vectors_.indices,
            self.support_vectors_.indptr,
            self._dual_coef_.data,
            self._intercept_,
            LIBSVM_IMPL.index(self._impl),
            kernel_type,
            self.degree,
            self._gamma,
            self.coef0,
            self.tol,
            self.C,
            getattr(self, "class_weight_", np.empty(0)),
            self.nu,
            self.epsilon,
            self.shrinking,
            self.probability,
            self._n_support,
            self._probA,
            self._probB,
        )

    def _validate_for_predict(self, X):
        check_is_fitted(self)

        if not callable(self.kernel):
            X = validate_data(
                self,
                X,
                accept_sparse="csr",
                dtype=np.float64,
                order="C",
                accept_large_sparse=False,
                reset=False,
            )

        if self._sparse and not sp.issparse(X):
            X = sp.csr_matrix(X)
        if self._sparse:
            X.sort_indices()

        if sp.issparse(X) and not self._sparse and not callable(self.kernel):
            raise ValueError(
                "cannot use sparse input in %r trained on dense data"
                % type(self).__name__
            )

        if self.kernel == "precomputed":
            if X.shape[1] != self.shape_fit_[0]:
                raise ValueError(
                    "X.shape[1] = %d should be equal to %d, "
                    "the number of samples at training time"
                    % (X.shape[1], self.shape_fit_[0])
                )
        # Fixes https://nvd.nist.gov/vuln/detail/CVE-2020-28975
        # Check that _n_support is consistent with support_vectors
        sv = self.support_vectors_
        if not self._sparse and sv.size > 0 and self.n_support_.sum() != sv.shape[0]:
            raise ValueError(
                f"The internal representation of {self.__class__.__name__} was altered"
            )
        return X

    @property
    def coef_(self):
        """Weights assigned to the features when `kernel="linear"`.

        Returns
        -------
        ndarray of shape (n_features, n_classes)
        """
        if self.kernel != "linear":
            raise AttributeError("coef_ is only available when using a linear kernel")

        coef = self._get_coef()

        # coef_ being a read-only property, the value is marked as immutable
        # to avoid hiding potential bugs from the unsuspecting user.
        if sp.issparse(coef):
            # sparse matrices do not have global flags
            coef.data.flags.writeable = False
        else:
            # regular dense array
            coef.flags.writeable = False
        return coef

    def _get_coef(self):
        return safe_sparse_dot(self._dual_coef_, self.support_vectors_)

    @property
    def n_support_(self):
        """Number of support vectors for each class."""
        try:
            check_is_fitted(self)
        except NotFittedError:
            raise AttributeError

        svm_type = LIBSVM_IMPL.index(self._impl)
        if svm_type in (0, 1):
            return self._n_support
        else:
            # SVR and OneClass: _n_support has size 2, we make it size 1
            return np.array([self._n_support[0]])
class BaseSVC(ClassifierMixin, BaseLibSVM, metaclass=ABCMeta):
    """ABC for LibSVM-based classifiers."""

    _parameter_constraints: dict = {
        **BaseLibSVM._parameter_constraints,
        "decision_function_shape": [StrOptions({"ovr", "ovo"})],
        "break_ties": ["boolean"],
    }
    for unused_param in ["epsilon", "nu"]:
        _parameter_constraints.pop(unused_param)

    @abstractmethod
    def __init__(
        self,
        kernel,
        degree,
        gamma,
        coef0,
        tol,
        C,
        nu,
        shrinking,
        probability,
        cache_size,
        class_weight,
        verbose,
        max_iter,
        decision_function_shape,
        random_state,
        break_ties,
    ):
        self.decision_function_shape = decision_function_shape
        self.break_ties = break_ties
        super().__init__(
            kernel=kernel,
            degree=degree,
            gamma=gamma,
            coef0=coef0,
            tol=tol,
            C=C,
            nu=nu,
            epsilon=0.0,
            shrinking=shrinking,
            probability=probability,
            cache_size=cache_size,
            class_weight=class_weight,
            verbose=verbose,
            max_iter=max_iter,
            random_state=random_state,
        )

    def _validate_targets(self, y):
        y_ = column_or_1d(y, warn=True)
        check_classification_targets(y_)
        cls, y = np.unique(y_, return_inverse=True)
        self.class_weight_ = compute_class_weight(self.class_weight, classes=cls, y=y_)
        if len(cls) < 2:
            raise ValueError(
                "The number of classes has to be greater than one; got %d class"
                % len(cls)
            )

        self.classes_ = cls

        return np.asarray(y, dtype=np.float64, order="C")

    def decision_function(self, X):
        """Evaluate the decision function for the samples in X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The input samples.

        Returns
        -------
        X : ndarray of shape (n_samples, n_classes * (n_classes-1) / 2)
            Returns the decision function of the sample for each class
            in the model.
            If decision_function_shape='ovr', the shape is (n_samples,
            n_classes).

        Notes
        -----
        If decision_function_shape='ovo', the function values are proportional
        to the distance of the samples X to the separating hyperplane. If the
        exact distances are required, divide the function values by the norm of
        the weight vector (``coef_``). See also `this question
        <https://stats.stackexchange.com/questions/14876/
        interpreting-distance-from-hyperplane-in-svm>`_ for further details.
        If decision_function_shape='ovr', the decision function is a monotonic
        transformation of the ovo decision function.
        """
        dec = self._decision_function(X)
        if self.decision_function_shape == "ovr" and len(self.classes_) > 2:
            return _ovr_decision_function(dec < 0, -dec, len(self.classes_))
        return dec

    def predict(self, X):
        """Perform classification on samples in X.

        For a one-class model, +1 or -1 is returned.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
                (n_samples_test, n_samples_train)
            For kernel="precomputed", the expected shape of X is
            (n_samples_test, n_samples_train).

        Returns
        -------
        y_pred : ndarray of shape (n_samples,)
            Class labels for samples in X.
        """
        check_is_fitted(self)
        if self.break_ties and self.decision_function_shape == "ovo":
            raise ValueError(
                "break_ties must be False when decision_function_shape is 'ovo'"
            )

        if (
            self.break_ties
            and self.decision_function_shape == "ovr"
            and len(self.classes_) > 2
        ):
            y = np.argmax(self.decision_function(X), axis=1)
        else:
            y = super().predict(X)
        return self.classes_.take(np.asarray(y, dtype=np.intp))

    # Hacky way of getting predict_proba to raise an AttributeError when
    # probability=False, using properties via `available_if`.
    def _check_proba(self):
        if not self.probability:
            raise AttributeError(
                "predict_proba is not available when probability=False"
            )
        if self._impl not in ("c_svc", "nu_svc"):
            raise AttributeError("predict_proba only implemented for SVC and NuSVC")
        return True

    @available_if(_check_proba)
    def predict_proba(self, X):
        """Compute probabilities of possible outcomes for samples in X.

        The model needs to have probability information computed at training
        time: fit with attribute `probability` set to True.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            For kernel="precomputed", the expected shape of X is
            (n_samples_test, n_samples_train).

        Returns
        -------
        T : ndarray of shape (n_samples, n_classes)
            Returns the probability of the sample for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute :term:`classes_`.

        Notes
        -----
        The probability model is created using cross validation, so
        the results can be slightly different than those obtained by
        predict. Also, it will produce meaningless results on very small
        datasets.
        """
        X = self._validate_for_predict(X)
        if self.probA_.size == 0 or self.probB_.size == 0:
            raise NotFittedError(
                "predict_proba is not available when fitted with probability=False"
            )
        pred_proba = (
            self._sparse_predict_proba if self._sparse else self._dense_predict_proba
        )
        return pred_proba(X)

    @available_if(_check_proba)
    def predict_log_proba(self, X):
        """Compute log probabilities of possible outcomes for samples in X.

        The model needs to have probability information computed at training
        time: fit with attribute `probability` set to True.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features) or \
                (n_samples_test, n_samples_train)
            For kernel="precomputed", the expected shape of X is
            (n_samples_test, n_samples_train).

        Returns
        -------
        T : ndarray of shape (n_samples, n_classes)
            Returns the log-probabilities of the sample for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute :term:`classes_`.

        Notes
        -----
        The probability model is created using cross validation, so
        the results can be slightly different than those obtained by
        predict. Also, it will produce meaningless results on very small
        datasets.
        """
        return np.log(self.predict_proba(X))

    def _dense_predict_proba(self, X):
        X = self._compute_kernel(X)

        kernel = self.kernel
        if callable(kernel):
            kernel = "precomputed"

        svm_type = LIBSVM_IMPL.index(self._impl)
        pprob = libsvm.predict_proba(
            X,
            self.support_,
            self.support_vectors_,
            self._n_support,
            self._dual_coef_,
            self._intercept_,
            self._probA,
            self._probB,
            svm_type=svm_type,
            kernel=kernel,
            degree=self.degree,
            cache_size=self.cache_size,
            coef0=self.coef0,
            gamma=self._gamma,
        )

        return pprob

    def _sparse_predict_proba(self, X):
        X.data = np.asarray(X.data, dtype=np.float64, order="C")

        kernel = self.kernel
        if callable(kernel):
            kernel = "precomputed"

        kernel_type = self._sparse_kernels.index(kernel)

        return libsvm_sparse.libsvm_sparse_predict_proba(
            X.data,
            X.indices,
            X.indptr,
            self.support_vectors_.data,
            self.support_vectors_.indices,
            self.support_vectors_.indptr,
            self._dual_coef_.data,
            self._intercept_,
            LIBSVM_IMPL.index(self._impl),
            kernel_type,
            self.degree,
            self._gamma,
            self.coef0,
            self.tol,
            self.C,
            getattr(self, "class_weight_", np.empty(0)),
            self.nu,
            self.epsilon,
            self.shrinking,
            self.probability,
            self._n_support,
            self._probA,
            self._probB,
        )

    def _get_coef(self):
        if self.dual_coef_.shape[0] == 1:
            # binary classifier
            coef = safe_sparse_dot(self._dual_coef_, self.support_vectors_)
        else:
            # 1vs1 classifier
            coef = _one_vs_one_coef(
                self.dual_coef_, self._n_support, self.support_vectors_
            )
            if sp.issparse(coef[0]):
                coef = sp.vstack(coef).tocsr()
            else:
                coef = np.vstack(coef)

        return coef

    @property
    def probA_(self):
        """Parameter learned in Platt scaling when `probability=True`.

        Returns
        -------
        ndarray of shape  (n_classes * (n_classes - 1) / 2)
        """
        return self._probA

    @property
    def probB_(self):
        """Parameter learned in Platt scaling when `probability=True`.

        Returns
        -------
        ndarray of shape  (n_classes * (n_classes - 1) / 2)
        """
        return self._probB

    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        tags.input_tags.sparse = self.kernel != "precomputed"
        return tags
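
# Illustrative sketch, not part of the scikit-learn source: how
# `decision_function_shape` changes the output shape of
# `BaseSVC.decision_function`, and how Platt scaling (probability=True)
# enables `predict_proba`. The dataset parameters are hypothetical.
def _demo_basesvc_decision_and_proba():
    from sklearn.datasets import make_classification
    from sklearn.svm import SVC

    X, y = make_classification(
        n_samples=120, n_features=8, n_informative=6, n_classes=4, random_state=0
    )

    ovo = SVC(decision_function_shape="ovo").fit(X, y)
    ovr = SVC(decision_function_shape="ovr").fit(X, y)
    assert ovo.decision_function(X).shape == (120, 6)   # 4 * 3 / 2 pairwise columns
    assert ovr.decision_function(X).shape == (120, 4)   # one column per class

    # probA_/probB_ are only populated (and predict_proba only available)
    # when probability=True; the probabilities come from internal cross
    # validation and may disagree slightly with predict.
    prob = SVC(probability=True, random_state=0).fit(X, y)
    assert prob.predict_proba(X).shape == (120, 4)
    return ovo, ovr, prob
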
         ddiddddddd	iidd
idddddddiiddddidd}| dk    r||          S | dk    rt          d| z            |                    |d          }|d|z  }nH|                    |d          }|
d|d|d}n&|                    |d          }|d|d|d|}n|S t          d|d|d|d|          )a  Find the liblinear magic number for the solver.

    This number depends on the values of the following attributes:
      - multi_class
      - penalty
      - loss
      - dual

    The same number is also internally used by LibLinear to determine
    which solver to use.
    F   r      )FT)l1l2rA  T      r   r               )logistic_regressionhingesquared_hingeepsilon_insensitivesquared_epsilon_insensitivecrammer_singerrM  r  z<`multi_class` must be one of `ovr`, `crammer_singer`, got %rNzloss='%s' is not supportedzThe combination of penalty='z' and loss='z' is not supportedz' are not supported when dual=zUnsupported set of arguments: z, Parameters: penalty=z, loss=z, dual=)rc   get)	multi_classpenaltylossdual_solver_type_dict_solver_penerror_string_solver_dual
solver_nums	            r:   _get_liblinear_solver_typerX    s   " (-aj8K8KLLq	"!&
!12E2EFF $tRj1(,b.C.C'D  &&& --			J[X
 
 	
 $''d33K3d:"w55 77DDD" L
 &))$55J!! CJ''444QUQUW 
 "!
*<<$$$	.  r<   r  rH  皙?c                    |dvrht                      }|                    |          }|j        }t          |          dk     rt	          d|d         z            t          |||          }n"t          j        dt          j                  }|}t          j
        |           t          |          }|rt          dd	           d
}|r|dk    rt	          d|z            |}t          j
        |           t          j
        |           t          j
        |           t          j        |           rt#          |            t          j        |t          j                                                  }t          j        |d          }t+          || t          j                  }t-          ||||          }t          j        | |t          j        |           ||
||||	|                    t          j        d          j                  ||          \  }}t5          |          }||	k    rt7          j        dt:                     |r|ddddf         }||dddf         z  }n|}d}|||fS )a  Used by Logistic Regression (and CV) and LinearSVC/LinearSVR.

    Preprocessing is done in this function before supplying it to liblinear.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training vector, where `n_samples` is the number of samples and
        `n_features` is the number of features.

    y : array-like of shape (n_samples,)
        Target vector relative to X

    C : float
        Inverse of cross-validation parameter. The lower the C, the higher
        the penalization.

    fit_intercept : bool
        Whether or not to fit an intercept. If set to True, the feature vector
        is extended to include an intercept term: ``[x_1, ..., x_n, 1]``, where
        1 corresponds to the intercept. If set to False, no intercept will be
        used in calculations (i.e. data is expected to be already centered).

    intercept_scaling : float
        Liblinear internally penalizes the intercept, treating it like any
        other term in the feature vector. To reduce the impact of the
        regularization on the intercept, the `intercept_scaling` parameter can
        be set to a value greater than 1; the higher the value of
        `intercept_scaling`, the lower the impact of regularization on it.
        Then, the weights become `[w_x_1, ..., w_x_n,
        w_intercept*intercept_scaling]`, where `w_x_1, ..., w_x_n` represent
        the feature weights and the intercept weight is scaled by
        `intercept_scaling`. This scaling allows the intercept term to have a
        different regularization behavior compared to the other features.

    class_weight : dict or 'balanced', default=None
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one. For
        multi-output problems, a list of dicts can be provided in the same
        order as the columns of y.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

    penalty : {'l1', 'l2'}
        The norm of the penalty used in regularization.

    dual : bool
        Dual or primal formulation,

    verbose : int
        Set verbose to any positive number for verbosity.

    max_iter : int
        Number of iterations.

    tol : float
        Stopping condition.

    random_state : int, RandomState instance or None, default=None
        Controls the pseudo random number generation for shuffling the data.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    multi_class : {'ovr', 'crammer_singer'}, default='ovr'
        `ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
        optimizes a joint objective over all classes.
        While `crammer_singer` is interesting from an theoretical perspective
        as it is consistent it is seldom used in practice and rarely leads to
        better accuracy and is more expensive to compute.
        If `crammer_singer` is chosen, the options loss, penalty and dual will
        be ignored.

    loss : {'logistic_regression', 'hinge', 'squared_hinge',             'epsilon_insensitive', 'squared_epsilon_insensitive},             default='logistic_regression'
        The loss function used to fit the model.

    epsilon : float, default=0.1
        Epsilon parameter in the epsilon-insensitive loss function. Note
        that the value of this parameter depends on the scale of the target
        variable y. If unsure, set epsilon=0.

    sample_weight : array-like of shape (n_samples,), default=None
        Weights assigned to each sample.

    Returns
    -------
    coef_ : ndarray of shape (n_features, n_features + 1)
        The coefficient vector got by minimizing the objective function.

    intercept_ : float
        The intercept term added to the vector.

    n_iter_ : array of int
        Number of iterations run across for each class.
    )rK  rL  r   zeThis solver needs samples of at least 2 classes in the data, but the data contains only one class: %rr   r  rw   z[LibLinear]ry   rz   g      zqIntercept scaling is %r but needs to be greater than 0. To disable fitting an intercept, set fit_intercept=False.W)requirementsr|   z@Liblinear failed to converge, increase the number of iterations.NrO   rH   )r   fit_transformr   r   rc   r   r)   r   r   	liblinearr   r   r   r   r   r   r   r   r   r   requirer   rX  
train_wrapr   r   r   r   r   r   )r   r   rW   fit_interceptintercept_scalingr]   rP  rR  rN   r^   rV   rP   rO  rQ  rY   r   ency_indr   r   r   biasr   	raw_coef_r   
n_iter_maxr   r   s                               r:   _fit_liblinearrh  *  s   h IIInn!!!$$<x==1'{+   -\8qQQQ"*555 )))
\
*
*C %m$$$$ D %!!,.?@   %D
g&&&$W--- ))) 
{1~~ A JuBJ///5577EJu3///E(LLLM,['4NNK"-	
A	BHSMM%&& Iw$ WJXN	
 	
 	

  !!!SbS&!&111b5)99


*g%%r<   )Nr  rH  rY  N)8r   abcr   r   numbersr   r   numpyr)   scipy.sparserl   r   baser   r	   r
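
# Illustrative sketches, not part of the scikit-learn source. The first shows
# the liblinear solver ids returned by `_get_liblinear_solver_type`; for
# example the classic LinearSVC combination (ovr, l2 penalty, squared_hinge
# loss, dual formulation) maps to solver 1, and crammer_singer ignores
# penalty/loss/dual entirely.
def _demo_liblinear_solver_type():
    assert _get_liblinear_solver_type("ovr", "l2", "squared_hinge", True) == 1
    assert _get_liblinear_solver_type("ovr", "l2", "logistic_regression", False) == 0
    assert _get_liblinear_solver_type("crammer_singer", "l2", "hinge", True) == 4
    try:
        _get_liblinear_solver_type("ovr", "l1", "hinge", True)
    except ValueError:
        pass  # l1-penalized hinge loss is not supported by liblinear


# Illustrative sketch, not part of the scikit-learn source: a direct call to
# `_fit_liblinear` with the signature defined above, using LinearSVC-style
# settings. The toy data is hypothetical; estimators such as LinearSVC,
# LinearSVR and LogisticRegression normally wrap this helper.
def _demo_fit_liblinear():
    rng = np.random.RandomState(0)
    X = rng.rand(20, 3)
    y = (X[:, 0] + X[:, 1] > 1.0).astype(np.float64)

    coef_, intercept_, n_iter_ = _fit_liblinear(
        X,
        y,
        C=1.0,
        fit_intercept=True,
        intercept_scaling=1.0,
        class_weight=None,
        penalty="l2",
        dual=True,
        verbose=0,
        max_iter=1000,
        tol=1e-4,
        random_state=0,
        multi_class="ovr",
        loss="squared_hinge",
    )
    # For a binary problem coef_ has a single row of feature weights and
    # intercept_ holds the rescaled bias term.
    assert coef_.shape == (1, 3)
    return coef_, intercept_, n_iter_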
   
exceptionsr   r   preprocessingr   utilsr   r   r   r   utils._param_validationr   r   utils.extmathr   utils.metaestimatorsr   utils.multiclassr   r   utils.validationr   r   r   r   r   r   ry   r   r^  r    r   r!   r   rb   r;   r>   r  rX  rh   r<   r:   <module>rw     s    ' ' ' ' ' ' ' ' " " " " " " " "           ? ? ? ? ? ? ? ? ? ? ; ; ; ; ; ; ; ; ( ( ( ( ( ( W W W W W W W W W W W W : : : : : : : : + + + + + + / / / / / / S S S S S S S S                & % % % % %        - - - - - -GGG  @n	2 n	2 n	2 n	2 n	2' n	2 n	2 n	2 n	2bx x x x xozW x x x xv	6 6 6J 	!C& C& C& C& C& C&r<   