"""Routines for numerical differentiation."""
import functools

import numpy as np
from numpy.linalg import norm

from scipy.sparse.linalg import LinearOperator
from ..sparse import issparse, csc_matrix, csr_matrix, coo_matrix, find
from ._group_columns import group_dense, group_sparse
from scipy._lib._array_api import array_namespace
from scipy._lib import array_api_extra as xpx


def _adjust_scheme_to_bounds(x0, h, num_steps, scheme, lb, ub):
    """Adjust final difference scheme to the presence of bounds.

    Parameters
    ----------
    x0 : ndarray, shape (n,)
        Point at which we wish to estimate derivative.
    h : ndarray, shape (n,)
        Desired absolute finite difference steps.
    num_steps : int
        Number of `h` steps in one direction required to implement finite
        difference scheme. For example, 2 means that we need to evaluate
        f(x0 + 2 * h) or f(x0 - 2 * h)
    scheme : {'1-sided', '2-sided'}
        Whether steps in one or both directions are required. In other
        words '1-sided' applies to forward and backward schemes, '2-sided'
        applies to center schemes.
    lb : ndarray, shape (n,)
        Lower bounds on independent variables.
    ub : ndarray, shape (n,)
        Upper bounds on independent variables.

    Returns
    -------
    h_adjusted : ndarray, shape (n,)
        Adjusted absolute step sizes. Step size decreases only if a sign flip
        or switching to the one-sided scheme does not allow a full step.
    use_one_sided : ndarray of bool, shape (n,)
        Whether to switch to one-sided scheme. Informative only for
        ``scheme='2-sided'``.
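
    Examples
    --------
    A minimal sketch (the numbers are purely illustrative): near the upper
    bound a forward step that would leave the feasible region is flipped
    to a backward step.

    >>> import numpy as np
    >>> h, use_one_sided = _adjust_scheme_to_bounds(
    ...     np.array([0.9]), np.array([0.2]), 1, '1-sided',
    ...     np.array([0.0]), np.array([1.0]))
    >>> h
    array([-0.2])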
    """
    if scheme == '1-sided':
        use_one_sided = np.ones_like(h, dtype=bool)
    elif scheme == '2-sided':
        h = np.abs(h)
        use_one_sided = np.zeros_like(h, dtype=bool)
    else:
        raise ValueError("`scheme` must be '1-sided' or '2-sided'.")

    if np.all((lb == -np.inf) & (ub == np.inf)):
        return h, use_one_sided

    h_total = h * num_steps
    h_adjusted = h.copy()

    lower_dist = x0 - lb
    upper_dist = ub - x0

    if scheme == '1-sided':
        x = x0 + h_total
        violated = (x < lb) | (x > ub)
        fitting = np.abs(h_total) <= np.maximum(lower_dist, upper_dist)
        h_adjusted[violated & fitting] *= -1

        forward = (upper_dist >= lower_dist) & ~fitting
        h_adjusted[forward] = upper_dist[forward] / num_steps
        backward = (upper_dist < lower_dist) & ~fitting
        h_adjusted[backward] = -lower_dist[backward] / num_steps
    elif scheme == '2-sided':
        central = (lower_dist >= h_total) & (upper_dist >= h_total)

        forward = (upper_dist >= lower_dist) & ~central
        h_adjusted[forward] = np.minimum(
            h[forward], 0.5 * upper_dist[forward] / num_steps)
        use_one_sided[forward] = True

        backward = (upper_dist < lower_dist) & ~central
        h_adjusted[backward] = -np.minimum(
            h[backward], 0.5 * lower_dist[backward] / num_steps)
        use_one_sided[backward] = True

        min_dist = np.minimum(upper_dist, lower_dist) / num_steps
        adjusted_central = (~central & (np.abs(h_adjusted) <= min_dist))
        h_adjusted[adjusted_central] = min_dist[adjusted_central]
        use_one_sided[adjusted_central] = False

    return h_adjusted, use_one_sided


@functools.lru_cache
def _eps_for_method(x0_dtype, f0_dtype, method):
    """
    Calculates relative EPS step to use for a given data type
    and numdiff step method.

    Progressively smaller steps are used for larger floating point types.

    Parameters
    ----------
    f0_dtype: np.dtype
        dtype of function evaluation

    x0_dtype: np.dtype
        dtype of parameter vector

    method: {'2-point', '3-point', 'cs'}

    Returns
    -------
    EPS: float
        relative step size. May be np.float16, np.float32, np.float64

    Notes
    -----
    The default relative step will be np.float64. However, if x0 or f0 are
    smaller floating point types (np.float16, np.float32), then the smallest
    floating point type is chosen.
    """
    # the default EPS value
    EPS = np.finfo(np.float64).eps

    x0_is_fp = False
    if np.issubdtype(x0_dtype, np.inexact):
        # if you're a floating point type then over-ride the default EPS
        EPS = np.finfo(x0_dtype).eps
        x0_itemsize = np.dtype(x0_dtype).itemsize
        x0_is_fp = True

    if np.issubdtype(f0_dtype, np.inexact):
        f0_itemsize = np.dtype(f0_dtype).itemsize
        # choose the smallest itemsize between x0 and f0
        if x0_is_fp and f0_itemsize < x0_itemsize:
            EPS = np.finfo(f0_dtype).eps

    if method in ["2-point", "cs"]:
        return EPS**0.5
    elif method in ["3-point"]:
        return EPS**(1/3)
    else:
        raise RuntimeError("Unknown step method, should be one of "
                           "{'2-point', '3-point', 'cs'}")


def _compute_absolute_step(rel_step, x0, f0, method):
    """
    Computes an absolute step from a relative step for finite difference
    calculation.

    Parameters
    ----------
    rel_step: None or array-like
        Relative step for the finite difference calculation
    x0 : np.ndarray
        Parameter vector
    f0 : np.ndarray or scalar
    method : {'2-point', '3-point', 'cs'}

    Returns
    -------
    h : float
        The absolute step size

    Notes
    -----
    `h` will always be np.float64. However, if `x0` or `f0` are
    smaller floating point dtypes (e.g. np.float32), then the absolute
    step size will be calculated from the smallest floating point size.
    """
    # this is used instead of np.sign(x0) because we need
    # sign_x0 to be 1 when x0 == 0.
    sign_x0 = (x0 >= 0).astype(float) * 2 - 1

    rstep = _eps_for_method(x0.dtype, f0.dtype, method)

    if rel_step is None:
        abs_step = rstep * sign_x0 * np.maximum(1.0, np.abs(x0))
    else:
        # user has requested specific relative steps.
        # Don't multiply by max(1, abs(x0)) because if x0 < 1 then their
        # requested step is not used.
        abs_step = rel_step * sign_x0 * np.abs(x0)

        # however we don't want an abs_step of 0, which can happen if
        # rel_step is 0, or x0 is 0. Instead, substitute a realistic step.
        dx = ((x0 + abs_step) - x0)
        abs_step = np.where(dx == 0,
                            rstep * sign_x0 * np.maximum(1.0, np.abs(x0)),
                            abs_step)

    return abs_step


def _prepare_bounds(bounds, x0):
    """
    Prepares new-style bounds from a two-tuple specifying the lower and upper
    limits for values in x0. If a value is not bound then the lower/upper bound
    will be expected to be -np.inf/np.inf.

    Examples
    --------
    >>> _prepare_bounds([(0, 1, 2), (1, 2, np.inf)], [0.5, 1.5, 2.5])
    (array([0., 1., 2.]), array([ 1.,  2., inf]))
    """
    lb, ub = (np.asarray(b, dtype=float) for b in bounds)
    if lb.ndim == 0:
        lb = np.resize(lb, x0.shape)

    if ub.ndim == 0:
        ub = np.resize(ub, x0.shape)

    return lb, ub


def group_columns(A, order=0):
    """Group columns of a 2-D matrix for sparse finite differencing [1]_.

    Two columns are in the same group if in each row at least one of them
    has zero. A greedy sequential algorithm is used to construct groups.

    Parameters
    ----------
    A : array_like or sparse matrix, shape (m, n)
        Matrix of which to group columns.
    order : int, iterable of int with shape (n,) or None
        Permutation array which defines the order of columns enumeration.
        If int or None, a random permutation is used with `order` used as
        a random seed. Default is 0, that is use a random permutation but
        guarantee repeatability.

    Returns
    -------
    groups : ndarray of int, shape (n,)
        Contains values from 0 to n_groups-1, where n_groups is the number
        of found groups. Each value ``groups[i]`` is the index of the group
        to which the ith column is assigned. The procedure is helpful only
        if n_groups is significantly less than n.

    References
    ----------
    .. [1] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
           sparse Jacobian matrices", Journal of the Institute of Mathematics
           and its Applications, 13 (1974), pp. 117-120.
    """
    if issparse(A):
        A = csc_matrix(A)
    else:
        A = np.atleast_2d(A)
        A = (A != 0).astype(np.int32)

    if A.ndim != 2:
        raise ValueError("`A` must be 2-dimensional.")

    m, n = A.shape

    if order is None or np.isscalar(order):
        rng = np.random.RandomState(order)
        order = rng.permutation(n)
    else:
        order = np.asarray(order)
        if order.shape != (n,):
            raise ValueError("`order` has incorrect shape.")

    A = A[:, order]

    if issparse(A):
        groups = group_sparse(m, n, A.indices, A.indptr)
    else:
        groups = group_dense(m, n, A)

    groups[order] = groups.copy()

    return groups


def approx_derivative(fun, x0, method='3-point', rel_step=None, abs_step=None,
                      f0=None, bounds=(-np.inf, np.inf), sparsity=None,
                      as_linear_operator=False, args=(), kwargs={}):
    """Compute finite difference approximation of the derivatives of a
    vector-valued function.

    If a function maps from R^n to R^m, its derivatives form an m-by-n matrix
    called the Jacobian, where an element (i, j) is a partial derivative of
    f[i] with respect to x[j].

    Parameters
    ----------
    fun : callable
        Function of which to estimate the derivatives. The argument x
        passed to this function is ndarray of shape (n,) (never a scalar
        even if n=1). It must return 1-D array_like of shape (m,) or a scalar.
    x0 : array_like of shape (n,) or float
        Point at which to estimate the derivatives. Float will be converted
        to a 1-D array.
    method : {'3-point', '2-point', 'cs'}, optional
        Finite difference method to use:
            - '2-point' - use the first order accuracy forward or backward
                          difference.
            - '3-point' - use central difference in interior points and the
                          second order accuracy forward or backward difference
                          near the boundary.
            - 'cs' - use a complex-step finite difference scheme. This assumes
                     that the user function is real-valued and can be
                     analytically continued to the complex plane. Otherwise,
                     produces bogus results.
    rel_step : None or array_like, optional
        Relative step size to use. If None (default) the absolute step size is
        computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``, with
        `rel_step` being selected automatically, see Notes. Otherwise
        ``h = rel_step * sign(x0) * abs(x0)``. For ``method='3-point'`` the
        sign of `h` is ignored. The calculated step size is possibly adjusted
        to fit into the bounds.
    abs_step : array_like, optional
        Absolute step size to use, possibly adjusted to fit into the bounds.
        For ``method='3-point'`` the sign of `abs_step` is ignored. By default
        relative steps are used, only if ``abs_step is not None`` are absolute
        steps used.
    f0 : None or array_like, optional
        If not None it is assumed to be equal to ``fun(x0)``, in this case
        the ``fun(x0)`` is not called. Default is None.
    bounds : tuple of array_like, optional
        Lower and upper bounds on independent variables. Defaults to no bounds.
        Each bound must match the size of `x0` or be a scalar, in the latter
        case the bound will be the same for all variables. Use it to limit the
        range of function evaluation. Bounds checking is not implemented
        when `as_linear_operator` is True.
    sparsity : {None, array_like, sparse matrix, 2-tuple}, optional
        Defines a sparsity structure of the Jacobian matrix. If the Jacobian
        matrix is known to have only a few non-zero elements in each row, then
        it's possible to estimate several of its columns by a single function
        evaluation [3]_. To perform such economic computations two ingredients
        are required:

        * structure : array_like or sparse matrix of shape (m, n). A zero
          element means that a corresponding element of the Jacobian
          identically equals to zero.
        * groups : array_like of shape (n,). A column grouping for a given
          sparsity structure, use `group_columns` to obtain it.

        A single array or a sparse matrix is interpreted as a sparsity
        structure, and groups are computed inside the function. A tuple is
        interpreted as (structure, groups). If None (default), a standard
        dense differencing will be used.

        Note that sparse differencing makes sense only for large Jacobian
        matrices where each row contains few non-zero elements.
    as_linear_operator : bool, optional
        When True the function returns an `scipy.sparse.linalg.LinearOperator`.
        Otherwise it returns a dense array or a sparse matrix depending on
        `sparsity`. The linear operator provides an efficient way of computing
        ``J.dot(p)`` for any vector ``p`` of shape (n,), but does not allow
        direct access to individual elements of the matrix. By default
        `as_linear_operator` is False.
    args, kwargs : tuple and dict, optional
        Additional arguments passed to `fun`. Both empty by default.
        The calling signature is ``fun(x, *args, **kwargs)``.

    Returns
    -------
    J : {ndarray, sparse matrix, LinearOperator}
        Finite difference approximation of the Jacobian matrix.
        If `as_linear_operator` is True returns a LinearOperator
        with shape (m, n). Otherwise it returns a dense array or sparse
        matrix depending on how `sparsity` is defined. If `sparsity`
        is None then a ndarray with shape (m, n) is returned. If
        `sparsity` is not None returns a csr_matrix with shape (m, n).
        For sparse matrices and linear operators it is always returned as
        a 2-D structure, for ndarrays, if m=1 it is returned
        as a 1-D gradient array with shape (n,).

    See Also
    --------
    check_derivative : Check correctness of a function computing derivatives.

    Notes
    -----
    If `rel_step` is not provided, it is assigned as ``EPS**(1/s)``, where EPS
    is determined from the smallest floating point dtype of `x0` or `fun(x0)`,
    ``np.finfo(x0.dtype).eps``, s=2 for '2-point' method and
    s=3 for '3-point' method. Such relative step approximately minimizes a sum
    of truncation and round-off errors, see [1]_. Relative steps are used by
    default. However, absolute steps are used when ``abs_step is not None``.
    If any of the absolute or relative steps produces an indistinguishable
    difference from the original `x0`, ``(x0 + dx) - x0 == 0``, then an
    automatic step size is substituted for that particular entry.

    A finite difference scheme for '3-point' method is selected automatically.
    The well-known central difference scheme is used for points sufficiently
    far from the boundary, and 3-point forward or backward scheme is used for
    points near the boundary. Both schemes have the second-order accuracy in
    terms of Taylor expansion. Refer to [2]_ for the formulas of 3-point
    forward and backward difference schemes.

    For dense differencing when m=1 Jacobian is returned with a shape (n,),
    on the other hand when n=1 Jacobian is returned with a shape (m, 1).
    Our motivation is the following: a) It handles a case of gradient
    computation (m=1) in a conventional way. b) It clearly separates these two
    different cases. c) In all cases np.atleast_2d can be called to get 2-D
    Jacobian with correct dimensions.

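
    A short illustration of this shape convention (using two toy functions):

    >>> import numpy as np
    >>> approx_derivative(lambda x: x[0]**2 + x[1], np.array([3.0, 1.0])).shape
    (2,)
    >>> approx_derivative(lambda x: np.array([x[0], 2 * x[0]]),
    ...                   np.array([3.0])).shape
    (2, 1)
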
    References
    ----------
    .. [1] W. H. Press et. al. "Numerical Recipes. The Art of Scientific
           Computing. 3rd edition", sec. 5.7.

    .. [2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
           sparse Jacobian matrices", Journal of the Institute of Mathematics
           and its Applications, 13 (1974), pp. 117-120.

    .. [3] B. Fornberg, "Generation of Finite Difference Formulas on
           Arbitrarily Spaced Grids", Mathematics of Computation 51, 1988.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.optimize._numdiff import approx_derivative
    >>>
    >>> def f(x, c1, c2):
    ...     return np.array([x[0] * np.sin(c1 * x[1]),
    ...                      x[0] * np.cos(c2 * x[1])])
    ...
    >>> x0 = np.array([1.0, 0.5 * np.pi])
    >>> approx_derivative(f, x0, args=(1, 2))
    array([[ 1.,  0.],
           [-1.,  0.]])

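
    When the Jacobian is known to be banded, a sparsity structure lets several
    columns be estimated from a single function evaluation, and the result is
    returned as a sparse matrix (a small sketch with a toy bidiagonal
    function):

    >>> def f_banded(x):
    ...     return x**2 + np.r_[x[1:], 0] * x
    >>> x0 = np.arange(4.0)
    >>> structure = np.eye(4) + np.eye(4, k=1)
    >>> J_sparse = approx_derivative(f_banded, x0, sparsity=structure)
    >>> np.allclose(J_sparse.toarray(), approx_derivative(f_banded, x0))
    True
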
    Bounds can be used to limit the region of function evaluation.
    In the example below we compute left and right derivative at point 1.0.

    >>> def g(x):
    ...     return x**2 if x >= 1 else x
    ...
    >>> x0 = 1.0
    >>> approx_derivative(g, x0, bounds=(-np.inf, 1.0))
    array([ 1.])
    >>> approx_derivative(g, x0, bounds=(1.0, np.inf))
    array([ 2.])
    """
    if method not in ['2-point', '3-point', 'cs']:
        raise ValueError(f"Unknown method '{method}'. ")

    xp = array_namespace(x0)
    _x = xpx.atleast_nd(xp.asarray(x0), ndim=1, xp=xp)
    _dtype = xp.float64
    if xp.isdtype(_x.dtype, "real floating"):
        _dtype = _x.dtype

    # promotes to floating
    x0 = xp.astype(_x, _dtype)

    if x0.ndim > 1:
        raise ValueError("`x0` must have at most 1 dimension.")

    lb, ub = _prepare_bounds(bounds, x0)

    if lb.shape != x0.shape or ub.shape != x0.shape:
        raise ValueError("Inconsistent shapes between bounds and `x0`.")

    if as_linear_operator and not (np.all(np.isinf(lb))
                                   and np.all(np.isinf(ub))):
        raise ValueError("Bounds not supported when "
                         "`as_linear_operator` is True.")

    def fun_wrapped(x):
        # send the user function the same fp type as x0
        if xp.isdtype(x.dtype, "real floating"):
            x = xp.astype(x, _dtype)

        f = np.atleast_1d(fun(x, *args, **kwargs))
        if f.ndim > 1:
            raise RuntimeError("`fun` return value has "
                               "more than 1 dimension.")
        return f

    if f0 is None:
        f0 = fun_wrapped(x0)
    else:
        f0 = np.atleast_1d(f0)
        if f0.ndim > 1:
            raise ValueError("`f0` passed has more than 1 dimension.")

    if np.any((x0 < lb) | (x0 > ub)):
        raise ValueError("`x0` violates bound constraints.")

    if as_linear_operator:
        if rel_step is None:
            rel_step = _eps_for_method(x0.dtype, f0.dtype, method)

        return _linear_operator_difference(fun_wrapped, x0,
                                           f0, rel_step, method)
    else:
        # by default we use rel_step
        if abs_step is None:
            h = _compute_absolute_step(rel_step, x0, f0, method)
        else:
            # user specifies an absolute step
            sign_x0 = (x0 >= 0).astype(float) * 2 - 1
            h = abs_step

            # cannot have a zero step. This might happen if x0 is very large
            # or small. In which case fall back to a relative step.
            dx = ((x0 + h) - x0)
            h = np.where(dx == 0,
                         _eps_for_method(x0.dtype, f0.dtype, method) *
                         sign_x0 * np.maximum(1.0, np.abs(x0)),
                         h)

        if method == '2-point':
            h, use_one_sided = _adjust_scheme_to_bounds(
                x0, h, 1, '1-sided', lb, ub)
        elif method == '3-point':
            h, use_one_sided = _adjust_scheme_to_bounds(
                x0, h, 1, '2-sided', lb, ub)
        elif method == 'cs':
            use_one_sided = False

        if sparsity is None:
            return _dense_difference(fun_wrapped, x0, f0, h,
                                     use_one_sided, method)
        else:
            if not issparse(sparsity) and len(sparsity) == 2:
                structure, groups = sparsity
            else:
                structure = sparsity
                groups = group_columns(sparsity)

            if issparse(structure):
                structure = csc_matrix(structure)
            else:
                structure = np.atleast_2d(structure)

            groups = np.atleast_1d(groups)
            return _sparse_difference(fun_wrapped, x0, f0, h,
                                      use_one_sided, structure,
                                      groups, method)


def _linear_operator_difference(fun, x0, f0, h, method):
    m = f0.size
    n = x0.size

    if method == '2-point':
        def matvec(p):
            if np.array_equal(p, np.zeros_like(p)):
                return np.zeros(m)
            dx = h / norm(p)
            x = x0 + dx*p
            df = fun(x) - f0
            return df / dx

    elif method == '3-point':
        def matvec(p):
            if np.array_equal(p, np.zeros_like(p)):
                return np.zeros(m)
            dx = 2*h / norm(p)
            x1 = x0 - (dx/2)*p
            x2 = x0 + (dx/2)*p
            f1 = fun(x1)
            f2 = fun(x2)
            df = f2 - f1
            return df / dx

    elif method == 'cs':
        def matvec(p):
            if np.array_equal(p, np.zeros_like(p)):
                return np.zeros(m)
            dx = h / norm(p)
            x = x0 + dx*p*1.j
            f1 = fun(x)
            df = f1.imag
            return df / dx

    else:
        raise RuntimeError("Never be here.")

    return LinearOperator((m, n), matvec)


def _dense_difference(fun, x0, f0, h, use_one_sided, method):
    m = f0.size
    n = x0.size
    J_transposed = np.empty((n, m))
    x1 = x0.copy()
    x2 = x0.copy()
    xc = x0.astype(complex, copy=True)

    for i in range(h.size):
        if method == '2-point':
            x1[i] += h[i]
            dx = x1[i] - x0[i]  # Recompute dx as exactly representable number.
            df = fun(x1) - f0
        elif method == '3-point' and use_one_sided[i]:
            x1[i] += h[i]
            x2[i] += 2 * h[i]
            dx = x2[i] - x0[i]
            f1 = fun(x1)
            f2 = fun(x2)
            df = -3.0 * f0 + 4 * f1 - f2
        elif method == '3-point' and not use_one_sided[i]:
            x1[i] -= h[i]
            x2[i] += h[i]
            dx = x2[i] - x1[i]
            f1 = fun(x1)
            f2 = fun(x2)
            df = f2 - f1
        elif method == 'cs':
            xc[i] += h[i] * 1.j
            f1 = fun(xc)
            df = f1.imag
            dx = h[i]
        else:
            raise RuntimeError("Never be here.")

        J_transposed[i] = df / dx
        x1[i] = x2[i] = xc[i] = x0[i]

    if m == 1:
        J_transposed = np.ravel(J_transposed)

    return J_transposed.T


def _sparse_difference(fun, x0, f0, h, use_one_sided,
                       structure, groups, method):
    m = f0.size
    n = x0.size
    row_indices = []
    col_indices = []
    fractions = []

    n_groups = np.max(groups) + 1
    for group in range(n_groups):
        # Perturb variables which are in the same group simultaneously.
        e = np.equal(group, groups)
        h_vec = h * e
        if method == '2-point':
            x = x0 + h_vec
            dx = x - x0
            df = fun(x) - f0
            # The result is written to columns which correspond to perturbed
            # variables.
            cols, = np.nonzero(e)
            # Find all non-zero elements in selected columns of Jacobian.
            i, j, _ = find(structure[:, cols])
            # Restore column indices in the full array.
            j = cols[j]
        elif method == '3-point':
            # Here we do conceptually the same but separate one-sided
            # and two-sided schemes.
            x1 = x0.copy()
            x2 = x0.copy()

            mask_1 = use_one_sided & e
            x1[mask_1] += h_vec[mask_1]
            x2[mask_1] += 2 * h_vec[mask_1]

            mask_2 = ~use_one_sided & e
            x1[mask_2] -= h_vec[mask_2]
            x2[mask_2] += h_vec[mask_2]

            dx = np.zeros(n)
            dx[mask_1] = x2[mask_1] - x0[mask_1]
            dx[mask_2] = x2[mask_2] - x1[mask_2]

            f1 = fun(x1)
            f2 = fun(x2)

            cols, = np.nonzero(e)
            i, j, _ = find(structure[:, cols])
            j = cols[j]

            mask = use_one_sided[j]
            df = np.empty(m)

            rows = i[mask]
            df[rows] = -3 * f0[rows] + 4 * f1[rows] - f2[rows]

            rows = i[~mask]
            df[rows] = f2[rows] - f1[rows]
        elif method == 'cs':
            f1 = fun(x0 + h_vec*1.j)
            df = f1.imag
            dx = h_vec
            cols, = np.nonzero(e)
            i, j, _ = find(structure[:, cols])
            j = cols[j]
        else:
            raise RuntimeError("Never be here.")

        # All that's left is to compute the fraction. We store i, j and
        # fractions as separate arrays and later construct coo_matrix.
        row_indices.append(i)
        col_indices.append(j)
        fractions.append(df[i] / dx[j])

    row_indices = np.hstack(row_indices)
    col_indices = np.hstack(col_indices)
    fractions = np.hstack(fractions)
    J = coo_matrix((fractions, (row_indices, col_indices)), shape=(m, n))
    return csr_matrix(J)


def check_derivative(fun, jac, x0, bounds=(-np.inf, np.inf), args=(),
                     kwargs={}):
    """Check correctness of a function computing derivatives (Jacobian or
    gradient) by comparison with a finite difference approximation.

    Parameters
    ----------
    fun : callable
        Function of which to estimate the derivatives. The argument x
        passed to this function is ndarray of shape (n,) (never a scalar
        even if n=1). It must return 1-D array_like of shape (m,) or a scalar.
    jac : callable
        Function which computes Jacobian matrix of `fun`. It must work with
        argument x the same way as `fun`. The return value must be array_like
        or sparse matrix with an appropriate shape.
    x0 : array_like of shape (n,) or float
        Point at which to estimate the derivatives. Float will be converted
        to a 1-D array.
    bounds : 2-tuple of array_like, optional
        Lower and upper bounds on independent variables. Defaults to no bounds.
        Each bound must match the size of `x0` or be a scalar, in the latter
        case the bound will be the same for all variables. Use it to limit the
        range of function evaluation.
    args, kwargs : tuple and dict, optional
        Additional arguments passed to `fun` and `jac`. Both empty by default.
        The calling signature is ``fun(x, *args, **kwargs)`` and the same
        for `jac`.

    Returns
    -------
    accuracy : float
        The maximum among all relative errors for elements with absolute values
        higher than 1 and absolute errors for elements with absolute values
        less than or equal to 1. If `accuracy` is on the order of 1e-6 or
        lower, then it is likely that your `jac` implementation is correct.

    See Also
    --------
    approx_derivative : Compute finite difference approximation of derivative.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.optimize._numdiff import check_derivative
    >>>
    >>>
    >>> def f(x, c1, c2):
    ...     return np.array([x[0] * np.sin(c1 * x[1]),
    ...                      x[0] * np.cos(c2 * x[1])])
    ...
    >>> def jac(x, c1, c2):
    ...     return np.array([
    ...         [np.sin(c1 * x[1]),  c1 * x[0] * np.cos(c1 * x[1])],
    ...         [np.cos(c2 * x[1]), -c2 * x[0] * np.sin(c2 * x[1])]
    ...     ])
    ...
    >>>
    >>> x0 = np.array([1.0, 0.5 * np.pi])
    >>> check_derivative(f, jac, x0, args=(1, 2))
    2.4492935982947064e-16
    """
    J_to_test = jac(x0, *args, **kwargs)
    if issparse(J_to_test):
        J_diff = approx_derivative(fun, x0, bounds=bounds, sparsity=J_to_test,
                                   args=args, kwargs=kwargs)
        J_to_test = csr_matrix(J_to_test)
        abs_err = J_to_test - J_diff
        i, j, abs_err_data = find(abs_err)
        J_diff_data = np.asarray(J_diff[i, j]).ravel()
        return np.max(np.abs(abs_err_data) /
                      np.maximum(1, np.abs(J_diff_data)))
    else:
        J_diff = approx_derivative(fun, x0, bounds=bounds,
                                   args=args, kwargs=kwargs)
        abs_err = np.abs(J_to_test - J_diff)
        return np.max(abs_err / np.maximum(1, np.abs(J_diff)))