"""Basic linear factorizations needed by the solver."""

from scipy.sparse import (bmat, csc_matrix, eye, issparse)
from scipy.sparse.linalg import LinearOperator
import scipy.linalg
import scipy.sparse.linalg
try:
    from sksparse.cholmod import cholesky_AAt
    sksparse_available = True
except ImportError:
    import warnings
    sksparse_available = False
import numpy as np
from warnings import warn

__all__ = ['orthogonality', 'projections']


def orthogonality(A, g):
    """Measure orthogonality between a vector and the null space of a matrix.

    Compute a measure of orthogonality between the null space
    of the (possibly sparse) matrix ``A`` and a given vector ``g``.

    The formula is a simplified (and cheaper) version of formula (3.13)
    from [1]_.
    ``orth =  norm(A g, ord=2)/(norm(A, ord='fro')*norm(g, ord=2))``.

    References
    ----------
    .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal.
           "On the solution of equality constrained quadratic
            programming problems arising in optimization."
            SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395.
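
    Examples
    --------
    A small, illustrative check (the matrix and vectors below are chosen
    here purely for demonstration): a vector lying in the null space of
    ``A`` gives a measure of 0, while a vector lying in its row space
    gives 1.

    >>> import numpy as np
    >>> from scipy.optimize._trustregion_constr.projections import (
    ...     orthogonality)
    >>> A = np.array([[1.0, 0.0]])
    >>> float(orthogonality(A, np.array([0.0, 1.0])))
    0.0
    >>> float(orthogonality(A, np.array([1.0, 0.0])))
    1.0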
    Zfro)ordr   )nplinalgnormr   scipysparsedot)AgZnorm_gZnorm_AZnorm_A_gZorth r   j/var/www/html/assistant/venv/lib/python3.9/site-packages/scipy/optimize/_trustregion_constr/projections.pyr
      s    c           	         s@   t   fdd} fdd} fdd}|||fS )zLReturn linear operators for matrix A using ``NormalEquation`` approach.
    c                    sf     | }|  j | }d}t |krb|kr:qb  |}| j | }|d7 }q"|S Nr      r   Tr
   )xvzkr   factor	max_refinorth_tolr   r   
null_space@   s    
z/normal_equation_projections.<locals>.null_spacec                    s     | S Nr   r   r   r    r   r   least_squaresR   s    z2normal_equation_projections.<locals>.least_squaresc                    s    j | S r$   r   r   r&   r'   r   r   	row_spaceV   s    z.normal_equation_projections.<locals>.row_spacer   	r   mnr"   r!   tolr#   r(   r*   r   r   r   normal_equation_projections9   s


def augmented_system_projections(A, m, n, orth_tol, max_refin, tol):
    """Return linear operators for matrix A - ``AugmentedSystem``."""
    # Form the augmented system K = [[I, A.T], [A, 0]].
    K = csc_matrix(bmat([[eye(n), A.T], [A, None]]))
    # LU factorization of the augmented matrix.
    try:
        solve = scipy.sparse.linalg.factorized(K)
    except RuntimeError:
        warn("Singular Jacobian matrix. Using dense SVD decomposition to "
             "perform the factorizations.",
             stacklevel=3)
        return svd_factorization_projections(A.toarray(),
                                             m, n, orth_tol,
                                             max_refin, tol)

    # z = x - A.T inv(A A.T) A x
    # is computed solving the extended system:
    # [I A.T] * [ z ] = [x]
    # [A  O ]   [aux]   [0]
    def null_space(x):
        # v = [x, 0]
        v = np.hstack([x, np.zeros(m)])
        # lu_sol = [z, aux]
        lu_sol = solve(v)
        z = lu_sol[:n]

        # Iterative refinement to reduce roundoff error in the projection.
        k = 0
        while orthogonality(A, z) > orth_tol:
            if k >= max_refin:
                break
            # Residual of the extended system.
            new_v = v - K.dot(lu_sol)
            # Correction for [z, aux].
            lu_update = solve(new_v)
            lu_sol = lu_sol + lu_update
            z = lu_sol[:n]
            k += 1

        return z

    # z = inv(A A.T) A x
    # is computed solving the extended system:
    # [I A.T] * [aux] = [x]
    # [A  O ]   [ z ]   [0]
    def least_squares(x):
        v = np.hstack([x, np.zeros(m)])
        lu_sol = solve(v)
        return lu_sol[n:m+n]

    # z = A.T inv(A A.T) x
    # is computed solving the extended system:
    # [I A.T] * [ z ] = [0]
    # [A  O ]   [aux]   [x]
    def row_space(x):
        v = np.hstack([np.zeros(n), x])
        lu_sol = solve(v)
        return lu_sol[:n]

    return null_space, least_squares, row_space


def qr_factorization_projections(A, m, n, orth_tol, max_refin, tol):
    """Return linear operators for matrix A using ``QRFactorization`` approach.
    """
    # QR factorization of A.T with column pivoting.
    Q, R, P = scipy.linalg.qr(A.T, pivoting=True, mode='economic')

    if np.linalg.norm(R[-1, :], np.inf) < tol:
        warn('Singular Jacobian matrix. Using SVD decomposition to '
             'perform the factorizations.',
             stacklevel=3)
        return svd_factorization_projections(A, m, n,
                                             orth_tol,
                                             max_refin,
                                             tol)

    # z = x - A.T inv(A A.T) A x
    def null_space(x):
        # v = P inv(R) Q.T x
        aux1 = Q.T.dot(x)
        aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)
        v = np.zeros(m)
        v[P] = aux2
        z = x - A.T.dot(v)

        # Iterative refinement to reduce roundoff error in the projection.
        k = 0
        while orthogonality(A, z) > orth_tol:
            if k >= max_refin:
                break
            # v = P inv(R) Q.T z
            aux1 = Q.T.dot(z)
            aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)
            v = np.zeros(m)
            v[P] = aux2
            # z_next = z - A.T v
            z = z - A.T.dot(v)
            k += 1

        return z

    # z = inv(A A.T) A x
    def least_squares(x):
        # z = P inv(R) Q.T x
        aux1 = Q.T.dot(x)
        aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)
        z = np.zeros(m)
        z[P] = aux2
        return z

    # z = A.T inv(A A.T) x
    def row_space(x):
        # z = Q inv(R.T) P.T x
        aux1 = x[P]
        aux2 = scipy.linalg.solve_triangular(R, aux1,
                                             lower=False,
                                             trans='T')
        z = Q.dot(aux2)
        return z

    return null_space, least_squares, row_space


def svd_factorization_projections(A, m, n, orth_tol, max_refin, tol):
    """Return linear operators for matrix A using ``SVDFactorization`` approach.
    """
    # SVD factorization.
    U, s, Vt = scipy.linalg.svd(A, full_matrices=False)

    # Remove dimensions related with very small singular values.
    U = U[:, s > tol]
    Vt = Vt[s > tol, :]
    s = s[s > tol]

    # z = x - A.T inv(A A.T) A x
    def null_space(x):
        # v = U 1/s V.T x = inv(A A.T) A x
        aux1 = Vt.dot(x)
        aux2 = 1/s*aux1
        v = U.dot(aux2)
        z = x - A.T.dot(v)

        # Iterative refinement to reduce roundoff error in the projection.
        k = 0
        while orthogonality(A, z) > orth_tol:
            if k >= max_refin:
                break
            # v = U 1/s V.T z = inv(A A.T) A z
            aux1 = Vt.dot(z)
            aux2 = 1/s*aux1
            v = U.dot(aux2)
            # z_next = z - A.T v
            z = z - A.T.dot(v)
            k += 1

        return z

    # z = inv(A A.T) A x
    def least_squares(x):
        # z = U 1/s V.T x = inv(A A.T) A x
        aux1 = Vt.dot(x)
        aux2 = 1/s*aux1
        z = U.dot(aux2)
        return z

    # z = A.T inv(A A.T) x
    def row_space(x):
        # z = V 1/s U.T x
        aux1 = U.T.dot(x)
        aux2 = 1/s*aux1
        z = Vt.T.dot(aux2)
        return z

    return null_space, least_squares, row_space


def projections(A, method=None, orth_tol=1e-12, max_refin=3, tol=1e-15):
    """Return three linear operators related with a given matrix A.

    Parameters
    ----------
    A : sparse matrix (or ndarray), shape (m, n)
        Matrix ``A`` used in the projection.
    method : string, optional
        Method used to compute the given linear
        operators. Should be one of:

            - 'NormalEquation': The operators
               will be computed using the
               so-called normal equation approach
               explained in [1]_. In order to do
               so the Cholesky factorization of
               ``(A A.T)`` is computed. Exclusive
               for sparse matrices.
            - 'AugmentedSystem': The operators
               will be computed using the
               so-called augmented system approach
               explained in [1]_. Exclusive
               for sparse matrices.
            - 'QRFactorization': Compute projections
               using QR factorization. Exclusive for
               dense matrices.
            - 'SVDFactorization': Compute projections
               using SVD factorization. Exclusive for
               dense matrices.

    orth_tol : float, optional
        Tolerance for iterative refinements.
    max_refin : int, optional
        Maximum number of iterative refinements.
    tol : float, optional
        Tolerance for singular values.

    Returns
    -------
    Z : LinearOperator, shape (n, n)
        Null-space operator. For a given vector ``x``,
        the null-space operator is equivalent to applying
        a projection matrix ``P = I - A.T inv(A A.T) A``
        to the vector. It can be shown that this is
        equivalent to projecting ``x`` onto the null space
        of ``A``.
    LS : LinearOperator, shape (m, n)
        Least-squares operator. For a given vector ``x``,
        the least-squares operator is equivalent to applying a
        pseudoinverse matrix ``pinv(A.T) = inv(A A.T) A``
        to the vector. It can be shown that this vector
        ``pinv(A.T) x`` is the least-squares solution to
        ``A.T y = x``.
    Y : LinearOperator, shape (n, m)
        Row-space operator. For a given vector ``x``,
        the row-space operator is equivalent to applying a
        projection matrix ``Q = A.T inv(A A.T)``
        to the vector. It can be shown that this
        vector ``y = Q x`` is the minimum-norm solution
        of ``A y = x``.

    Notes
    -----
    Uses iterative refinements described in [1]_
    during the computation of ``Z`` in order to
    cope with the possibility of large roundoff errors.
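
    Examples
    --------
    A minimal usage sketch (the matrix and vectors below are chosen here
    purely for illustration): the null-space operator ``Z`` maps any
    vector to a vector orthogonal to the rows of ``A``, and a sparse
    input can be handled with the ``'AugmentedSystem'`` method.

    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.optimize._trustregion_constr.projections import (
    ...     projections)
    >>> A = np.array([[1.0, 0.0, 1.0]])
    >>> Z, LS, Y = projections(A)  # dense input defaults to QRFactorization
    >>> z = Z.dot(np.array([1.0, 2.0, 3.0]))
    >>> np.allclose(A.dot(z), 0.0)
    True
    >>> Zs, LSs, Ys = projections(csc_matrix(A), method='AugmentedSystem')
    >>> np.allclose(Zs.dot(np.array([1.0, 2.0, 3.0])), z)
    True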

    References
    ----------
    .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal.
        "On the solution of equality constrained quadratic
        programming problems arising in optimization."
        SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395.
    """
    m, n = np.shape(A)

    # The factorization of an empty matrix
    # only works for the sparse representation.
    if m*n == 0:
        A = csc_matrix(A)

    # Check arguments.
    if issparse(A):
        if method is None:
            method = "AugmentedSystem"
        if method not in ("NormalEquation", "AugmentedSystem"):
            raise ValueError("Method not allowed for sparse matrix.")
        if method == "NormalEquation" and not sksparse_available:
            warnings.warn("Only accepts 'NormalEquation' option when "
                          "scikit-sparse is available. Using "
                          "'AugmentedSystem' option instead.",
                          ImportWarning, stacklevel=3)
            method = 'AugmentedSystem'
    else:
        if method is None:
            method = "QRFactorization"
        if method not in ("QRFactorization", "SVDFactorization"):
            raise ValueError("Method not allowed for dense array.")

    if method == 'NormalEquation':
        null_space, least_squares, row_space \
            = normal_equation_projections(A, m, n, orth_tol, max_refin, tol)
    elif method == 'AugmentedSystem':
        null_space, least_squares, row_space \
            = augmented_system_projections(A, m, n, orth_tol, max_refin, tol)
    elif method == "QRFactorization":
        null_space, least_squares, row_space \
            = qr_factorization_projections(A, m, n, orth_tol, max_refin, tol)
    elif method == "SVDFactorization":
        null_space, least_squares, row_space \
            = svd_factorization_projections(A, m, n, orth_tol, max_refin, tol)

    Z = LinearOperator((n, n), null_space)
    LS = LinearOperator((m, n), least_squares)
    Y = LinearOperator((n, m), row_space)

    return Z, LS, Y