Add methods for computing decompositions.
parent aaa359f3b0 · commit 00039c0a76
@@ -255,3 +255,17 @@ impl<N: Real, R: DimMin<C>, C: Dim> Bidiagonal<N, R, C>
 // //         res self.q_determinant()
 // //     }
 // }
+
+impl<N: Real, R: DimMin<C>, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
+    where DimMinimum<R, C>: DimSub<U1>,
+          DefaultAllocator: Allocator<N, R, C> +
+                            Allocator<N, C> +
+                            Allocator<N, R> +
+                            Allocator<N, DimMinimum<R, C>> +
+                            Allocator<N, DimDiff<DimMinimum<R, C>, U1>> {
+
+    /// Computes the bidiagonalization using householder reflections.
+    pub fn bidiagonalize(self) -> Bidiagonal<N, R, C> {
+        Bidiagonal::new(self.into_owned())
+    }
+}
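Note: a minimal usage sketch for the new `bidiagonalize` method (the random 5x3 matrix is illustrative only, mirroring the updated tests further below):

    use na::Matrix5x3;

    let m = Matrix5x3::<f64>::new_random();
    let bidiag = m.bidiagonalize();
    let (u, d, v_t) = bidiag.unpack();
    // `u * d * v_t` reproduces `m` up to floating-point error.
    println!("{}{}{}", u, d, v_t);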
@@ -1,6 +1,6 @@
 use alga::general::Real;

-use core::{DefaultAllocator, MatrixN, MatrixMN, Matrix};
+use core::{DefaultAllocator, MatrixN, MatrixMN, Matrix, SquareMatrix};
 use constraint::{ShapeConstraint, SameNumberOfRows};
 use storage::{Storage, StorageMut};
 use allocator::Allocator;
@@ -110,3 +110,15 @@ impl<N: Real, D: DimSub<Dynamic>> Cholesky<N, D>
         res
     }
 }
+
+impl<N: Real, D: DimSub<Dynamic>, S: Storage<N, D, D>> SquareMatrix<N, D, S>
+    where DefaultAllocator: Allocator<N, D, D> {
+
+    /// Attempts to compute the Cholesky decomposition of this matrix.
+    ///
+    /// Returns `None` if the input matrix is not definite-positive. The input matrix is assumed
+    /// to be symmetric and only the lower-triangular part is read.
+    pub fn cholesky(self) -> Option<Cholesky<N, D>> {
+        Cholesky::new(self.into_owned())
+    }
+}
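Note: a quick sketch of the new `cholesky` entry point (the 2x2 matrix below is an arbitrary positive-definite example, not part of the commit):

    use na::Matrix2;

    let m = Matrix2::new(2.0, 1.0,
                         1.0, 2.0);
    // `cholesky()` returns `None` when the matrix is not definite-positive.
    if let Some(chol) = m.cholesky() {
        let l = chol.unpack();
        println!("{}", l * l.transpose()); // reproduces `m` up to rounding
    }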
@@ -23,7 +23,9 @@ pub struct FullPivLU<N: Real, R: DimMin<C>, C: Dim>
 impl<N: Real, R: DimMin<C>, C: Dim> FullPivLU<N, R, C>
     where DefaultAllocator: Allocator<N, R, C> +
                             Allocator<(usize, usize), DimMinimum<R, C>> {
-    /// This computes the matrixces `P, L, U` such that `P * matrix = LU`.
+    /// Computes the LU decomposition with full-pivoting of `matrix`.
+    ///
+    /// This effectively computes `P, L, U, Q` such that `P * matrix * Q = LU`.
     pub fn new(mut matrix: MatrixMN<N, R, C>) -> Self {
         let (nrows, ncols) = matrix.data.shape();
         let min_nrows_ncols = nrows.min(ncols);
@@ -203,3 +205,14 @@ impl<N: Real, D: DimMin<D, Output = D>> FullPivLU<N, D, D>
         }
     }
 }
+
+impl<N: Real, R: DimMin<C>, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
+    where DefaultAllocator: Allocator<N, R, C> +
+                            Allocator<(usize, usize), DimMinimum<R, C>> {
+    /// Computes the LU decomposition with full-pivoting of `matrix`.
+    ///
+    /// This effectively computes `P, L, U, Q` such that `P * matrix * Q = LU`.
+    pub fn full_piv_lu(self) -> FullPivLU<N, R, C> {
+        FullPivLU::new(self.into_owned())
+    }
+}
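Note: a usage sketch for the new `full_piv_lu` method (values borrowed from the updated `full_piv_lu_simple` test below):

    use na::Matrix3;

    let m = Matrix3::new(2.0, -1.0,  0.0,
                        -1.0,  2.0, -1.0,
                         0.0, -1.0,  2.0);
    let lu = m.full_piv_lu();
    assert_eq!(lu.determinant(), 4.0);
    let (p, l, u, q) = lu.unpack();
    // `p` and `q` are the row/column permutations with `P * m * Q = L * U`.
    println!("{}{}", l, u);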
@@ -1,5 +1,5 @@
 use alga::general::Real;
-use core::{MatrixN, MatrixMN, VectorN, DefaultAllocator};
+use core::{SquareMatrix, MatrixN, MatrixMN, VectorN, DefaultAllocator};
 use dimension::{DimSub, DimDiff, Dynamic, U1};
 use storage::Storage;
 use allocator::Allocator;
@@ -95,3 +95,14 @@ impl<N: Real, D: DimSub<U1>> Hessenberg<N, D>
         &self.hess
     }
 }
+
+
+impl<N: Real, D: DimSub<U1>, S: Storage<N, D, D>> SquareMatrix<N, D, S>
+    where DefaultAllocator: Allocator<N, D, D> +
+                            Allocator<N, D> +
+                            Allocator<N, DimDiff<D, U1>> {
+    /// Computes the Hessenberg decomposition of this matrix using householder reflections.
+    pub fn hessenberg(self) -> Hessenberg<N, D> {
+        Hessenberg::new(self.into_owned())
+    }
+}
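Note: a short sketch of the new `hessenberg` method (random input, illustrative only):

    use na::Matrix4;

    let m = Matrix4::<f64>::new_random();
    let hess = m.hessenberg();
    let (p, h) = hess.unpack();
    // `p * h * p.transpose()` reproduces `m` up to floating-point error.
    println!("{}", p * h * p.transpose());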
@@ -300,3 +300,13 @@ pub fn gauss_step_swap<N, R: Dim, C: Dim, S>(matrix: &mut Matrix<N, R, C, S>, di
         down.column_mut(k).axpy(-pivot_row[k], &coeffs, N::one());
     }
 }
+
+impl<N: Real, R: DimMin<C>, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
+    where DefaultAllocator: Allocator<N, R, C> +
+                            Allocator<(usize, usize), DimMinimum<R, C>> {
+
+    /// Computes the LU decomposition with partial (row) pivoting of `matrix`.
+    pub fn lu(self) -> LU<N, R, C> {
+        LU::new(self.into_owned())
+    }
+}
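Note: a usage sketch for the new `lu` method with partial pivoting (values borrowed from the updated `lu_simple` test below):

    use na::Matrix3;

    let m = Matrix3::new(2.0, -1.0,  0.0,
                        -1.0,  2.0, -1.0,
                         0.0, -1.0,  2.0);
    let lu = m.lu();
    assert_eq!(lu.determinant(), 4.0);
    let (p, l, u) = lu.unpack();
    // Undoing the row permutation `p` on `l * u` recovers `m`.
    println!("{}{}", l, u);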
@@ -228,3 +228,14 @@ impl<N: Real, D: DimMin<D, Output = D>> QR<N, D, D>
     //         res self.q_determinant()
     //     }
 }
+
+impl<N: Real, R: DimMin<C>, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
+    where DefaultAllocator: Allocator<N, R, C> +
+                            Allocator<N, R> +
+                            Allocator<N, DimMinimum<R, C>> {
+
+    /// Computes the QR decomposition of this matrix.
+    pub fn qr(self) -> QR<N, R, C> {
+        QR::new(self.into_owned())
+    }
+}
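Note: a minimal sketch of the new `qr` method (random input, illustrative only):

    use na::Matrix4;

    let m = Matrix4::<f64>::new_random();
    let qr = m.qr();
    let (q, r) = (qr.q(), qr.r());
    // `q * r` reproduces `m` up to floating-point error.
    println!("{}", q * r);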
@@ -35,7 +35,7 @@ impl<N: Real, D: Dim> RealSchur<N, D>
         Self::try_new(m, N::default_epsilon(), 0).unwrap()
     }

-    /// Computes the schur decomposition of a square matrix.
+    /// Attempts to compute the Schur decomposition of a square matrix.
     ///
     /// If only eigenvalues are needed, it is more efficient to call the matrix method
     /// `.eigenvalues()` instead.
@@ -444,6 +444,26 @@ impl<N: Real, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S>
                             Allocator<N, DimDiff<D, U1>> + // For Hessenberg.
                             Allocator<N, D, D> +
                             Allocator<N, D> {
+    /// Computes the Schur decomposition of a square matrix.
+    pub fn real_schur(self) -> RealSchur<N, D> {
+        RealSchur::new(self.into_owned())
+    }
+
+    /// Attempts to compute the Schur decomposition of a square matrix.
+    ///
+    /// If only eigenvalues are needed, it is more efficient to call the matrix method
+    /// `.eigenvalues()` instead.
+    ///
+    /// # Arguments
+    ///
+    /// * `eps`       − tolerance used to determine when a value converged to 0.
+    /// * `max_niter` − maximum total number of iterations performed by the algorithm. If this
+    ///   number of iterations is exceeded, `None` is returned. If `niter == 0`, then the algorithm
+    ///   continues indefinitely until convergence.
+    pub fn try_real_schur(self, eps: N, max_niter: usize) -> Option<RealSchur<N, D>> {
+        RealSchur::try_new(self.into_owned(), eps, max_niter)
+    }
+
     /// Computes the eigenvalues of this matrix.
     pub fn eigenvalues(&self) -> Option<VectorN<N, D>> {
         assert!(self.is_square(), "Unable to compute eigenvalues of a non-square matrix.");
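Note: a sketch of the two new Schur entry points (the tolerance and iteration cap passed to `try_real_schur` are arbitrary example values):

    use na::Matrix4;

    let m = Matrix4::<f64>::new_random();
    let (vecs, vals) = m.clone().real_schur().unpack();
    // `vecs * vals * vecs.transpose()` reproduces `m` up to floating-point error.
    println!("{}", vecs * vals * vecs.transpose());

    // The fallible variant exposes the convergence parameters and may return `None`.
    assert!(m.try_real_schur(1.0e-9, 0).is_some());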
@@ -45,7 +45,7 @@ impl<N: Real, R: DimMin<C>, C: Dim> SVD<N, R, C>
         Self::try_new(matrix, compute_u, compute_v, N::default_epsilon(), 0).unwrap()
     }

-    /// Computes the Singular Value Decomposition of `matrix` using implicit shift.
+    /// Attempts to compute the Singular Value Decomposition of `matrix` using implicit shift.
     ///
     /// # Arguments
     ///
@@ -487,6 +487,25 @@ impl<N: Real, R: DimMin<C>, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
                             Allocator<N, DimMinimum<R, C>, C> +
                             Allocator<N, R, DimMinimum<R, C>> +
                             Allocator<N, DimMinimum<R, C>> {
+    /// Computes the Singular Value Decomposition using implicit shift.
+    pub fn svd(self, compute_u: bool, compute_v: bool) -> SVD<N, R, C> {
+        SVD::new(self.into_owned(), compute_u, compute_v)
+    }
+
+    /// Attempts to compute the Singular Value Decomposition of `matrix` using implicit shift.
+    ///
+    /// # Arguments
+    ///
+    /// * `compute_u` − set this to `true` to enable the computation of left-singular vectors.
+    /// * `compute_v` − set this to `true` to enable the computation of right-singular vectors.
+    /// * `eps`       − tolerance used to determine when a value converged to 0.
+    /// * `max_niter` − maximum total number of iterations performed by the algorithm. If this
+    ///   number of iterations is exceeded, `None` is returned. If `niter == 0`, then the algorithm
+    ///   continues indefinitely until convergence.
+    pub fn try_svd(self, compute_u: bool, compute_v: bool, eps: N, max_niter: usize) -> Option<SVD<N, R, C>> {
+        SVD::try_new(self.into_owned(), compute_u, compute_v, eps, max_niter)
+    }
+
     /// Computes the singular values of this matrix.
     pub fn singular_values(&self) -> VectorN<N, DimMinimum<R, C>> {
         SVD::new(self.clone_owned(), false, false).singular_values
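Note: a sketch of the new `svd`/`try_svd` methods (the 5x3 random matrix and the convergence parameters are arbitrary example values):

    use na::{Matrix3, Matrix5x3};

    let m = Matrix5x3::<f64>::new_random();
    let svd = m.clone().svd(true, true);
    let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap());
    // `u * diag(s) * v_t` reproduces `m` up to floating-point error.
    println!("{}", u * Matrix3::from_diagonal(&s) * v_t);

    // The fallible variant exposes the convergence parameters and may return `None`.
    assert!(m.try_svd(true, true, 1.0e-9, 0).is_some());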
@@ -267,6 +267,29 @@ impl<N: Real, D: DimSub<U1>, S: Storage<N, D, D>> SquareMatrix<N, D, S>
     where DefaultAllocator: Allocator<N, D, D> +
                             Allocator<N, D> +
                             Allocator<N, DimDiff<D, U1>> {
+
+    /// Computes the eigendecomposition of this symmetric matrix.
+    ///
+    /// Only the lower-triangular part (including the diagonal) of `m` is read.
+    pub fn symmetric_eigen(self) -> SymmetricEigen<N, D> {
+        SymmetricEigen::new(self.into_owned())
+    }
+
+    /// Computes the eigendecomposition of the given symmetric matrix with user-specified
+    /// convergence parameters.
+    ///
+    /// Only the lower-triangular and diagonal parts of `m` are read.
+    ///
+    /// # Arguments
+    ///
+    /// * `eps`       − tolerance used to determine when a value converged to 0.
+    /// * `max_niter` − maximum total number of iterations performed by the algorithm. If this
+    ///   number of iterations is exceeded, `None` is returned. If `niter == 0`, then the algorithm
+    ///   continues indefinitely until convergence.
+    pub fn try_symmetric_eigen(self, eps: N, max_niter: usize) -> Option<SymmetricEigen<N, D>> {
+        SymmetricEigen::try_new(self.into_owned(), eps, max_niter)
+    }
+
     /// Computes the eigenvalues of this symmetric matrix.
     ///
     /// Only the lower-triangular part of the matrix is read.
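Note: a usage sketch for the new `symmetric_eigen` method (random input, illustrative only; only the lower triangle is read):

    use na::Matrix4;

    let m = Matrix4::<f64>::new_random();
    let eig = m.clone().symmetric_eigen();
    let recomp = eig.recompose();
    // The lower triangles of `m` and `recomp` agree up to floating-point error.
    println!("{}{}", m.lower_triangle(), recomp.lower_triangle());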
@@ -1,5 +1,5 @@
 use alga::general::Real;
-use core::{MatrixN, MatrixMN, VectorN, DefaultAllocator};
+use core::{SquareMatrix, MatrixN, MatrixMN, VectorN, DefaultAllocator};
 use dimension::{DimSub, DimDiff, U1};
 use storage::Storage;
 use allocator::Allocator;
|
@ -110,3 +110,15 @@ impl<N: Real, D: DimSub<U1>> SymmetricTridiagonal<N, D>
|
||||||
&q * self.tri * q.transpose()
|
&q * self.tri * q.transpose()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl<N: Real, D: DimSub<U1>, S: Storage<N, D, D>> SquareMatrix<N, D, S>
|
||||||
|
where DefaultAllocator: Allocator<N, D, D> +
|
||||||
|
Allocator<N, DimDiff<D, U1>> {
|
||||||
|
|
||||||
|
/// Computes the tridiagonalization of this symmetric matrix.
|
||||||
|
///
|
||||||
|
/// Only the lower-triangular and diagonal parts of `self` are read.
|
||||||
|
pub fn symmetric_tridiagonalize(self) -> SymmetricTridiagonal<N, D> {
|
||||||
|
SymmetricTridiagonal::new(self.into_owned())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
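Note: a usage sketch for the new `symmetric_tridiagonalize` method (random input, illustrative only; only the lower triangle is read):

    use na::Matrix4;

    let m = Matrix4::<f64>::new_random();
    let tri = m.clone().symmetric_tridiagonalize();
    let recomp = tri.recompose();
    // The lower triangles of `m` and `recomp` agree up to floating-point error.
    println!("{}{}", m.lower_triangle(), recomp.lower_triangle());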
@@ -1,4 +1,4 @@
-use na::{DMatrix, Matrix2, Matrix4, Matrix5x3, Matrix3x5, Bidiagonal};
+use na::{DMatrix, Matrix2, Matrix4, Matrix5x3, Matrix3x5};


 #[cfg(feature = "arbitrary")]

@@ -8,7 +8,7 @@ quickcheck! {
             return true;
         }

-        let bidiagonal = Bidiagonal::new(m.clone());
+        let bidiagonal = m.clone().bidiagonalize();
         let (u, d, v_t) = bidiagonal.unpack();

         println!("{}{}{}", &u, &d, &v_t);

@@ -18,7 +18,7 @@ quickcheck! {
     }

     fn bidiagonal_static_5_3(m: Matrix5x3<f64>) -> bool {
-        let bidiagonal = Bidiagonal::new(m);
+        let bidiagonal = m.bidiagonalize();
         let (u, d, v_t) = bidiagonal.unpack();

         println!("{}{}{}", &u, &d, &v_t);

@@ -28,7 +28,7 @@ quickcheck! {
     }

     fn bidiagonal_static_3_5(m: Matrix3x5<f64>) -> bool {
-        let bidiagonal = Bidiagonal::new(m);
+        let bidiagonal = m.bidiagonalize();
         let (u, d, v_t) = bidiagonal.unpack();

         println!("{}{}{}", &u, &d, &v_t);

@@ -38,7 +38,7 @@ quickcheck! {
     }

     fn bidiagonal_static_square(m: Matrix4<f64>) -> bool {
-        let bidiagonal = Bidiagonal::new(m);
+        let bidiagonal = m.bidiagonalize();
         let (u, d, v_t) = bidiagonal.unpack();

         println!("{}{}{}", &u, &d, &v_t);

@@ -48,7 +48,7 @@ quickcheck! {
     }

     fn bidiagonal_static_square_2x2(m: Matrix2<f64>) -> bool {
-        let bidiagonal = Bidiagonal::new(m);
+        let bidiagonal = m.bidiagonalize();
         let (u, d, v_t) = bidiagonal.unpack();

         println!("{}{}{}", &u, &d, &v_t);
@@ -1,5 +1,5 @@
 use std::cmp;
-use na::{DMatrix, Matrix4x3, DVector, Vector4, Cholesky};
+use na::{DMatrix, Matrix4x3, DVector, Vector4};
 use na::dimension::U4;
 use na::debug::RandomSDP;


@@ -11,14 +11,14 @@ quickcheck! {
         // Put garbage on the upper triangle to make sure it is not read by the decomposition.
         m.fill_upper_triangle(23.0, 1);

-        let l = Cholesky::new(m.clone()).unwrap().unpack();
+        let l = m.clone().cholesky().unwrap().unpack();
         m.fill_upper_triangle_with_lower_triangle();
         relative_eq!(m, &l * l.transpose(), epsilon = 1.0e-7)
     }

     fn cholesky_static(m: RandomSDP<f64, U4>) -> bool {
         let m = m.unwrap();
-        let chol = Cholesky::new(m).unwrap();
+        let chol = m.cholesky().unwrap();
         let l = chol.unpack();

         if !relative_eq!(m, &l * l.transpose(), epsilon = 1.0e-7) {

@@ -35,7 +35,7 @@ quickcheck! {
         let n = m.nrows();
         let nb = cmp::min(nb, 50); // To avoid slowing down the test too much.

-        let chol = Cholesky::new(m.clone()).unwrap();
+        let chol = m.clone().cholesky().unwrap();
         let b1 = DVector::new_random(n);
         let b2 = DMatrix::new_random(n, nb);


@@ -48,7 +48,7 @@ quickcheck! {

     fn cholesky_solve_static(m: RandomSDP<f64, U4>) -> bool {
         let m = m.unwrap();
-        let chol = Cholesky::new(m).unwrap();
+        let chol = m.clone().cholesky().unwrap();
         let b1 = Vector4::new_random();
         let b2 = Matrix4x3::new_random();


@@ -62,7 +62,7 @@ quickcheck! {
     fn cholesky_inverse(m: RandomSDP<f64>) -> bool {
         let m = m.unwrap();

-        let m1 = Cholesky::new(m.clone()).unwrap().inverse();
+        let m1 = m.clone().cholesky().unwrap().inverse();
         let id1 = &m  * &m1;
         let id2 = &m1 * &m;


@@ -71,7 +71,7 @@ quickcheck! {

     fn cholesky_inverse_static(m: RandomSDP<f64, U4>) -> bool {
         let m = m.unwrap();
-        let m1 = Cholesky::new(m.clone()).unwrap().inverse();
+        let m1 = m.clone().cholesky().unwrap().inverse();
         let id1 = &m  * &m1;
         let id2 = &m1 * &m;

@@ -1,13 +1,13 @@
 use std::cmp;

-use na::{DMatrix, Matrix2, Matrix3, Matrix4, SymmetricEigen};
+use na::{DMatrix, Matrix2, Matrix3, Matrix4};

 #[cfg(feature = "arbitrary")]
 quickcheck! {
     fn symmetric_eigen(n: usize) -> bool {
         let n = cmp::max(1, cmp::min(n, 10));
         let m = DMatrix::<f64>::new_random(n, n);
-        let eig = SymmetricEigen::new(m.clone());
+        let eig = m.clone().symmetric_eigen();
         let recomp = eig.recompose();

         println!("{}{}", m.lower_triangle(), recomp.lower_triangle());

@@ -20,7 +20,7 @@ quickcheck! {
         let mut m = DMatrix::<f64>::new_random(n, n);
         m.row_mut(n / 2).fill(0.0);
         m.column_mut(n / 2).fill(0.0);
-        let eig = SymmetricEigen::new(m.clone());
+        let eig = m.clone().symmetric_eigen();
         let recomp = eig.recompose();

         println!("{}{}", m.lower_triangle(), recomp.lower_triangle());

@@ -29,7 +29,7 @@ quickcheck! {
     }

     fn symmetric_eigen_static_square_4x4(m: Matrix4<f64>) -> bool {
-        let eig = SymmetricEigen::new(m);
+        let eig = m.symmetric_eigen();
         let recomp = eig.recompose();

         println!("{}{}", m.lower_triangle(), recomp.lower_triangle());

@@ -38,7 +38,7 @@ quickcheck! {
     }

     fn symmetric_eigen_static_square_3x3(m: Matrix3<f64>) -> bool {
-        let eig = SymmetricEigen::new(m);
+        let eig = m.symmetric_eigen();
         let recomp = eig.recompose();

         println!("{}{}", m.lower_triangle(), recomp.lower_triangle());

@@ -47,7 +47,7 @@ quickcheck! {
     }

     fn symmetric_eigen_static_square_2x2(m: Matrix2<f64>) -> bool {
-        let eig = SymmetricEigen::new(m);
+        let eig = m.symmetric_eigen();
         let recomp = eig.recompose();

         println!("{}{}", m.lower_triangle(), recomp.lower_triangle());

@@ -85,7 +85,7 @@ fn symmetric_eigen_singular_24x24() {
         0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0]);

-    let eig = SymmetricEigen::new(m.clone());
+    let eig = m.clone().symmetric_eigen();
     let recomp = eig.recompose();

     assert!(relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5));
@@ -1,7 +1,6 @@
 use std::cmp;
 use na::{DMatrix, Matrix3, Matrix4, Matrix4x3, Matrix5x3, Matrix3x5,
-         DVector, Vector4,
-         FullPivLU};
+         DVector, Vector4};


 #[test]

@@ -11,7 +10,7 @@ fn full_piv_lu_simple() {
                         -1.0,  2.0, -1.0,
                          0.0, -1.0,  2.0);

-    let lu = FullPivLU::new(m);
+    let lu = m.full_piv_lu();
     assert_eq!(lu.determinant(), 4.0);

     let (p, l, u, q) = lu.unpack();

@@ -30,7 +29,7 @@ fn full_piv_lu_simple_with_pivot() {
                         -1.0,  2.0, -1.0,
                          2.0, -1.0,  0.0);

-    let lu = FullPivLU::new(m);
+    let lu = m.full_piv_lu();
     assert_eq!(lu.determinant(), -4.0);

     let (p, l, u, q) = lu.unpack();

@@ -50,7 +49,7 @@ quickcheck! {
             m = DMatrix::new_random(1, 1);
         }

-        let lu = FullPivLU::new(m.clone());
+        let lu = m.clone().full_piv_lu();
         let (p, l, u, q) = lu.unpack();
         let mut lu = l * u;
         p.inv_permute_rows(&mut lu);

@@ -60,7 +59,7 @@ quickcheck! {
     }

     fn full_piv_lu_static_3_5(m: Matrix3x5<f64>) -> bool {
-        let lu = FullPivLU::new(m);
+        let lu = m.full_piv_lu();
         let (p, l, u, q) = lu.unpack();
         let mut lu = l * u;
         p.inv_permute_rows(&mut lu);

@@ -70,7 +69,7 @@ quickcheck! {
     }

     fn full_piv_lu_static_5_3(m: Matrix5x3<f64>) -> bool {
-        let lu = FullPivLU::new(m);
+        let lu = m.full_piv_lu();
         let (p, l, u, q) = lu.unpack();
         let mut lu = l * u;
         p.inv_permute_rows(&mut lu);

@@ -80,7 +79,7 @@ quickcheck! {
     }

     fn full_piv_lu_static_square(m: Matrix4<f64>) -> bool {
-        let lu = FullPivLU::new(m);
+        let lu = m.full_piv_lu();
         let (p, l, u, q) = lu.unpack();
         let mut lu = l * u;
         p.inv_permute_rows(&mut lu);

@@ -95,7 +94,7 @@ quickcheck! {
         let nb = cmp::min(nb, 50); // To avoid slowing down the test too much.
         let m  = DMatrix::<f64>::new_random(n, n);

-        let lu = FullPivLU::new(m.clone());
+        let lu = m.clone().full_piv_lu();
         let b1 = DVector::new_random(n);
         let b2 = DMatrix::new_random(n, nb);


@@ -110,7 +109,7 @@ quickcheck! {
     }

     fn full_piv_lu_solve_static(m: Matrix4<f64>) -> bool {
-        let lu = FullPivLU::new(m);
+        let lu = m.full_piv_lu();
         let b1 = Vector4::new_random();
         let b2 = Matrix4x3::new_random();


@@ -133,7 +132,7 @@ quickcheck! {
         u.fill_diagonal(1.0);
         let m = l * u;

-        let m1 = FullPivLU::new(m.clone()).try_inverse().unwrap();
+        let m1 = m.clone().full_piv_lu().try_inverse().unwrap();
         let id1 = &m  * &m1;
         let id2 = &m1 * &m;


@@ -141,7 +140,7 @@ quickcheck! {
     }

     fn full_piv_lu_inverse_static(m: Matrix4<f64>) -> bool {
-        let lu = FullPivLU::new(m);
+        let lu = m.full_piv_lu();

         if let Some(m1) = lu.try_inverse() {
             let id1 = &m  * &m1;
@@ -1,12 +1,12 @@
 use std::cmp;
-use na::{DMatrix, Matrix2, Matrix4, Hessenberg};
+use na::{DMatrix, Matrix2, Matrix4};


 #[test]
 fn hessenberg_simple() {
     let m = Matrix2::new(1.0, 0.0,
                          1.0, 3.0);
-    let hess = Hessenberg::new(m);
+    let hess = m.hessenberg();
     let (p, h) = hess.unpack();
     assert!(relative_eq!(m, p * h * p.transpose(), epsilon = 1.0e-7))
 }

@@ -18,19 +18,19 @@ quickcheck! {
         let n = cmp::max(1, cmp::min(n, 50));
         let m = DMatrix::<f64>::new_random(n, n);

-        let hess = Hessenberg::new(m.clone());
+        let hess = m.clone().hessenberg();
         let (p, h) = hess.unpack();
         relative_eq!(m, &p * h * p.transpose(), epsilon = 1.0e-7)
     }

     fn hessenberg_static_mat2(m: Matrix2<f64>) -> bool {
-        let hess = Hessenberg::new(m);
+        let hess = m.hessenberg();
         let (p, h) = hess.unpack();
         relative_eq!(m, p * h * p.transpose(), epsilon = 1.0e-7)
     }

     fn hessenberg_static(m: Matrix4<f64>) -> bool {
-        let hess = Hessenberg::new(m);
+        let hess = m.hessenberg();
         let (p, h) = hess.unpack();
         relative_eq!(m, p * h * p.transpose(), epsilon = 1.0e-7)
     }
@@ -1,7 +1,6 @@
 use std::cmp;
 use na::{DMatrix, Matrix3, Matrix4, Matrix4x3, Matrix5x3, Matrix3x5,
-         DVector, Vector4,
-         LU};
+         DVector, Vector4};


 #[test]

@@ -11,7 +10,7 @@ fn lu_simple() {
                         -1.0,  2.0, -1.0,
                          0.0, -1.0,  2.0);

-    let lu = LU::new(m);
+    let lu = m.lu();
     assert_eq!(lu.determinant(), 4.0);

     let (p, l, u) = lu.unpack();

@@ -29,7 +28,7 @@ fn lu_simple_with_pivot() {
                         -1.0,  2.0, -1.0,
                          2.0, -1.0,  0.0);

-    let lu = LU::new(m);
+    let lu = m.lu();
     assert_eq!(lu.determinant(), -4.0);

     let (p, l, u) = lu.unpack();

@@ -48,7 +47,7 @@ quickcheck! {
             m = DMatrix::new_random(1, 1);
         }

-        let lu = LU::new(m.clone());
+        let lu = m.clone().lu();
         let (p, l, u) = lu.unpack();
         let mut lu = l * u;
         p.inv_permute_rows(&mut lu);

@@ -57,7 +56,7 @@ quickcheck! {
     }

     fn lu_static_3_5(m: Matrix3x5<f64>) -> bool {
-        let lu = LU::new(m);
+        let lu = m.lu();
         let (p, l, u) = lu.unpack();
         let mut lu = l * u;
         p.inv_permute_rows(&mut lu);

@@ -66,7 +65,7 @@ quickcheck! {
     }

     fn lu_static_5_3(m: Matrix5x3<f64>) -> bool {
-        let lu = LU::new(m);
+        let lu = m.lu();
         let (p, l, u) = lu.unpack();
         let mut lu = l * u;
         p.inv_permute_rows(&mut lu);

@@ -75,7 +74,7 @@ quickcheck! {
     }

     fn lu_static_square(m: Matrix4<f64>) -> bool {
-        let lu = LU::new(m);
+        let lu = m.lu();
         let (p, l, u) = lu.unpack();
         let mut lu = l * u;
         p.inv_permute_rows(&mut lu);

@@ -89,7 +88,7 @@ quickcheck! {
         let nb = cmp::min(nb, 50); // To avoid slowing down the test too much.
         let m  = DMatrix::<f64>::new_random(n, n);

-        let lu = LU::new(m.clone());
+        let lu = m.clone().lu();
         let b1 = DVector::new_random(n);
         let b2 = DMatrix::new_random(n, nb);


@@ -104,7 +103,7 @@ quickcheck! {
     }

     fn lu_solve_static(m: Matrix4<f64>) -> bool {
-        let lu = LU::new(m);
+        let lu = m.lu();
         let b1 = Vector4::new_random();
         let b2 = Matrix4x3::new_random();


@@ -127,7 +126,7 @@ quickcheck! {
         u.fill_diagonal(1.0);
         let m = l * u;

-        let m1 = LU::new(m.clone()).try_inverse().unwrap();
+        let m1 = m.clone().lu().try_inverse().unwrap();
         let id1 = &m  * &m1;
         let id2 = &m1 * &m;


@@ -135,7 +134,7 @@ quickcheck! {
     }

     fn lu_inverse_static(m: Matrix4<f64>) -> bool {
-        let lu = LU::new(m);
+        let lu = m.lu();

         if let Some(m1) = lu.try_inverse() {
             let id1 = &m  * &m1;
@@ -1,11 +1,11 @@
 use std::cmp;
 use na::{DMatrix, Matrix4, Matrix4x3, Matrix5x3, Matrix3x5,
-         DVector, Vector4, QR};
+         DVector, Vector4};

 #[cfg(feature = "arbitrary")]
 quickcheck! {
     fn qr(m: DMatrix<f64>) -> bool {
-        let qr = QR::new(m.clone());
+        let qr = m.clone().qr();
         let q  = qr.q();
         let r  = qr.r();


@@ -14,7 +14,7 @@ quickcheck! {
     }

     fn qr_static_5_3(m: Matrix5x3<f64>) -> bool {
-        let qr = QR::new(m);
+        let qr = m.qr();
         let q  = qr.q();
         let r  = qr.r();


@@ -23,7 +23,7 @@ quickcheck! {
     }

     fn qr_static_3_5(m: Matrix3x5<f64>) -> bool {
-        let qr = QR::new(m);
+        let qr = m.qr();
         let q  = qr.q();
         let r  = qr.r();


@@ -32,7 +32,7 @@ quickcheck! {
     }

     fn qr_static_square(m: Matrix4<f64>) -> bool {
-        let qr = QR::new(m);
+        let qr = m.qr();
         let q  = qr.q();
         let r  = qr.r();


@@ -48,7 +48,7 @@ quickcheck! {
         let nb = cmp::min(nb, 50); // To avoid slowing down the test too much.
         let m  = DMatrix::<f64>::new_random(n, n);

-        let qr = QR::new(m.clone());
+        let qr = m.clone().qr();
         let b1 = DVector::new_random(n);
         let b2 = DMatrix::new_random(n, nb);


@@ -65,7 +65,7 @@ quickcheck! {
     }

     fn qr_solve_static(m: Matrix4<f64>) -> bool {
-        let qr = QR::new(m);
+        let qr = m.qr();
         let b1 = Vector4::new_random();
         let b2 = Matrix4x3::new_random();


@@ -85,7 +85,7 @@ quickcheck! {
         let n = cmp::max(1, cmp::min(n, 15)); // To avoid slowing down the test too much.
         let m = DMatrix::<f64>::new_random(n, n);

-        if let Some(m1) = QR::new(m.clone()).try_inverse() {
+        if let Some(m1) = m.clone().qr().try_inverse() {
             let id1 = &m  * &m1;
             let id2 = &m1 * &m;


@@ -97,7 +97,7 @@ quickcheck! {
     }

     fn qr_inverse_static(m: Matrix4<f64>) -> bool {
-        let qr = QR::new(m);
+        let qr = m.qr();

         if let Some(m1) = qr.try_inverse() {
             let id1 = &m  * &m1;
@@ -1,5 +1,5 @@
 use std::cmp;
-use na::{DMatrix, Matrix2, Matrix3, Matrix4, RealSchur};
+use na::{DMatrix, Matrix2, Matrix3, Matrix4};


 #[test]

@@ -8,7 +8,7 @@ fn schur_simpl_mat3() {
                        -2.0,  1.0,  2.0,
                         4.0,  2.0,  5.0);

-    let schur = RealSchur::new(m);
+    let schur = m.real_schur();
     let (vecs, vals) = schur.unpack();

     assert!(relative_eq!(vecs * vals * vecs.transpose(), m, epsilon = 1.0e-7));

@@ -20,7 +20,7 @@ quickcheck! {
         let n = cmp::max(1, cmp::min(n, 10));
         let m = DMatrix::<f64>::new_random(n, n);

-        let (vecs, vals) = RealSchur::new(m.clone()).unpack();
+        let (vecs, vals) = m.clone().real_schur().unpack();

         if !relative_eq!(&vecs * &vals * vecs.transpose(), m, epsilon = 1.0e-7) {
             println!("{:.5}{:.5}", m, &vecs * &vals * vecs.transpose());

@@ -30,7 +30,7 @@ quickcheck! {
     }

     fn schur_static_mat2(m: Matrix2<f64>) -> bool {
-        let (vecs, vals) = RealSchur::new(m.clone()).unpack();
+        let (vecs, vals) = m.clone().real_schur().unpack();

         let ok = relative_eq!(vecs * vals * vecs.transpose(), m, epsilon = 1.0e-7);
         if !ok {

@@ -41,7 +41,7 @@ quickcheck! {
     }

     fn schur_static_mat3(m: Matrix3<f64>) -> bool {
-        let (vecs, vals) = RealSchur::new(m.clone()).unpack();
+        let (vecs, vals) = m.clone().real_schur().unpack();

         let ok = relative_eq!(vecs * vals * vecs.transpose(), m, epsilon = 1.0e-7);
         if !ok {

@@ -51,7 +51,7 @@ quickcheck! {
     }

     fn schur_static_mat4(m: Matrix4<f64>) -> bool {
-        let (vecs, vals) = RealSchur::new(m.clone()).unpack();
+        let (vecs, vals) = m.clone().real_schur().unpack();

         let ok = relative_eq!(vecs * vals * vecs.transpose(), m, epsilon = 1.0e-7);
         if !ok {

@@ -69,7 +69,7 @@ fn schur_static_mat4_fail() {
                        -94.61793793643038, -18.64216213611094, 88.32376703241675, -99.30169870309795,
                         90.62661897246733, 96.74200696130146, 34.7421322611369, 84.86773307198098);

-    let (vecs, vals) = RealSchur::new(m.clone()).unpack();
+    let (vecs, vals) = m.clone().real_schur().unpack();
     println!("{:.6}{:.6}", m, &vecs * &vals * vecs.transpose());
     assert!(relative_eq!(vecs * vals * vecs.transpose(), m, epsilon = 1.0e-7))
 }

@@ -82,7 +82,7 @@ fn schur_static_mat4_fail2() {
                         27.932377940728202, 82.94220150938, -35.5898884705951, 67.56447552434219,
                         55.66754906908682, -42.14328890569226, -20.684709585152206, -87.9456949841046);

-    let (vecs, vals) = RealSchur::new(m.clone()).unpack();
+    let (vecs, vals) = m.clone().real_schur().unpack();
     println!("{:.6}{:.6}", m, &vecs * &vals * vecs.transpose());
     assert!(relative_eq!(vecs * vals * vecs.transpose(), m, epsilon = 1.0e-7))
 }

@@ -94,7 +94,7 @@ fn schur_static_mat3_fail() {
                        -7.525423104386547, -17.827350599642287, 11.297377444555849,
                         38.080736654870464, -84.27428302131528, -95.88198590331922);

-    let (vecs, vals) = RealSchur::new(m.clone()).unpack();
+    let (vecs, vals) = m.clone().real_schur().unpack();
     println!("{:.6}{:.6}", m, &vecs * &vals * vecs.transpose());
     assert!(relative_eq!(vecs * vals * vecs.transpose(), m, epsilon = 1.0e-7))
 }

@@ -128,7 +128,7 @@ fn schur_singular() {
         0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0]);

-    let (vecs, vals) = RealSchur::new(m.clone()).unpack();
+    let (vecs, vals) = m.clone().real_schur().unpack();
     println!("{:.6}{:.6}", m, &vecs * &vals * vecs.transpose());
     assert!(relative_eq!(&vecs * vals * vecs.transpose(), m, epsilon = 1.0e-7))
 }
@@ -1,15 +1,14 @@
 use std::cmp;

 use na::{DMatrix, Matrix2, Matrix3, Matrix4, Matrix6, Matrix5x2, Matrix5x3, Matrix2x5, Matrix3x5,
-         DVector,
-         SVD};
+         DVector};


 #[cfg(feature = "arbitrary")]
 quickcheck! {
     fn svd(m: DMatrix<f64>) -> bool {
         if m.len() > 0 {
-            let svd = SVD::new(m.clone(), true, true);
+            let svd = m.clone().svd(true, true);
             let recomp_m = svd.clone().recompose();
             let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap());
             let ds = DMatrix::from_diagonal(&s);

@@ -26,7 +25,7 @@ quickcheck! {
     }

     fn svd_static_5_3(m: Matrix5x3<f64>) -> bool {
-        let svd = SVD::new(m, true, true);
+        let svd = m.svd(true, true);
         let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap());
         let ds = Matrix3::from_diagonal(&s);


@@ -37,7 +36,7 @@ quickcheck! {
     }

     fn svd_static_5_2(m: Matrix5x2<f64>) -> bool {
-        let svd = SVD::new(m, true, true);
+        let svd = m.svd(true, true);
         let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap());
         let ds = Matrix2::from_diagonal(&s);


@@ -48,7 +47,7 @@ quickcheck! {
     }

     fn svd_static_3_5(m: Matrix3x5<f64>) -> bool {
-        let svd = SVD::new(m, true, true);
+        let svd = m.svd(true, true);
         let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap());

         let ds = Matrix3::from_diagonal(&s);

@@ -58,7 +57,7 @@ quickcheck! {
     }

     fn svd_static_2_5(m: Matrix2x5<f64>) -> bool {
-        let svd = SVD::new(m, true, true);
+        let svd = m.svd(true, true);
         let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap());
         let ds = Matrix2::from_diagonal(&s);


@@ -67,7 +66,7 @@ quickcheck! {
     }

     fn svd_static_square(m: Matrix4<f64>) -> bool {
-        let svd = SVD::new(m, true, true);
+        let svd = m.svd(true, true);
         let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap());
         let ds = Matrix4::from_diagonal(&s);


@@ -78,7 +77,7 @@ quickcheck! {
     }

     fn svd_static_square_2x2(m: Matrix2<f64>) -> bool {
-        let svd = SVD::new(m, true, true);
+        let svd = m.svd(true, true);
         let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap());
         let ds = Matrix2::from_diagonal(&s);


@@ -90,7 +89,7 @@ quickcheck! {

     fn svd_pseudo_inverse(m: DMatrix<f64>) -> bool {
         if m.len() > 0 {
-            let svd = SVD::new(m.clone(), true, true);
+            let svd = m.clone().svd(true, true);
             let pinv = svd.pseudo_inverse(1.0e-10);

             if m.nrows() > m.ncols() {

@@ -112,7 +111,7 @@ quickcheck! {
         let nb = cmp::min(nb, 10);
         let m = DMatrix::<f64>::new_random(n, n);

-        let svd = SVD::new(m.clone(), true, true);
+        let svd = m.clone().svd(true, true);

         if svd.rank(1.0e-7) == n {
             let b1 = DVector::new_random(n);

@@ -169,7 +168,7 @@ fn svd_singular() {
         0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0]);

-    let svd = SVD::new(m.clone(), true, true);
+    let svd = m.clone().svd(true, true);
     let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap());
     let ds = DMatrix::from_diagonal(&s);


@@ -212,7 +211,7 @@ fn svd_singular_vertical() {
         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0]);


-    let svd = SVD::new(m.clone(), true, true);
+    let svd = m.clone().svd(true, true);
     let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap());
     let ds = DMatrix::from_diagonal(&s);


@@ -249,7 +248,7 @@ fn svd_singular_horizontal() {
         0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]);

-    let svd = SVD::new(m.clone(), true, true);
+    let svd = m.clone().svd(true, true);
     let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap());
     let ds = DMatrix::from_diagonal(&s);


@@ -261,22 +260,22 @@ fn svd_singular_horizontal() {
 #[test]
 fn svd_zeros() {
     let m = DMatrix::from_element(10, 10, 0.0);
-    let svd = SVD::new(m.clone(), true, true);
+    let svd = m.clone().svd(true, true);
     assert_eq!(m, svd.recompose());
 }

 #[test]
 fn svd_identity() {
     let m = DMatrix::<f64>::identity(10, 10);
-    let svd = SVD::new(m.clone(), true, true);
+    let svd = m.clone().svd(true, true);
     assert_eq!(m, svd.recompose());

     let m = DMatrix::<f64>::identity(10, 15);
-    let svd = SVD::new(m.clone(), true, true);
+    let svd = m.clone().svd(true, true);
     assert_eq!(m, svd.recompose());

     let m = DMatrix::<f64>::identity(15, 10);
-    let svd = SVD::new(m.clone(), true, true);
+    let svd = m.clone().svd(true, true);
     assert_eq!(m, svd.recompose());
 }


@@ -293,7 +292,7 @@ fn svd_with_delimited_subproblem() {
     m[(7,7)] = 14.0; m[(3,8)] = 13.0;
     m[(8,8)] = 16.0; m[(3,9)] = 17.0;
     m[(9,9)] = 18.0;
-    let svd = SVD::new(m.clone(), true, true);
+    let svd = m.clone().svd(true, true);
     assert!(relative_eq!(m, svd.recompose(), epsilon = 1.0e-7));

     // Rectangular versions.

@@ -308,10 +307,10 @@ fn svd_with_delimited_subproblem() {
     m[(7,7)] = 14.0; m[(3,8)] = 13.0;
     m[(8,8)] = 16.0; m[(3,9)] = 17.0;
     m[(9,9)] = 18.0;
-    let svd = SVD::new(m.clone(), true, true);
+    let svd = m.clone().svd(true, true);
     assert!(relative_eq!(m, svd.recompose(), epsilon = 1.0e-7));

-    let svd = SVD::new(m.transpose(), true, true);
+    let svd = m.transpose().svd(true, true);
     assert!(relative_eq!(m.transpose(), svd.recompose(), epsilon = 1.0e-7));
 }


@@ -324,7 +323,7 @@ fn svd_fail() {
         0.07311092531259344, 0.5579247949052946, 0.14518764691585773, 0.03502980663114896, 0.7991329455957719, 0.4929930019965745,
         0.12293810556077789, 0.6617084679545999, 0.9002240700227326, 0.027153062135304884, 0.3630189466989524, 0.18207502727558866,
         0.843196731466686, 0.08951878746549924, 0.7533450877576973, 0.009558876499740077, 0.9429679490873482, 0.9355764454129878);
-    let svd = SVD::new(m.clone(), true, true);
+    let svd = m.clone().svd(true, true);
     println!("Singular values: {}", svd.singular_values);
     println!("u: {:.5}", svd.u.unwrap());
     println!("v: {:.5}", svd.v_t.unwrap());
@@ -1,6 +1,6 @@
 use std::cmp;

-use na::{DMatrix, Matrix2, Matrix4, SymmetricTridiagonal};
+use na::{DMatrix, Matrix2, Matrix4};


 #[cfg(feature = "arbitrary")]

@@ -8,7 +8,7 @@ quickcheck! {
     fn symm_tridiagonal(n: usize) -> bool {
         let n = cmp::max(1, cmp::min(n, 50));
         let m = DMatrix::<f64>::new_random(n, n);
-        let tri = SymmetricTridiagonal::new(m.clone());
+        let tri = m.clone().symmetric_tridiagonalize();
         let recomp = tri.recompose();

         println!("{}{}", m.lower_triangle(), recomp.lower_triangle());

@@ -17,7 +17,7 @@ quickcheck! {
     }

     fn symm_tridiagonal_static_square(m: Matrix4<f64>) -> bool {
-        let tri = SymmetricTridiagonal::new(m);
+        let tri = m.symmetric_tridiagonalize();
         println!("{}{}", tri.internal_tri(), tri.off_diagonal());
         let recomp = tri.recompose();


@@ -27,7 +27,7 @@ quickcheck! {
     }

     fn symm_tridiagonal_static_square_2x2(m: Matrix2<f64>) -> bool {
-        let tri = SymmetricTridiagonal::new(m);
+        let tri = m.symmetric_tridiagonalize();
        let recomp = tri.recompose();

         relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-7)