From 1001e8ee0f7500e6274e19d2908a4c3497172d8e Mon Sep 17 00:00:00 2001 From: sebcrozet Date: Sat, 23 Mar 2019 11:46:56 +0100 Subject: [PATCH] Cleanup warnings and rename Schur -> RealSchur --- benches/linalg/schur.rs | 10 +- nalgebra-lapack/src/lib.rs | 2 +- nalgebra-lapack/src/schur.rs | 14 +- nalgebra-lapack/tests/linalg/mod.rs | 2 +- nalgebra-lapack/tests/linalg/real_schur.rs | 6 +- src/base/blas.rs | 6 +- src/base/matrix.rs | 13 +- src/base/matrix_alga.rs | 2 +- src/base/norm.rs | 5 +- src/base/unit.rs | 2 - src/debug/random_orthogonal.rs | 17 +- src/debug/random_sdp.rs | 13 +- src/lib.rs | 2 +- src/linalg/cholesky.rs | 26 +- src/linalg/eigen.rs | 4 +- src/linalg/givens.rs | 4 +- src/linalg/schur.rs | 32 +-- src/linalg/svd.rs | 5 +- src/linalg/symmetric_eigen.rs | 13 +- src/linalg/symmetric_tridiagonal.rs | 5 +- tests/core/abomonation.rs | 2 +- tests/linalg/bidiagonal.rs | 123 +++++----- tests/linalg/cholesky.rs | 148 ++++++------ tests/linalg/eigen.rs | 111 +++++---- tests/linalg/full_piv_lu.rs | 192 ++++++++------- tests/linalg/hessenberg.rs | 59 +++-- tests/linalg/lu.rs | 1 + tests/linalg/mod.rs | 2 +- tests/linalg/qr.rs | 2 +- tests/linalg/{real_schur.rs => schur.rs} | 119 ++++----- tests/linalg/solve.rs | 110 +++++---- tests/linalg/svd.rs | 266 ++++++++++----------- tests/linalg/tridiagonal.rs | 87 ++++--- 33 files changed, 714 insertions(+), 691 deletions(-) rename tests/linalg/{real_schur.rs => schur.rs} (65%) diff --git a/benches/linalg/schur.rs b/benches/linalg/schur.rs index e0e588ac..ce0b741f 100644 --- a/benches/linalg/schur.rs +++ b/benches/linalg/schur.rs @@ -1,28 +1,28 @@ -use na::{Matrix4, RealSchur}; +use na::{Matrix4, Schur}; use test::{self, Bencher}; #[bench] fn schur_decompose_4x4(bh: &mut Bencher) { let m = Matrix4::::new_random(); - bh.iter(|| test::black_box(RealSchur::new(m.clone()))) + bh.iter(|| test::black_box(Schur::new(m.clone()))) } #[bench] fn schur_decompose_10x10(bh: &mut Bencher) { let m = ::reproductible_dmatrix(10, 10); - bh.iter(|| test::black_box(RealSchur::new(m.clone()))) + bh.iter(|| test::black_box(Schur::new(m.clone()))) } #[bench] fn schur_decompose_100x100(bh: &mut Bencher) { let m = ::reproductible_dmatrix(100, 100); - bh.iter(|| test::black_box(RealSchur::new(m.clone()))) + bh.iter(|| test::black_box(Schur::new(m.clone()))) } #[bench] fn schur_decompose_200x200(bh: &mut Bencher) { let m = ::reproductible_dmatrix(200, 200); - bh.iter(|| test::black_box(RealSchur::new(m.clone()))) + bh.iter(|| test::black_box(Schur::new(m.clone()))) } #[bench] diff --git a/nalgebra-lapack/src/lib.rs b/nalgebra-lapack/src/lib.rs index c343ba83..a1a7b7b7 100644 --- a/nalgebra-lapack/src/lib.rs +++ b/nalgebra-lapack/src/lib.rs @@ -98,7 +98,7 @@ pub use self::eigen::Eigen; pub use self::hessenberg::Hessenberg; pub use self::lu::{LUScalar, LU}; pub use self::qr::QR; -pub use self::schur::RealSchur; +pub use self::schur::Schur; pub use self::svd::SVD; pub use self::symmetric_eigen::SymmetricEigen; diff --git a/nalgebra-lapack/src/schur.rs b/nalgebra-lapack/src/schur.rs index a09b31ff..ab2423cb 100644 --- a/nalgebra-lapack/src/schur.rs +++ b/nalgebra-lapack/src/schur.rs @@ -33,7 +33,7 @@ use lapack; )) )] #[derive(Clone, Debug)] -pub struct RealSchur +pub struct Schur where DefaultAllocator: Allocator + Allocator { re: VectorN, @@ -42,21 +42,21 @@ where DefaultAllocator: Allocator + Allocator q: MatrixN, } -impl Copy for RealSchur +impl Copy for Schur where DefaultAllocator: Allocator + Allocator, MatrixN: Copy, VectorN: Copy, {} -impl RealSchur +impl Schur where 
DefaultAllocator: Allocator + Allocator { /// Computes the eigenvalues and real Schur form of the matrix `m`. /// /// Panics if the method did not converge. pub fn new(m: MatrixN) -> Self { - Self::try_new(m).expect("RealSchur decomposition: convergence failed.") + Self::try_new(m).expect("Schur decomposition: convergence failed.") } /// Computes the eigenvalues and real Schur form of the matrix `m`. @@ -118,7 +118,7 @@ where DefaultAllocator: Allocator + Allocator ); lapack_check!(info); - Some(RealSchur { + Some(Schur { re: wr, im: wi, t: m, @@ -162,7 +162,7 @@ where DefaultAllocator: Allocator + Allocator * */ /// Trait implemented by scalars for which Lapack implements the Real Schur decomposition. -pub trait RealSchurScalar: Scalar { +pub trait SchurScalar: Scalar { #[allow(missing_docs)] fn xgees( jobvs: u8, @@ -202,7 +202,7 @@ pub trait RealSchurScalar: Scalar { macro_rules! real_eigensystem_scalar_impl ( ($N: ty, $xgees: path) => ( - impl RealSchurScalar for $N { + impl SchurScalar for $N { #[inline] fn xgees(jobvs: u8, sort: u8, diff --git a/nalgebra-lapack/tests/linalg/mod.rs b/nalgebra-lapack/tests/linalg/mod.rs index ba228308..a6742217 100644 --- a/nalgebra-lapack/tests/linalg/mod.rs +++ b/nalgebra-lapack/tests/linalg/mod.rs @@ -2,6 +2,6 @@ mod cholesky; mod lu; mod qr; mod real_eigensystem; -mod real_schur; +mod schur; mod svd; mod symmetric_eigen; diff --git a/nalgebra-lapack/tests/linalg/real_schur.rs b/nalgebra-lapack/tests/linalg/real_schur.rs index 127107dd..ccdb0f0b 100644 --- a/nalgebra-lapack/tests/linalg/real_schur.rs +++ b/nalgebra-lapack/tests/linalg/real_schur.rs @@ -1,5 +1,5 @@ use na::{DMatrix, Matrix4}; -use nl::RealSchur; +use nl::Schur; use std::cmp; quickcheck! { @@ -7,13 +7,13 @@ quickcheck! { let n = cmp::max(1, cmp::min(n, 10)); let m = DMatrix::::new_random(n, n); - let (vecs, vals) = RealSchur::new(m.clone()).unpack(); + let (vecs, vals) = Schur::new(m.clone()).unpack(); relative_eq!(&vecs * vals * vecs.transpose(), m, epsilon = 1.0e-7) } fn schur_static(m: Matrix4) -> bool { - let (vecs, vals) = RealSchur::new(m.clone()).unpack(); + let (vecs, vals) = Schur::new(m.clone()).unpack(); relative_eq!(vecs * vals * vecs.transpose(), m, epsilon = 1.0e-7) } diff --git a/src/base/blas.rs b/src/base/blas.rs index af20abae..7dce36b6 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -21,6 +21,8 @@ impl> Vector { /// # Examples: /// /// ``` + /// # extern crate num_complex; + /// # extern crate nalgebra; /// # use num_complex::Complex; /// # use nalgebra::Vector3; /// let vec = Vector3::new(Complex::new(11.0, 3.0), Complex::new(-15.0, 0.0), Complex::new(13.0, 5.0)); @@ -197,11 +199,13 @@ impl> Matrix { /// # Examples: /// /// ``` + /// # extern crate num_complex; + /// # extern crate nalgebra; /// # use num_complex::Complex; /// # use nalgebra::Matrix2x3; /// let mat = Matrix2x3::new(Complex::new(11.0, 1.0), Complex::new(-12.0, 2.0), Complex::new(13.0, 3.0), /// Complex::new(21.0, 43.0), Complex::new(22.0, 5.0), Complex::new(-23.0, 0.0)); - /// assert_eq!(mat.iamax_full(), (1, 0)); + /// assert_eq!(mat.icamax_full(), (1, 0)); /// ``` #[inline] pub fn icamax_full(&self) -> (usize, usize) { diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 73fe3fa4..8f6c071a 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -773,7 +773,7 @@ impl> Matrix { // FIXME: rename `apply` to `apply_mut` and `apply_into` to `apply`? /// Returns `self` with each of its components replaced by the result of a closure `f` applied on it. 
#[inline] - pub fn apply_into N>(mut self, mut f: F) -> Self{ + pub fn apply_into N>(mut self, f: F) -> Self{ self.apply(f); self } @@ -1093,14 +1093,11 @@ impl> SquareMatrix { pub fn hermitian_part(&self) -> MatrixMN where DefaultAllocator: Allocator { assert!(self.is_square(), "Cannot compute the hermitian part of a non-square matrix."); - let nrows = self.data.shape().0; - unsafe { - let mut tr = self.conjugate_transpose(); - tr += self; - tr *= ::convert::<_, N>(0.5); - tr - } + let mut tr = self.conjugate_transpose(); + tr += self; + tr *= ::convert::<_, N>(0.5); + tr } } diff --git a/src/base/matrix_alga.rs b/src/base/matrix_alga.rs index 35db628b..a2e1dd54 100644 --- a/src/base/matrix_alga.rs +++ b/src/base/matrix_alga.rs @@ -7,7 +7,7 @@ use alga::general::{ AbstractGroup, AbstractGroupAbelian, AbstractLoop, AbstractMagma, AbstractModule, AbstractMonoid, AbstractQuasigroup, AbstractSemigroup, Additive, ClosedAdd, ClosedMul, ClosedNeg, Field, Identity, TwoSidedInverse, JoinSemilattice, Lattice, MeetSemilattice, Module, - Multiplicative, Real, RingCommutative, Complex + Multiplicative, RingCommutative, Complex }; use alga::linear::{ FiniteDimInnerSpace, FiniteDimVectorSpace, InnerSpace, NormedSpace, VectorSpace, diff --git a/src/base/norm.rs b/src/base/norm.rs index 1b814d74..b3056067 100644 --- a/src/base/norm.rs +++ b/src/base/norm.rs @@ -1,8 +1,7 @@ -use num::{Signed, Zero}; -use std::cmp::PartialOrd; +use num::Zero; use allocator::Allocator; -use ::{Real, Complex, Scalar}; +use ::{Real, Complex}; use storage::{Storage, StorageMut}; use base::{DefaultAllocator, Matrix, Dim, MatrixMN}; use constraint::{SameNumberOfRows, SameNumberOfColumns, ShapeConstraint}; diff --git a/src/base/unit.rs b/src/base/unit.rs index b57ca3d3..87a428b5 100644 --- a/src/base/unit.rs +++ b/src/base/unit.rs @@ -13,8 +13,6 @@ use abomonation::Abomonation; use alga::general::{SubsetOf, Complex}; use alga::linear::NormedSpace; -use ::Real; - /// A wrapper that ensures the underlying algebraic entity has a unit norm. /// /// Use `.as_ref()` or `.into_inner()` to obtain the underlying value by-reference or by-move. diff --git a/src/debug/random_orthogonal.rs b/src/debug/random_orthogonal.rs index 6d393a24..94035109 100644 --- a/src/debug/random_orthogonal.rs +++ b/src/debug/random_orthogonal.rs @@ -3,22 +3,22 @@ use base::storage::Owned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; -use alga::general::Real; +use alga::general::Complex; +use base::Scalar; use base::allocator::Allocator; use base::dimension::{Dim, Dynamic, U2}; use base::{DefaultAllocator, MatrixN}; -use geometry::UnitComplex; -use num_complex::Complex; +use linalg::givens::GivensRotation; /// A random orthogonal matrix. #[derive(Clone, Debug)] -pub struct RandomOrthogonal +pub struct RandomOrthogonal where DefaultAllocator: Allocator { m: MatrixN, } -impl RandomOrthogonal +impl RandomOrthogonal where DefaultAllocator: Allocator { /// Retrieve the generated matrix. @@ -30,10 +30,9 @@ where DefaultAllocator: Allocator pub fn new N>(dim: D, mut rand: Rand) -> Self { let mut res = MatrixN::identity_generic(dim, dim); - // Create an orthogonal matrix by compositing planar 2D rotations. + // Create an orthogonal matrix by composing random Givens rotations rotations. 
for i in 0..dim.value() - 1 { - let c = Complex::new(rand(), rand()); - let rot: UnitComplex = UnitComplex::from_complex(c); + let rot = GivensRotation::new(rand(), rand()).0; rot.rotate(&mut res.fixed_rows_mut::(i)); } @@ -42,7 +41,7 @@ where DefaultAllocator: Allocator } #[cfg(feature = "arbitrary")] -impl Arbitrary for RandomOrthogonal +impl Arbitrary for RandomOrthogonal where DefaultAllocator: Allocator, Owned: Clone + Send, diff --git a/src/debug/random_sdp.rs b/src/debug/random_sdp.rs index 7835c775..772eccc9 100644 --- a/src/debug/random_sdp.rs +++ b/src/debug/random_sdp.rs @@ -3,7 +3,8 @@ use base::storage::Owned; #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; -use alga::general::Real; +use alga::general::Complex; +use base::Scalar; use base::allocator::Allocator; use base::dimension::{Dim, Dynamic}; use base::{DefaultAllocator, MatrixN}; @@ -12,13 +13,13 @@ use debug::RandomOrthogonal; /// A random, well-conditioned, symmetric definite-positive matrix. #[derive(Clone, Debug)] -pub struct RandomSDP +pub struct RandomSDP where DefaultAllocator: Allocator { m: MatrixN, } -impl RandomSDP +impl RandomSDP where DefaultAllocator: Allocator { /// Retrieve the generated matrix. @@ -30,11 +31,11 @@ where DefaultAllocator: Allocator /// random reals generators. pub fn new N>(dim: D, mut rand: Rand) -> Self { let mut m = RandomOrthogonal::new(dim, || rand()).unwrap(); - let mt = m.transpose(); + let mt = m.conjugate_transpose(); for i in 0..dim.value() { let mut col = m.column_mut(i); - let eigenval = N::one() + rand().abs(); + let eigenval = N::one() + N::from_real(rand().modulus()); col *= eigenval; } @@ -43,7 +44,7 @@ where DefaultAllocator: Allocator } #[cfg(feature = "arbitrary")] -impl Arbitrary for RandomSDP +impl Arbitrary for RandomSDP where DefaultAllocator: Allocator, Owned: Clone + Send, diff --git a/src/lib.rs b/src/lib.rs index 282661b0..a718cf91 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -66,7 +66,7 @@ an optimized set of tools for computer graphics and physics. Those features incl * General transformations that does not have to be invertible, stored as an homogeneous matrix: `Transform2`, `Transform3`. * 3D projections for computer graphics: `Perspective3`, `Orthographic3`. -* Matrix factorizations: `Cholesky`, `QR`, `LU`, `FullPivLU`, `SVD`, `RealSchur`, `Hessenberg`, `SymmetricEigen`. +* Matrix factorizations: `Cholesky`, `QR`, `LU`, `FullPivLU`, `SVD`, `Schur`, `Hessenberg`, `SymmetricEigen`. * Insertion and removal of rows of columns of a matrix. * Implements traits from the [alga](https://crates.io/crates/alga) crate for generic programming. 
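For readers skimming this patch, a minimal usage sketch of the renamed decomposition (illustrative only, not part of the diff; it assumes the nalgebra API as of this commit, and the matrix values and tolerance are made up):

    extern crate nalgebra as na;

    use na::Matrix3;

    fn main() {
        let m = Matrix3::new(4.0, 2.0, 0.0,
                             1.0, 3.0, 0.0,
                             0.0, 0.0, 1.0);

        // `m.schur()` replaces the old `m.real_schur()`, and `na::Schur::new(m)`
        // replaces `na::RealSchur::new(m)`.
        let (q, t) = m.schur().unpack();

        // For a real input this is the real Schur form: m ≈ Q * T * Qᵀ.
        assert!(((q * t * q.transpose()) - m).norm() < 1.0e-7);
    }
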
diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index 96f533e4..682bd990 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -1,7 +1,6 @@ #[cfg(feature = "serde-serialize")] use serde::{Deserialize, Serialize}; -use num::Zero; use alga::general::Complex; use allocator::Allocator; @@ -59,21 +58,24 @@ where DefaultAllocator: Allocator let mut col_j = col_j.rows_range_mut(j..); let col_k = col_k.rows_range(j..); - col_j.axpy(factor, &col_k, N::one()); + col_j.axpy(factor.conjugate(), &col_k, N::one()); } let diag = unsafe { *matrix.get_unchecked((j, j)) }; - if diag.real() > N::Real::zero() { - let denom = diag.sqrt(); - unsafe { - *matrix.get_unchecked_mut((j, j)) = denom; - } + if !diag.is_zero() { + if let Some(denom) = diag.try_sqrt() { + unsafe { + *matrix.get_unchecked_mut((j, j)) = denom; + } - let mut col = matrix.slice_range_mut(j + 1.., j); - col /= denom; - } else { - return None; + let mut col = matrix.slice_range_mut(j + 1.., j); + col /= denom; + + continue; + } } + + return None; } Some(Cholesky { chol: matrix }) @@ -119,7 +121,7 @@ where DefaultAllocator: Allocator ShapeConstraint: SameNumberOfRows, { let _ = self.chol.solve_lower_triangular_mut(b); - let _ = self.chol.tr_solve_lower_triangular_mut(b); + let _ = self.chol.conjugate().tr_solve_lower_triangular_mut(b); } /// Returns the solution of the system `self * x = b` where `self` is the decomposed matrix and diff --git a/src/linalg/eigen.rs b/src/linalg/eigen.rs index dd721f81..be1812ee 100644 --- a/src/linalg/eigen.rs +++ b/src/linalg/eigen.rs @@ -15,7 +15,7 @@ use constraint::{DimEq, ShapeConstraint}; use geometry::{Reflection, UnitComplex}; use linalg::householder; -use linalg::RealSchur; +use linalg::Schur; /// Eigendecomposition of a real matrix with real eigenvalues (or complex eigen values for complex matrices). #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] @@ -76,7 +76,7 @@ where ); let dim = m.nrows(); - let (mut eigenvectors, mut eigenvalues) = RealSchur::new(m, 0).unwrap().unpack(); + let (mut eigenvectors, mut eigenvalues) = Schur::new(m, 0).unwrap().unpack(); println!("Schur eigenvalues: {}", eigenvalues); diff --git a/src/linalg/givens.rs b/src/linalg/givens.rs index 5943e0c1..ecc9126b 100644 --- a/src/linalg/givens.rs +++ b/src/linalg/givens.rs @@ -1,15 +1,13 @@ //! Construction of givens rotations. -use alga::general::{Complex, Real}; +use alga::general::Complex; use num::{Zero, One}; -use num_complex::Complex as NumComplex; use base::dimension::{Dim, U2}; use base::constraint::{ShapeConstraint, DimEq}; use base::storage::{Storage, StorageMut}; use base::{Vector, Matrix}; -use geometry::UnitComplex; /// A Givens rotation. #[derive(Debug, Clone, Copy)] diff --git a/src/linalg/schur.rs b/src/linalg/schur.rs index b815131e..9c877d92 100644 --- a/src/linalg/schur.rs +++ b/src/linalg/schur.rs @@ -12,12 +12,14 @@ use base::storage::Storage; use base::{DefaultAllocator, MatrixN, SquareMatrix, Unit, Vector2, Vector3, VectorN}; use constraint::{DimEq, ShapeConstraint}; -use geometry::{Reflection, UnitComplex}; +use geometry::Reflection; use linalg::householder; use linalg::Hessenberg; use linalg::givens::GivensRotation; -/// Real Schur decomposition of a square matrix. +/// Schur decomposition of a square matrix. +/// +/// If this is a real matrix, this will be a Real Schur decomposition. 
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] #[cfg_attr( feature = "serde-serialize", @@ -34,20 +36,20 @@ use linalg::givens::GivensRotation; )) )] #[derive(Clone, Debug)] -pub struct RealSchur +pub struct Schur where DefaultAllocator: Allocator { q: MatrixN, t: MatrixN, } -impl Copy for RealSchur +impl Copy for Schur where DefaultAllocator: Allocator, MatrixN: Copy, {} -impl RealSchur +impl Schur where D: DimSub, // For Hessenberg. ShapeConstraint: DimEq>, // For Hessenberg. @@ -75,7 +77,7 @@ where pub fn try_new(m: MatrixN, eps: N::Real, max_niter: usize) -> Option { let mut work = unsafe { VectorN::new_uninitialized_generic(m.data.shape().0, U1) }; - Self::do_decompose(m, &mut work, eps, max_niter, true).map(|(q, t)| RealSchur { + Self::do_decompose(m, &mut work, eps, max_niter, true).map(|(q, t)| Schur { q: q.unwrap(), t: t, }) @@ -474,8 +476,6 @@ fn compute_2x2_basis>( let x1 = eigval1 - m[(1, 1)]; let x2 = eigval2 - m[(1, 1)]; - println!("eigval1: {}, eigval2: {}, h10: {}", eigval1, eigval2, h10); - // NOTE: Choose the one that yields a larger x component. // This is necessary for numerical stability of the normalization of the complex // number. @@ -499,8 +499,8 @@ where + Allocator, { /// Computes the Schur decomposition of a square matrix. - pub fn real_schur(self) -> RealSchur { - RealSchur::new(self.into_owned()) + pub fn schur(self) -> Schur { + Schur::new(self.into_owned()) } /// Attempts to compute the Schur decomposition of a square matrix. @@ -514,8 +514,8 @@ where /// * `max_niter` − maximum total number of iterations performed by the algorithm. If this /// number of iteration is exceeded, `None` is returned. If `niter == 0`, then the algorithm /// continues indefinitely until convergence. - pub fn try_real_schur(self, eps: N::Real, max_niter: usize) -> Option> { - RealSchur::try_new(self.into_owned(), eps, max_niter) + pub fn try_schur(self, eps: N::Real, max_niter: usize) -> Option> { + Schur::try_new(self.into_owned(), eps, max_niter) } /// Computes the eigenvalues of this matrix. @@ -543,7 +543,7 @@ where } // FIXME: add balancing? 
- let schur = RealSchur::do_decompose( + let schur = Schur::do_decompose( self.clone_owned(), &mut work, N::Real::default_epsilon(), @@ -551,7 +551,7 @@ where false, ) .unwrap(); - if RealSchur::do_eigenvalues(&schur.1, &mut work) { + if Schur::do_eigenvalues(&schur.1, &mut work) { Some(work) } else { None @@ -566,7 +566,7 @@ where let dim = self.data.shape().0; let mut work = unsafe { VectorN::new_uninitialized_generic(dim, U1) }; - let schur = RealSchur::do_decompose( + let schur = Schur::do_decompose( self.clone_owned(), &mut work, N::default_epsilon(), @@ -575,7 +575,7 @@ where ) .unwrap(); let mut eig = unsafe { VectorN::new_uninitialized_generic(dim, U1) }; - RealSchur::do_complex_eigenvalues(&schur.1, &mut eig); + Schur::do_complex_eigenvalues(&schur.1, &mut eig); eig } } diff --git a/src/linalg/svd.rs b/src/linalg/svd.rs index 8de75829..a455badd 100644 --- a/src/linalg/svd.rs +++ b/src/linalg/svd.rs @@ -1,9 +1,7 @@ #[cfg(feature = "serde-serialize")] use serde::{Deserialize, Serialize}; -use num_complex::Complex as NumComplex; use num::{Zero, One}; -use std::ops::MulAssign; use approx::AbsDiffEq; use alga::general::{Real, Complex}; @@ -13,7 +11,6 @@ use constraint::{SameNumberOfRows, ShapeConstraint}; use dimension::{Dim, DimDiff, DimMin, DimMinimum, DimSub, U1, U2}; use storage::Storage; -use linalg::givens; use linalg::symmetric_eigen; use linalg::Bidiagonal; use linalg::givens::GivensRotation; @@ -116,7 +113,7 @@ where matrix.unscale_mut(m_amax); } - let mut b = Bidiagonal::new(matrix); + let b = Bidiagonal::new(matrix); let mut u = if compute_u { Some(b.u()) } else { None }; let mut v_t = if compute_v { Some(b.v_t()) } else { None }; let mut diagonal = b.diagonal(); diff --git a/src/linalg/symmetric_eigen.rs b/src/linalg/symmetric_eigen.rs index a0f614fe..87b08dc2 100644 --- a/src/linalg/symmetric_eigen.rs +++ b/src/linalg/symmetric_eigen.rs @@ -1,10 +1,8 @@ #[cfg(feature = "serde-serialize")] use serde::{Deserialize, Serialize}; -use num::{Zero, One}; -use num_complex::Complex as NumComplex; +use num::Zero; use approx::AbsDiffEq; -use std::ops::MulAssign; use alga::general::Complex; use allocator::Allocator; @@ -12,7 +10,6 @@ use base::{DefaultAllocator, Matrix2, MatrixN, SquareMatrix, Vector2, VectorN}; use dimension::{Dim, DimDiff, DimSub, U1, U2}; use storage::Storage; -use geometry::UnitComplex; use linalg::givens::GivensRotation; use linalg::SymmetricTridiagonal; @@ -121,8 +118,6 @@ where DefaultAllocator: Allocator + Allocator q = Some(res.0); diag = res.1; off_diag = res.2; - - println!("Tridiagonalization q: {:.5?}", q); } else { let res = SymmetricTridiagonal::new(m).unpack_tridiagonal(); q = None; @@ -154,7 +149,6 @@ where DefaultAllocator: Allocator + Allocator let j = i + 1; if let Some((rot, norm)) = GivensRotation::cancel_y(&v) { - println!("Canceling: {:.5?} with norm: {:.5?}", rot, norm); if i > start { // Not the first iteration. 
off_diag[i - 1] = norm; @@ -204,10 +198,6 @@ where DefaultAllocator: Allocator + Allocator diag[start + 0] = eigvals[0]; diag[start + 1] = eigvals[1]; - println!("Eigvals: {:.5?}", eigvals); - println!("m: {:.5}", m); - println!("Curr q: {:.5?}", q); - if let Some(ref mut q) = q { if let Some((rot, _)) = GivensRotation::try_new(basis.x, basis.y, eps) { let rot = GivensRotation::new_unchecked(rot.c(), N::from_real(rot.s())); @@ -372,7 +362,6 @@ mod test { let expected = expected_shift(m); let computed = super::wilkinson_shift(m.m11, m.m22, m.m12); - println!("{} {}", expected, computed); assert!(relative_eq!(expected, computed, epsilon = 1.0e-7)); } } diff --git a/src/linalg/symmetric_tridiagonal.rs b/src/linalg/symmetric_tridiagonal.rs index 0b8f5675..29451d31 100644 --- a/src/linalg/symmetric_tridiagonal.rs +++ b/src/linalg/symmetric_tridiagonal.rs @@ -53,8 +53,6 @@ where DefaultAllocator: Allocator + Allocator> pub fn new(mut m: MatrixN) -> Self { let dim = m.data.shape().0; - println!("Input m: {}", m.index((0.., 0..))); - assert!( m.is_square(), "Unable to compute the symmetric tridiagonal decomposition of a non-square matrix." @@ -84,7 +82,6 @@ where DefaultAllocator: Allocator + Allocator> m.ger_symm(-N::one(), &p, &axis.conjugate(), N::one()); m.ger_symm(-N::one(), &axis, &p.conjugate(), N::one()); m.ger_symm(dot * ::convert(2.0), &axis, &axis.conjugate(), N::one()); - println!("The m: {}", m); } } @@ -112,7 +109,7 @@ where DefaultAllocator: Allocator + Allocator> } /// Retrieve the diagonal, and off diagonal elements of this decomposition. - pub fn unpack_tridiagonal(mut self) -> (VectorN, VectorN>) + pub fn unpack_tridiagonal(self) -> (VectorN, VectorN>) where DefaultAllocator: Allocator + Allocator> { (self.diagonal(), self.off_diagonal.map(N::modulus)) diff --git a/tests/core/abomonation.rs b/tests/core/abomonation.rs index be3952cd..01760e14 100644 --- a/tests/core/abomonation.rs +++ b/tests/core/abomonation.rs @@ -40,7 +40,7 @@ fn assert_encode_and_decode(original_data: T // Encode let mut bytes = Vec::new(); unsafe { - encode(&original_data, &mut bytes); + let _ = encode(&original_data, &mut bytes); } // Drop the original, so that dangling pointers are revealed by the test diff --git a/tests/linalg/bidiagonal.rs b/tests/linalg/bidiagonal.rs index d2f21a84..b4c382f3 100644 --- a/tests/linalg/bidiagonal.rs +++ b/tests/linalg/bidiagonal.rs @@ -1,89 +1,78 @@ #![cfg(feature = "arbitrary")] -use na::{DMatrix, Matrix2, Matrix3x5, Matrix4, Matrix5x3}; -use core::helper::{RandScalar, RandComplex}; +macro_rules! gen_tests( + ($module: ident, $scalar: ty) => { + mod $module { + use na::{DMatrix, Matrix2, Matrix3x5, Matrix4, Matrix5x3}; + #[allow(unused_imports)] + use core::helper::{RandScalar, RandComplex}; + quickcheck! { + fn bidiagonal(m: DMatrix<$scalar>) -> bool { + let m = m.map(|e| e.0); + if m.len() == 0 { + return true; + } -quickcheck! 
{ - fn bidiagonal(m: DMatrix>) -> bool { - let m = m.map(|e| e.0); - if m.len() == 0 { - return true; + let bidiagonal = m.clone().bidiagonalize(); + let (u, d, v_t) = bidiagonal.unpack(); + + relative_eq!(m, &u * d * &v_t, epsilon = 1.0e-7) + } + + fn bidiagonal_static_5_3(m: Matrix5x3<$scalar>) -> bool { + let m = m.map(|e| e.0); + let bidiagonal = m.bidiagonalize(); + let (u, d, v_t) = bidiagonal.unpack(); + + relative_eq!(m, &u * d * &v_t, epsilon = 1.0e-7) + } + + fn bidiagonal_static_3_5(m: Matrix3x5<$scalar>) -> bool { + let m = m.map(|e| e.0); + let bidiagonal = m.bidiagonalize(); + let (u, d, v_t) = bidiagonal.unpack(); + + relative_eq!(m, &u * d * &v_t, epsilon = 1.0e-7) + } + + fn bidiagonal_static_square(m: Matrix4<$scalar>) -> bool { + let m = m.map(|e| e.0); + let bidiagonal = m.bidiagonalize(); + let (u, d, v_t) = bidiagonal.unpack(); + + relative_eq!(m, &u * d * &v_t, epsilon = 1.0e-7) + } + + fn bidiagonal_static_square_2x2(m: Matrix2<$scalar>) -> bool { + let m = m.map(|e| e.0); + let bidiagonal = m.bidiagonalize(); + let (u, d, v_t) = bidiagonal.unpack(); + + relative_eq!(m, &u * d * &v_t, epsilon = 1.0e-7) + } + } } - - let bidiagonal = m.clone().bidiagonalize(); - let (u, d, v_t) = bidiagonal.unpack(); - - println!("{}{}{}", &u, &d, &v_t); - println!("{:.7}{:.7}", &u * &d * &v_t, m); - - relative_eq!(m, &u * d * &v_t, epsilon = 1.0e-7) } +); - fn bidiagonal_static_5_3(m: Matrix5x3>) -> bool { - let m = m.map(|e| e.0); - let bidiagonal = m.bidiagonalize(); - let (u, d, v_t) = bidiagonal.unpack(); - - println!("{}{}{}", &u, &d, &v_t); - println!("{:.7}{:.7}", &u * &d * &v_t, m); - - relative_eq!(m, &u * d * &v_t, epsilon = 1.0e-7) - } - - fn bidiagonal_static_3_5(m: Matrix3x5>) -> bool { - let m = m.map(|e| e.0); - let bidiagonal = m.bidiagonalize(); - let (u, d, v_t) = bidiagonal.unpack(); - - println!("{}{}{}", &u, &d, &v_t); - println!("{:.7}{:.7}", &u * &d * &v_t, m); - - relative_eq!(m, &u * d * &v_t, epsilon = 1.0e-7) - } - - fn bidiagonal_static_square(m: Matrix4>) -> bool { - let m = m.map(|e| e.0); - let bidiagonal = m.bidiagonalize(); - let (u, d, v_t) = bidiagonal.unpack(); - - println!("{}{}{}", &u, &d, &v_t); - println!("{:.7}{:.7}", &u * &d * &v_t, m); - - relative_eq!(m, &u * d * &v_t, epsilon = 1.0e-7) - } - - fn bidiagonal_static_square_2x2(m: Matrix2>) -> bool { - let m = m.map(|e| e.0); - let bidiagonal = m.bidiagonalize(); - let (u, d, v_t) = bidiagonal.unpack(); - - println!("{}{}{}", &u, &d, &v_t); - println!("{:.7}{:.7}", &u * &d * &v_t, m); - - relative_eq!(m, &u * d * &v_t, epsilon = 1.0e-7) - } -} - +gen_tests!(complex, RandComplex); +gen_tests!(f64, RandScalar); #[test] fn bidiagonal_identity() { - let m = DMatrix::::identity(10, 10); + let m = na::DMatrix::::identity(10, 10); let bidiagonal = m.clone().bidiagonalize(); let (u, d, v_t) = bidiagonal.unpack(); - println!("u, s, v_t: {}{}{}", u, d, v_t); - println!("recomp: {}", &u * &d * &v_t); assert_eq!(m, &u * d * &v_t); - let m = DMatrix::::identity(10, 15); + let m = na::DMatrix::::identity(10, 15); let bidiagonal = m.clone().bidiagonalize(); let (u, d, v_t) = bidiagonal.unpack(); - println!("u, s, v_t: {}{}{}", u, d, v_t); assert_eq!(m, &u * d * &v_t); - let m = DMatrix::::identity(15, 10); + let m = na::DMatrix::::identity(15, 10); let bidiagonal = m.clone().bidiagonalize(); let (u, d, v_t) = bidiagonal.unpack(); - println!("u, s, v_t: {}{}{}", u, d, v_t); assert_eq!(m, &u * d * &v_t); } \ No newline at end of file diff --git a/tests/linalg/cholesky.rs b/tests/linalg/cholesky.rs index 
9fe086ff..5bc91dc2 100644 --- a/tests/linalg/cholesky.rs +++ b/tests/linalg/cholesky.rs @@ -1,81 +1,87 @@ #![cfg(all(feature = "arbitrary", feature = "debug"))] -use na::debug::RandomSDP; -use na::dimension::U4; -use na::{DMatrix, DVector, Matrix4x3, Vector4}; -use std::cmp; -quickcheck! { - fn cholesky(m: RandomSDP) -> bool { - let mut m = m.unwrap(); +macro_rules! gen_tests( + ($module: ident, $scalar: ty) => { + mod $module { + use na::debug::RandomSDP; + use na::dimension::{U4, Dynamic}; + use na::{DMatrix, DVector, Matrix4x3, Vector4}; + use rand::random; + #[allow(unused_imports)] + use core::helper::{RandScalar, RandComplex}; + use std::cmp; - // Put garbage on the upper triangle to make sure it is not read by the decomposition. - m.fill_upper_triangle(23.0, 1); + quickcheck! { + fn cholesky(n: usize) -> bool { + let m = RandomSDP::new(Dynamic::new(n.max(1).min(50)), || random::<$scalar>().0).unwrap(); + let l = m.clone().cholesky().unwrap().unpack(); + relative_eq!(m, &l * l.conjugate_transpose(), epsilon = 1.0e-7) + } - let l = m.clone().cholesky().unwrap().unpack(); - m.fill_upper_triangle_with_lower_triangle(); - relative_eq!(m, &l * l.transpose(), epsilon = 1.0e-7) - } + fn cholesky_static(_m: RandomSDP) -> bool { + let m = RandomSDP::new(U4, || random::<$scalar>().0).unwrap(); + let chol = m.cholesky().unwrap(); + let l = chol.unpack(); - fn cholesky_static(m: RandomSDP) -> bool { - let m = m.unwrap(); - let chol = m.cholesky().unwrap(); - let l = chol.unpack(); + if !relative_eq!(m, &l * l.conjugate_transpose(), epsilon = 1.0e-7) { + false + } + else { + true + } + } - if !relative_eq!(m, &l * l.transpose(), epsilon = 1.0e-7) { - false - } - else { - true + fn cholesky_solve(n: usize, nb: usize) -> bool { + let n = n.max(1).min(50); + let m = RandomSDP::new(Dynamic::new(n), || random::<$scalar>().0).unwrap(); + let nb = cmp::min(nb, 50); // To avoid slowing down the test too much. + + let chol = m.clone().cholesky().unwrap(); + let b1 = DVector::<$scalar>::new_random(n).map(|e| e.0); + let b2 = DMatrix::<$scalar>::new_random(n, nb).map(|e| e.0); + + let sol1 = chol.solve(&b1); + let sol2 = chol.solve(&b2); + + relative_eq!(&m * &sol1, b1, epsilon = 1.0e-7) && + relative_eq!(&m * &sol2, b2, epsilon = 1.0e-7) + } + + fn cholesky_solve_static(_n: usize) -> bool { + let m = RandomSDP::new(U4, || random::<$scalar>().0).unwrap(); + let chol = m.clone().cholesky().unwrap(); + let b1 = Vector4::<$scalar>::new_random().map(|e| e.0); + let b2 = Matrix4x3::<$scalar>::new_random().map(|e| e.0); + + let sol1 = chol.solve(&b1); + let sol2 = chol.solve(&b2); + + relative_eq!(m * sol1, b1, epsilon = 1.0e-7) && + relative_eq!(m * sol2, b2, epsilon = 1.0e-7) + } + + fn cholesky_inverse(n: usize) -> bool { + let m = RandomSDP::new(Dynamic::new(n.max(1).min(50)), || random::<$scalar>().0).unwrap(); + let m1 = m.clone().cholesky().unwrap().inverse(); + let id1 = &m * &m1; + let id2 = &m1 * &m; + + id1.is_identity(1.0e-7) && id2.is_identity(1.0e-7) + } + + fn cholesky_inverse_static(_n: usize) -> bool { + let m = RandomSDP::new(U4, || random::<$scalar>().0).unwrap(); + let m1 = m.clone().cholesky().unwrap().inverse(); + let id1 = &m * &m1; + let id2 = &m1 * &m; + + id1.is_identity(1.0e-7) && id2.is_identity(1.0e-7) + } + } } } +); - - fn cholesky_solve(m: RandomSDP, nb: usize) -> bool { - let m = m.unwrap(); - let n = m.nrows(); - let nb = cmp::min(nb, 50); // To avoid slowing down the test too much. 
- - let chol = m.clone().cholesky().unwrap(); - let b1 = DVector::new_random(n); - let b2 = DMatrix::new_random(n, nb); - - let sol1 = chol.solve(&b1); - let sol2 = chol.solve(&b2); - - relative_eq!(&m * &sol1, b1, epsilon = 1.0e-7) && - relative_eq!(&m * &sol2, b2, epsilon = 1.0e-7) - } - - fn cholesky_solve_static(m: RandomSDP) -> bool { - let m = m.unwrap(); - let chol = m.clone().cholesky().unwrap(); - let b1 = Vector4::new_random(); - let b2 = Matrix4x3::new_random(); - - let sol1 = chol.solve(&b1); - let sol2 = chol.solve(&b2); - - relative_eq!(m * sol1, b1, epsilon = 1.0e-7) && - relative_eq!(m * sol2, b2, epsilon = 1.0e-7) - } - - fn cholesky_inverse(m: RandomSDP) -> bool { - let m = m.unwrap(); - - let m1 = m.clone().cholesky().unwrap().inverse(); - let id1 = &m * &m1; - let id2 = &m1 * &m; - - id1.is_identity(1.0e-7) && id2.is_identity(1.0e-7) - } - - fn cholesky_inverse_static(m: RandomSDP) -> bool { - let m = m.unwrap(); - let m1 = m.clone().cholesky().unwrap().inverse(); - let id1 = &m * &m1; - let id2 = &m1 * &m; - - id1.is_identity(1.0e-7) && id2.is_identity(1.0e-7) - } -} +gen_tests!(complex, RandComplex); +gen_tests!(f64, RandScalar); diff --git a/tests/linalg/eigen.rs b/tests/linalg/eigen.rs index a5dfcf6b..9a1c3539 100644 --- a/tests/linalg/eigen.rs +++ b/tests/linalg/eigen.rs @@ -4,68 +4,65 @@ use na::DMatrix; #[cfg(feature = "arbitrary")] mod quickcheck_tests { - use na::{DMatrix, Matrix2, Matrix3, Matrix4}; - use core::helper::{RandScalar, RandComplex}; - use std::cmp; + macro_rules! gen_tests( + ($module: ident, $scalar: ty) => { + mod $module { + use na::{DMatrix, Matrix2, Matrix3, Matrix4}; + #[allow(unused_imports)] + use core::helper::{RandScalar, RandComplex}; + use std::cmp; - quickcheck! { - fn symmetric_eigen(n: usize) -> bool { - let n = cmp::max(1, cmp::min(n, 10)); - let m = DMatrix::>::new_random(n, n).map(|e| e.0).hermitian_part(); - let eig = m.clone().symmetric_eigen(); - let recomp = eig.recompose(); + quickcheck! 
{ + fn symmetric_eigen(n: usize) -> bool { + let n = cmp::max(1, cmp::min(n, 10)); + let m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0).hermitian_part(); + let eig = m.clone().symmetric_eigen(); + let recomp = eig.recompose(); - println!("{}{}", m.lower_triangle(), recomp.lower_triangle()); + relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5) + } - relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5) + fn symmetric_eigen_singular(n: usize) -> bool { + let n = cmp::max(1, cmp::min(n, 10)); + let mut m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0).hermitian_part(); + m.row_mut(n / 2).fill(na::zero()); + m.column_mut(n / 2).fill(na::zero()); + let eig = m.clone().symmetric_eigen(); + let recomp = eig.recompose(); + + relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5) + } + + fn symmetric_eigen_static_square_4x4(m: Matrix4<$scalar>) -> bool { + let m = m.map(|e| e.0).hermitian_part(); + let eig = m.symmetric_eigen(); + let recomp = eig.recompose(); + + relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5) + } + + fn symmetric_eigen_static_square_3x3(m: Matrix3<$scalar>) -> bool { + let m = m.map(|e| e.0).hermitian_part(); + let eig = m.symmetric_eigen(); + let recomp = eig.recompose(); + + relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5) + } + + fn symmetric_eigen_static_square_2x2(m: Matrix2<$scalar>) -> bool { + let m = m.map(|e| e.0).hermitian_part(); + let eig = m.symmetric_eigen(); + let recomp = eig.recompose(); + + relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5) + } + } + } } + ); - fn symmetric_eigen_singular(n: usize) -> bool { - let n = cmp::max(1, cmp::min(n, 10)); - let mut m = DMatrix::>::new_random(n, n).map(|e| e.0).hermitian_part(); - m.row_mut(n / 2).fill(na::zero()); - m.column_mut(n / 2).fill(na::zero()); - let eig = m.clone().symmetric_eigen(); - let recomp = eig.recompose(); - - println!("{}{}", m.lower_triangle(), recomp.lower_triangle()); - - relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5) - } - - fn symmetric_eigen_static_square_4x4(m: Matrix4>) -> bool { - let m = m.map(|e| e.0).hermitian_part(); - let eig = m.symmetric_eigen(); - let recomp = eig.recompose(); - - println!("{}{}", m.lower_triangle(), recomp.lower_triangle()); - - relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5) - } - - fn symmetric_eigen_static_square_3x3(m: Matrix3>) -> bool { - let m = m.map(|e| e.0).hermitian_part(); - let eig = m.symmetric_eigen(); - let recomp = eig.recompose(); - - println!("Eigenvectors: {}", eig.eigenvectors); - println!("Eigenvalues: {}", eig.eigenvalues); - println!("{}{}", m.lower_triangle(), recomp.lower_triangle()); - - relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5) - } - - fn symmetric_eigen_static_square_2x2(m: Matrix2>) -> bool { - let m = m.map(|e| e.0).hermitian_part(); - let eig = m.symmetric_eigen(); - let recomp = eig.recompose(); - - println!("Eigenvectors: {}", eig.eigenvectors); - println!("{}{}", m.lower_triangle(), recomp.lower_triangle()); - - relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5) - } - } + gen_tests!(complex, RandComplex); + gen_tests!(f64, RandScalar); } // Test proposed on the issue #176 of rulinalg. 
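A condensed sketch of the property these macro-generated eigen tests exercise, written out for the plain f64 case only (illustrative, not part of the diff; the size and tolerance are arbitrary):

    extern crate nalgebra as na;

    use na::DMatrix;

    fn main() {
        // Take the Hermitian part of a random matrix, as the tests above do.
        let m = DMatrix::<f64>::new_random(5, 5).hermitian_part();

        // Decompose, then rebuild the matrix from its eigenvalues/eigenvectors.
        let eig = m.clone().symmetric_eigen();
        let recomp = eig.recompose();

        // The lower triangles should agree up to round-off.
        assert!((m.lower_triangle() - recomp.lower_triangle()).norm() < 1.0e-5);
    }
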
diff --git a/tests/linalg/full_piv_lu.rs b/tests/linalg/full_piv_lu.rs index 5a0ad75b..c0e15cde 100644 --- a/tests/linalg/full_piv_lu.rs +++ b/tests/linalg/full_piv_lu.rs @@ -42,122 +42,140 @@ fn full_piv_lu_simple_with_pivot() { #[cfg(feature = "arbitrary")] mod quickcheck_tests { - use std::cmp; - use na::{DMatrix, Matrix4, Matrix4x3, Matrix5x3, Matrix3x5, DVector, Vector4}; + macro_rules! gen_tests( + ($module: ident, $scalar: ty) => { + mod $module { + use std::cmp; + use num::One; + use na::{DMatrix, Matrix4, Matrix4x3, Matrix5x3, Matrix3x5, DVector, Vector4}; + #[allow(unused_imports)] + use core::helper::{RandScalar, RandComplex}; - quickcheck! { - fn full_piv_lu(m: DMatrix) -> bool { - let mut m = m; - if m.len() == 0 { - m = DMatrix::new_random(1, 1); - } + quickcheck! { + fn full_piv_lu(m: DMatrix<$scalar>) -> bool { + let mut m = m.map(|e| e.0); + if m.len() == 0 { + m = DMatrix::<$scalar>::new_random(1, 1).map(|e| e.0); + } - let lu = m.clone().full_piv_lu(); - let (p, l, u, q) = lu.unpack(); - let mut lu = l * u; - p.inv_permute_rows(&mut lu); - q.inv_permute_columns(&mut lu); + let lu = m.clone().full_piv_lu(); + let (p, l, u, q) = lu.unpack(); + let mut lu = l * u; + p.inv_permute_rows(&mut lu); + q.inv_permute_columns(&mut lu); - relative_eq!(m, lu, epsilon = 1.0e-7) - } + relative_eq!(m, lu, epsilon = 1.0e-7) + } - fn full_piv_lu_static_3_5(m: Matrix3x5) -> bool { - let lu = m.full_piv_lu(); - let (p, l, u, q) = lu.unpack(); - let mut lu = l * u; - p.inv_permute_rows(&mut lu); - q.inv_permute_columns(&mut lu); + fn full_piv_lu_static_3_5(m: Matrix3x5<$scalar>) -> bool { + let m = m.map(|e| e.0); + let lu = m.full_piv_lu(); + let (p, l, u, q) = lu.unpack(); + let mut lu = l * u; + p.inv_permute_rows(&mut lu); + q.inv_permute_columns(&mut lu); - relative_eq!(m, lu, epsilon = 1.0e-7) - } + relative_eq!(m, lu, epsilon = 1.0e-7) + } - fn full_piv_lu_static_5_3(m: Matrix5x3) -> bool { - let lu = m.full_piv_lu(); - let (p, l, u, q) = lu.unpack(); - let mut lu = l * u; - p.inv_permute_rows(&mut lu); - q.inv_permute_columns(&mut lu); + fn full_piv_lu_static_5_3(m: Matrix5x3<$scalar>) -> bool { + let m = m.map(|e| e.0); + let lu = m.full_piv_lu(); + let (p, l, u, q) = lu.unpack(); + let mut lu = l * u; + p.inv_permute_rows(&mut lu); + q.inv_permute_columns(&mut lu); - relative_eq!(m, lu, epsilon = 1.0e-7) - } + relative_eq!(m, lu, epsilon = 1.0e-7) + } - fn full_piv_lu_static_square(m: Matrix4) -> bool { - let lu = m.full_piv_lu(); - let (p, l, u, q) = lu.unpack(); - let mut lu = l * u; - p.inv_permute_rows(&mut lu); - q.inv_permute_columns(&mut lu); + fn full_piv_lu_static_square(m: Matrix4<$scalar>) -> bool { + let m = m.map(|e| e.0); + let lu = m.full_piv_lu(); + let (p, l, u, q) = lu.unpack(); + let mut lu = l * u; + p.inv_permute_rows(&mut lu); + q.inv_permute_columns(&mut lu); - relative_eq!(m, lu, epsilon = 1.0e-7) - } + relative_eq!(m, lu, epsilon = 1.0e-7) + } - fn full_piv_lu_solve(n: usize, nb: usize) -> bool { - if n != 0 && nb != 0 { - let n = cmp::min(n, 50); // To avoid slowing down the test too much. - let nb = cmp::min(nb, 50); // To avoid slowing down the test too much. - let m = DMatrix::::new_random(n, n); + fn full_piv_lu_solve(n: usize, nb: usize) -> bool { + if n != 0 && nb != 0 { + let n = cmp::min(n, 50); // To avoid slowing down the test too much. + let nb = cmp::min(nb, 50); // To avoid slowing down the test too much. 
+ let m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0); - let lu = m.clone().full_piv_lu(); - let b1 = DVector::new_random(n); - let b2 = DMatrix::new_random(n, nb); + let lu = m.clone().full_piv_lu(); + let b1 = DVector::<$scalar>::new_random(n).map(|e| e.0); + let b2 = DMatrix::<$scalar>::new_random(n, nb).map(|e| e.0); - let sol1 = lu.solve(&b1); - let sol2 = lu.solve(&b2); + let sol1 = lu.solve(&b1); + let sol2 = lu.solve(&b2); - return (sol1.is_none() || relative_eq!(&m * sol1.unwrap(), b1, epsilon = 1.0e-6)) && - (sol2.is_none() || relative_eq!(&m * sol2.unwrap(), b2, epsilon = 1.0e-6)) - } + return (sol1.is_none() || relative_eq!(&m * sol1.unwrap(), b1, epsilon = 1.0e-6)) && + (sol2.is_none() || relative_eq!(&m * sol2.unwrap(), b2, epsilon = 1.0e-6)) + } - return true; - } + return true; + } - fn full_piv_lu_solve_static(m: Matrix4) -> bool { - let lu = m.full_piv_lu(); - let b1 = Vector4::new_random(); - let b2 = Matrix4x3::new_random(); + fn full_piv_lu_solve_static(m: Matrix4<$scalar>) -> bool { + let m = m.map(|e| e.0); + let lu = m.full_piv_lu(); + let b1 = Vector4::<$scalar>::new_random().map(|e| e.0); + let b2 = Matrix4x3::<$scalar>::new_random().map(|e| e.0); - let sol1 = lu.solve(&b1); - let sol2 = lu.solve(&b2); + let sol1 = lu.solve(&b1); + let sol2 = lu.solve(&b2); - return (sol1.is_none() || relative_eq!(&m * sol1.unwrap(), b1, epsilon = 1.0e-6)) && - (sol2.is_none() || relative_eq!(&m * sol2.unwrap(), b2, epsilon = 1.0e-6)) - } + return (sol1.is_none() || relative_eq!(&m * sol1.unwrap(), b1, epsilon = 1.0e-6)) && + (sol2.is_none() || relative_eq!(&m * sol2.unwrap(), b2, epsilon = 1.0e-6)) + } - fn full_piv_lu_inverse(n: usize) -> bool { - let n = cmp::max(1, cmp::min(n, 15)); // To avoid slowing down the test too much. - let m = DMatrix::::new_random(n, n); + fn full_piv_lu_inverse(n: usize) -> bool { + let n = cmp::max(1, cmp::min(n, 15)); // To avoid slowing down the test too much. + let m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0); - let mut l = m.lower_triangle(); - let mut u = m.upper_triangle(); + let mut l = m.lower_triangle(); + let mut u = m.upper_triangle(); - // Ensure the matrix is well conditioned for inversion. - l.fill_diagonal(1.0); - u.fill_diagonal(1.0); - let m = l * u; + // Ensure the matrix is well conditioned for inversion. 
+ l.fill_diagonal(One::one()); + u.fill_diagonal(One::one()); + let m = l * u; - let m1 = m.clone().full_piv_lu().try_inverse().unwrap(); - let id1 = &m * &m1; - let id2 = &m1 * &m; + let m1 = m.clone().full_piv_lu().try_inverse().unwrap(); + let id1 = &m * &m1; + let id2 = &m1 * &m; - return id1.is_identity(1.0e-5) && id2.is_identity(1.0e-5); - } + return id1.is_identity(1.0e-5) && id2.is_identity(1.0e-5); + } - fn full_piv_lu_inverse_static(m: Matrix4) -> bool { - let lu = m.full_piv_lu(); + fn full_piv_lu_inverse_static(m: Matrix4<$scalar>) -> bool { + let m = m.map(|e| e.0); + let lu = m.full_piv_lu(); - if let Some(m1) = lu.try_inverse() { - let id1 = &m * &m1; - let id2 = &m1 * &m; + if let Some(m1) = lu.try_inverse() { + let id1 = &m * &m1; + let id2 = &m1 * &m; - id1.is_identity(1.0e-5) && id2.is_identity(1.0e-5) - } - else { - true + id1.is_identity(1.0e-5) && id2.is_identity(1.0e-5) + } + else { + true + } + } + } } } - } + ); + + gen_tests!(complex, RandComplex); + gen_tests!(f64, RandScalar); } + /* #[test] fn swap_rows() { diff --git a/tests/linalg/hessenberg.rs b/tests/linalg/hessenberg.rs index 72c2baab..dfabd29d 100644 --- a/tests/linalg/hessenberg.rs +++ b/tests/linalg/hessenberg.rs @@ -1,8 +1,6 @@ #![cfg(feature = "arbitrary")] -use na::{DMatrix, Matrix2, Matrix4}; -use core::helper::{RandScalar, RandComplex}; -use std::cmp; +use na::Matrix2; #[test] @@ -13,27 +11,42 @@ fn hessenberg_simple() { assert!(relative_eq!(m, p * h * p.transpose(), epsilon = 1.0e-7)) } -quickcheck! { - fn hessenberg(n: usize) -> bool { - let n = cmp::max(1, cmp::min(n, 50)); - let m = DMatrix::>::new_random(n, n).map(|e| e.0); - let hess = m.clone().hessenberg(); - let (p, h) = hess.unpack(); - relative_eq!(m, &p * h * p.conjugate_transpose(), epsilon = 1.0e-7) - } +macro_rules! gen_tests( + ($module: ident, $scalar: ty) => { + mod $module { + use na::{DMatrix, Matrix2, Matrix4}; + use std::cmp; + #[allow(unused_imports)] + use core::helper::{RandScalar, RandComplex}; - fn hessenberg_static_mat2(m: Matrix2>) -> bool { - let m = m.map(|e| e.0); - let hess = m.hessenberg(); - let (p, h) = hess.unpack(); - relative_eq!(m, p * h * p.conjugate_transpose(), epsilon = 1.0e-7) - } + quickcheck! 
{ + fn hessenberg(n: usize) -> bool { + let n = cmp::max(1, cmp::min(n, 50)); + let m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0); - fn hessenberg_static(m: Matrix4>) -> bool { - let m = m.map(|e| e.0); - let hess = m.hessenberg(); - let (p, h) = hess.unpack(); - relative_eq!(m, p * h * p.conjugate_transpose(), epsilon = 1.0e-7) + let hess = m.clone().hessenberg(); + let (p, h) = hess.unpack(); + relative_eq!(m, &p * h * p.conjugate_transpose(), epsilon = 1.0e-7) + } + + fn hessenberg_static_mat2(m: Matrix2<$scalar>) -> bool { + let m = m.map(|e| e.0); + let hess = m.hessenberg(); + let (p, h) = hess.unpack(); + relative_eq!(m, p * h * p.conjugate_transpose(), epsilon = 1.0e-7) + } + + fn hessenberg_static(m: Matrix4<$scalar>) -> bool { + let m = m.map(|e| e.0); + let hess = m.hessenberg(); + let (p, h) = hess.unpack(); + relative_eq!(m, p * h * p.conjugate_transpose(), epsilon = 1.0e-7) + } + } + } } -} +); + +gen_tests!(complex, RandComplex); +gen_tests!(f64, RandScalar); \ No newline at end of file diff --git a/tests/linalg/lu.rs b/tests/linalg/lu.rs index cb82731e..02f62c10 100644 --- a/tests/linalg/lu.rs +++ b/tests/linalg/lu.rs @@ -40,6 +40,7 @@ fn lu_simple_with_pivot() { #[cfg(feature = "arbitrary")] mod quickcheck_tests { + #[allow(unused_imports)] use core::helper::{RandScalar, RandComplex}; macro_rules! gen_tests( diff --git a/tests/linalg/mod.rs b/tests/linalg/mod.rs index 74a5e03c..f515b4d6 100644 --- a/tests/linalg/mod.rs +++ b/tests/linalg/mod.rs @@ -7,7 +7,7 @@ mod hessenberg; mod inverse; mod lu; mod qr; -mod real_schur; +mod schur; mod solve; mod svd; mod tridiagonal; diff --git a/tests/linalg/qr.rs b/tests/linalg/qr.rs index 48b0a8f7..76eb00d9 100644 --- a/tests/linalg/qr.rs +++ b/tests/linalg/qr.rs @@ -1,12 +1,12 @@ #![cfg(feature = "arbitrary")] -use core::helper::{RandScalar, RandComplex}; macro_rules! gen_tests( ($module: ident, $scalar: ty) => { mod $module { use na::{DMatrix, DVector, Matrix3x5, Matrix4, Matrix4x3, Matrix5x3, Vector4}; use std::cmp; + #[allow(unused_imports)] use core::helper::{RandScalar, RandComplex}; quickcheck! { diff --git a/tests/linalg/real_schur.rs b/tests/linalg/schur.rs similarity index 65% rename from tests/linalg/real_schur.rs rename to tests/linalg/schur.rs index 54ad9ce4..16767c94 100644 --- a/tests/linalg/real_schur.rs +++ b/tests/linalg/schur.rs @@ -8,7 +8,7 @@ fn schur_simpl_mat3() { -2.0, 1.0, 2.0, 4.0, 2.0, 5.0); - let schur = m.real_schur(); + let schur = m.schur(); let (vecs, vals) = schur.unpack(); assert!(relative_eq!(vecs * vals * vecs.transpose(), m, epsilon = 1.0e-7)); @@ -16,59 +16,70 @@ fn schur_simpl_mat3() { #[cfg(feature = "arbitrary")] mod quickcheck_tests { - use std::cmp; - use na::{DMatrix, Matrix2, Matrix3, Matrix4}; - use core::helper::{RandScalar, RandComplex}; + macro_rules! gen_tests( + ($module: ident, $scalar: ty) => { + mod $module { + use std::cmp; + use na::{DMatrix, Matrix2, Matrix3, Matrix4}; + #[allow(unused_imports)] + use core::helper::{RandScalar, RandComplex}; - quickcheck! { - fn schur(n: usize) -> bool { - let n = cmp::max(1, cmp::min(n, 10)); - let m = DMatrix::>::new_random(n, n).map(|e| e.0); + quickcheck! 
{ + fn schur(n: usize) -> bool { + let n = cmp::max(1, cmp::min(n, 10)); + let m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0); - let (vecs, vals) = m.clone().real_schur().unpack(); + let (vecs, vals) = m.clone().schur().unpack(); - if !relative_eq!(&vecs * &vals * vecs.conjugate_transpose(), m, epsilon = 1.0e-7) { - println!("{:.5}{:.5}", m, &vecs * &vals * vecs.conjugate_transpose()); + if !relative_eq!(&vecs * &vals * vecs.conjugate_transpose(), m, epsilon = 1.0e-7) { + println!("{:.5}{:.5}", m, &vecs * &vals * vecs.conjugate_transpose()); + } + + relative_eq!(&vecs * vals * vecs.conjugate_transpose(), m, epsilon = 1.0e-7) + } + + fn schur_static_mat2(m: Matrix2<$scalar>) -> bool { + let m = m.map(|e| e.0); + let (vecs, vals) = m.clone().schur().unpack(); + + let ok = relative_eq!(vecs * vals * vecs.conjugate_transpose(), m, epsilon = 1.0e-7); + if !ok { + println!("Vecs: {:.5} Vals: {:.5}", vecs, vals); + println!("Reconstruction:{}{}", m, &vecs * &vals * vecs.conjugate_transpose()); + } + ok + } + + fn schur_static_mat3(m: Matrix3<$scalar>) -> bool { + let m = m.map(|e| e.0); + let (vecs, vals) = m.clone().schur().unpack(); + + let ok = relative_eq!(vecs * vals * vecs.conjugate_transpose(), m, epsilon = 1.0e-7); + if !ok { + println!("Vecs: {:.5} Vals: {:.5}", vecs, vals); + println!("{:.5}{:.5}", m, &vecs * &vals * vecs.conjugate_transpose()); + } + ok + } + + fn schur_static_mat4(m: Matrix4<$scalar>) -> bool { + let m = m.map(|e| e.0); + let (vecs, vals) = m.clone().schur().unpack(); + + let ok = relative_eq!(vecs * vals * vecs.conjugate_transpose(), m, epsilon = 1.0e-7); + if !ok { + println!("{:.5}{:.5}", m, &vecs * &vals * vecs.conjugate_transpose()); + } + + ok + } + } } - - relative_eq!(&vecs * vals * vecs.conjugate_transpose(), m, epsilon = 1.0e-7) } + ); - fn schur_static_mat2(m: Matrix2>) -> bool { - let m = m.map(|e| e.0); - let (vecs, vals) = m.clone().real_schur().unpack(); - - let ok = relative_eq!(vecs * vals * vecs.conjugate_transpose(), m, epsilon = 1.0e-7); - if !ok { - println!("Vecs: {:.5} Vals: {:.5}", vecs, vals); - println!("Reconstruction:{}{}", m, &vecs * &vals * vecs.conjugate_transpose()); - } - ok - } - - fn schur_static_mat3(m: Matrix3>) -> bool { - let m = m.map(|e| e.0); - let (vecs, vals) = m.clone().real_schur().unpack(); - - let ok = relative_eq!(vecs * vals * vecs.conjugate_transpose(), m, epsilon = 1.0e-7); - if !ok { - println!("Vecs: {:.5} Vals: {:.5}", vecs, vals); - println!("{:.5}{:.5}", m, &vecs * &vals * vecs.conjugate_transpose()); - } - ok - } - - fn schur_static_mat4(m: Matrix4>) -> bool { - let m = m.map(|e| e.0); - let (vecs, vals) = m.clone().real_schur().unpack(); - - let ok = relative_eq!(vecs * vals * vecs.conjugate_transpose(), m, epsilon = 1.0e-7); - if !ok { - println!("{:.5}{:.5}", m, &vecs * &vals * vecs.conjugate_transpose()); - } - ok - } - } + gen_tests!(complex, RandComplex); + gen_tests!(f64, RandScalar); } #[test] @@ -79,8 +90,7 @@ fn schur_static_mat4_fail() { -94.61793793643038, -18.64216213611094, 88.32376703241675, -99.30169870309795, 90.62661897246733, 96.74200696130146, 34.7421322611369, 84.86773307198098); - let (vecs, vals) = m.clone().real_schur().unpack(); - println!("{:.6}{:.6}", m, &vecs * &vals * vecs.transpose()); + let (vecs, vals) = m.clone().schur().unpack(); assert!(relative_eq!(vecs * vals * vecs.transpose(), m, epsilon = 1.0e-7)) } @@ -92,8 +102,7 @@ fn schur_static_mat4_fail2() { 27.932377940728202, 82.94220150938, -35.5898884705951, 67.56447552434219, 55.66754906908682, -42.14328890569226, 
-20.684709585152206, -87.9456949841046); - let (vecs, vals) = m.clone().real_schur().unpack(); - println!("{:.6}{:.6}", m, &vecs * &vals * vecs.transpose()); + let (vecs, vals) = m.clone().schur().unpack(); assert!(relative_eq!(vecs * vals * vecs.transpose(), m, epsilon = 1.0e-7)) } @@ -104,8 +113,7 @@ fn schur_static_mat3_fail() { -7.525423104386547, -17.827350599642287, 11.297377444555849, 38.080736654870464, -84.27428302131528, -95.88198590331922); - let (vecs, vals) = m.clone().real_schur().unpack(); - println!("{:.6}{:.6}", m, &vecs * &vals * vecs.transpose()); + let (vecs, vals) = m.clone().schur().unpack(); assert!(relative_eq!(vecs * vals * vecs.transpose(), m, epsilon = 1.0e-7)) } @@ -138,7 +146,6 @@ fn schur_singular() { 0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0]); - let (vecs, vals) = m.clone().real_schur().unpack(); - println!("{:.6}{:.6}", m, &vecs * &vals * vecs.transpose()); + let (vecs, vals) = m.clone().schur().unpack(); assert!(relative_eq!(&vecs * vals * vecs.transpose(), m, epsilon = 1.0e-7)) } diff --git a/tests/linalg/solve.rs b/tests/linalg/solve.rs index 76dc05b5..e2f8a3f8 100644 --- a/tests/linalg/solve.rs +++ b/tests/linalg/solve.rs @@ -1,57 +1,65 @@ #![cfg(feature = "arbitrary")] -use na::{Matrix4, Matrix4x5}; -fn unzero_diagonal(a: &mut Matrix4) { - for i in 0..4 { - if a[(i, i)] < 1.0e-7 { - a[(i, i)] = 1.0; +macro_rules! gen_tests( + ($module: ident, $scalar: ty) => { + mod $module { + use na::{Matrix4, Matrix4x5, Complex}; + #[allow(unused_imports)] + use core::helper::{RandScalar, RandComplex}; + + fn unzero_diagonal(a: &mut Matrix4) { + for i in 0..4 { + if a[(i, i)].asum() < na::convert(1.0e-7) { + a[(i, i)] = N::one(); + } + } + } + + quickcheck! { + fn solve_lower_triangular(a: Matrix4<$scalar>, b: Matrix4x5<$scalar>) -> bool { + let b = b.map(|e| e.0); + let mut a = a.map(|e| e.0); + unzero_diagonal(&mut a); + let tri = a.lower_triangle(); + let x = a.solve_lower_triangular(&b).unwrap(); + + relative_eq!(tri * x, b, epsilon = 1.0e-7) + } + + fn solve_upper_triangular(a: Matrix4<$scalar>, b: Matrix4x5<$scalar>) -> bool { + let b = b.map(|e| e.0); + let mut a = a.map(|e| e.0); + unzero_diagonal(&mut a); + let tri = a.upper_triangle(); + let x = a.solve_upper_triangular(&b).unwrap(); + + relative_eq!(tri * x, b, epsilon = 1.0e-7) + } + + fn tr_solve_lower_triangular(a: Matrix4<$scalar>, b: Matrix4x5<$scalar>) -> bool { + let b = b.map(|e| e.0); + let mut a = a.map(|e| e.0); + unzero_diagonal(&mut a); + let tri = a.lower_triangle(); + let x = a.tr_solve_lower_triangular(&b).unwrap(); + + relative_eq!(tri.transpose() * x, b, epsilon = 1.0e-7) + } + + fn tr_solve_upper_triangular(a: Matrix4<$scalar>, b: Matrix4x5<$scalar>) -> bool { + let b = b.map(|e| e.0); + let mut a = a.map(|e| e.0); + unzero_diagonal(&mut a); + let tri = a.upper_triangle(); + let x = a.tr_solve_upper_triangular(&b).unwrap(); + + relative_eq!(tri.transpose() * x, b, epsilon = 1.0e-7) + } + } } } -} +); -quickcheck! 
{ - fn solve_lower_triangular(a: Matrix4, b: Matrix4x5) -> bool { - let mut a = a; - unzero_diagonal(&mut a); - let tri = a.lower_triangle(); - let x = a.solve_lower_triangular(&b).unwrap(); - - println!("{}\n{}\n{}\n{}", tri, x, tri * x, b); - - relative_eq!(tri * x, b, epsilon = 1.0e-7) - } - - fn solve_upper_triangular(a: Matrix4, b: Matrix4x5) -> bool { - let mut a = a; - unzero_diagonal(&mut a); - let tri = a.upper_triangle(); - let x = a.solve_upper_triangular(&b).unwrap(); - - println!("{}\n{}\n{}\n{}", tri, x, tri * x, b); - - relative_eq!(tri * x, b, epsilon = 1.0e-7) - } - - fn tr_solve_lower_triangular(a: Matrix4, b: Matrix4x5) -> bool { - let mut a = a; - unzero_diagonal(&mut a); - let tri = a.lower_triangle(); - let x = a.tr_solve_lower_triangular(&b).unwrap(); - - println!("{}\n{}\n{}\n{}", tri, x, tri * x, b); - - relative_eq!(tri.transpose() * x, b, epsilon = 1.0e-7) - } - - fn tr_solve_upper_triangular(a: Matrix4, b: Matrix4x5) -> bool { - let mut a = a; - unzero_diagonal(&mut a); - let tri = a.upper_triangle(); - let x = a.tr_solve_upper_triangular(&b).unwrap(); - - println!("{}\n{}\n{}\n{}", tri, x, tri * x, b); - - relative_eq!(tri.transpose() * x, b, epsilon = 1.0e-7) - } -} +gen_tests!(complex, RandComplex); +gen_tests!(f64, RandScalar); diff --git a/tests/linalg/svd.rs b/tests/linalg/svd.rs index efced01a..0eb1d69e 100644 --- a/tests/linalg/svd.rs +++ b/tests/linalg/svd.rs @@ -3,161 +3,161 @@ use na::{DMatrix, Matrix6}; #[cfg(feature = "arbitrary")] mod quickcheck_tests { - use na::{ - DMatrix, DVector, Matrix2, Matrix2x5, Matrix3, Matrix3x5, Matrix4, Matrix5x2, Matrix5x3, - Complex - }; - use std::cmp; - use core::helper::{RandScalar, RandComplex}; + macro_rules! gen_tests( + ($module: ident, $scalar: ty) => { + mod $module { + use na::{ + DMatrix, DVector, Matrix2, Matrix2x5, Matrix3, Matrix3x5, Matrix4, Matrix5x2, Matrix5x3, + Complex + }; + use std::cmp; + #[allow(unused_imports)] + use core::helper::{RandScalar, RandComplex}; + quickcheck! { + fn svd(m: DMatrix<$scalar>) -> bool { + let m = m.map(|e| e.0); + if m.len() > 0 { + let svd = m.clone().svd(true, true); + let recomp_m = svd.clone().recompose().unwrap(); + let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap()); + let ds = DMatrix::from_diagonal(&s.map(|e| Complex::from_real(e))); - quickcheck! 
{ - fn svd(m: DMatrix>) -> bool { - let m = m.map(|e| e.0); - if m.len() > 0 { - let svd = m.clone().svd(true, true); - let recomp_m = svd.clone().recompose().unwrap(); - let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap()); - let ds = DMatrix::from_diagonal(&s.map(|e| Complex::from_real(e))); + s.iter().all(|e| *e >= 0.0) && + relative_eq!(&u * ds * &v_t, recomp_m, epsilon = 1.0e-5) && + relative_eq!(m, recomp_m, epsilon = 1.0e-5) + } + else { + true + } + } - println!("{}{}", &m, &u * &ds * &v_t); + fn svd_static_5_3(m: Matrix5x3<$scalar>) -> bool { + let m = m.map(|e| e.0); + let svd = m.svd(true, true); + let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap()); + let ds = Matrix3::from_diagonal(&s.map(|e| Complex::from_real(e))); - s.iter().all(|e| *e >= 0.0) && - relative_eq!(&u * ds * &v_t, recomp_m, epsilon = 1.0e-5) && - relative_eq!(m, recomp_m, epsilon = 1.0e-5) - } - else { - true - } - } + s.iter().all(|e| *e >= 0.0) && + relative_eq!(m, &u * ds * &v_t, epsilon = 1.0e-5) && + u.is_orthogonal(1.0e-5) && + v_t.is_orthogonal(1.0e-5) + } - fn svd_static_5_3(m: Matrix5x3>) -> bool { - let m = m.map(|e| e.0); - let svd = m.svd(true, true); - let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap()); - let ds = Matrix3::from_diagonal(&s.map(|e| Complex::from_real(e))); + fn svd_static_5_2(m: Matrix5x2<$scalar>) -> bool { + let m = m.map(|e| e.0); + let svd = m.svd(true, true); + let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap()); + let ds = Matrix2::from_diagonal(&s.map(|e| Complex::from_real(e))); - s.iter().all(|e| *e >= 0.0) && - relative_eq!(m, &u * ds * &v_t, epsilon = 1.0e-5) && - u.is_orthogonal(1.0e-5) && - v_t.is_orthogonal(1.0e-5) - } + s.iter().all(|e| *e >= 0.0) && + relative_eq!(m, &u * ds * &v_t, epsilon = 1.0e-5) && + u.is_orthogonal(1.0e-5) && + v_t.is_orthogonal(1.0e-5) + } - fn svd_static_5_2(m: Matrix5x2>) -> bool { - let m = m.map(|e| e.0); - let svd = m.svd(true, true); - let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap()); - let ds = Matrix2::from_diagonal(&s.map(|e| Complex::from_real(e))); + fn svd_static_3_5(m: Matrix3x5<$scalar>) -> bool { + let m = m.map(|e| e.0); + let svd = m.svd(true, true); + let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap()); - s.iter().all(|e| *e >= 0.0) && - relative_eq!(m, &u * ds * &v_t, epsilon = 1.0e-5) && - u.is_orthogonal(1.0e-5) && - v_t.is_orthogonal(1.0e-5) - } + let ds = Matrix3::from_diagonal(&s.map(|e| Complex::from_real(e))); - fn svd_static_3_5(m: Matrix3x5>) -> bool { - let m = m.map(|e| e.0); - let svd = m.svd(true, true); - let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap()); + s.iter().all(|e| *e >= 0.0) && + relative_eq!(m, u * ds * v_t, epsilon = 1.0e-5) + } - let ds = Matrix3::from_diagonal(&s.map(|e| Complex::from_real(e))); + fn svd_static_2_5(m: Matrix2x5<$scalar>) -> bool { + let m = m.map(|e| e.0); + let svd = m.svd(true, true); + let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap()); + let ds = Matrix2::from_diagonal(&s.map(|e| Complex::from_real(e))); - s.iter().all(|e| *e >= 0.0) && - relative_eq!(m, u * ds * v_t, epsilon = 1.0e-5) - } + s.iter().all(|e| *e >= 0.0) && + relative_eq!(m, u * ds * v_t, epsilon = 1.0e-5) + } - fn svd_static_2_5(m: Matrix2x5>) -> bool { - let m = m.map(|e| e.0); - let svd = m.svd(true, true); - let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap()); - let ds = Matrix2::from_diagonal(&s.map(|e| 
+                    fn svd_static_square(m: Matrix4<$scalar>) -> bool {
+                        let m = m.map(|e| e.0);
+                        let svd = m.svd(true, true);
+                        let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap());
+                        let ds = Matrix4::from_diagonal(&s.map(|e| Complex::from_real(e)));

-            s.iter().all(|e| *e >= 0.0) &&
-            relative_eq!(m, u * ds * v_t, epsilon = 1.0e-5)
-        }
+                        s.iter().all(|e| *e >= 0.0) &&
+                        relative_eq!(m, u * ds * v_t, epsilon = 1.0e-5) &&
+                        u.is_orthogonal(1.0e-5) &&
+                        v_t.is_orthogonal(1.0e-5)
+                    }

-        fn svd_static_square(m: Matrix4<RandComplex<f64>>) -> bool {
-            let m = m.map(|e| e.0);
-            let svd = m.svd(true, true);
-            let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap());
-            let ds = Matrix4::from_diagonal(&s.map(|e| Complex::from_real(e)));
+                    fn svd_static_square_2x2(m: Matrix2<$scalar>) -> bool {
+                        let m = m.map(|e| e.0);
+                        let svd = m.svd(true, true);
+                        let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap());
+                        let ds = Matrix2::from_diagonal(&s.map(|e| Complex::from_real(e)));

-            s.iter().all(|e| *e >= 0.0) &&
-            relative_eq!(m, u * ds * v_t, epsilon = 1.0e-5) &&
-            u.is_orthogonal(1.0e-5) &&
-            v_t.is_orthogonal(1.0e-5)
-        }
+                        s.iter().all(|e| *e >= 0.0) &&
+                        relative_eq!(m, u * ds * v_t, epsilon = 1.0e-5) &&
+                        u.is_orthogonal(1.0e-5) &&
+                        v_t.is_orthogonal(1.0e-5)
+                    }

-        fn svd_static_square_2x2(m: Matrix2<RandComplex<f64>>) -> bool {
-            let m = m.map(|e| e.0);
-            let svd = m.svd(true, true);
-            let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap());
-            let ds = Matrix2::from_diagonal(&s.map(|e| Complex::from_real(e)));
+                    fn svd_pseudo_inverse(m: DMatrix<$scalar>) -> bool {
+                        let m = m.map(|e| e.0);

-            println!("u, s, v_t: {}{}{}", u, s, v_t);
-            println!("m: {}", m);
-            println!("recomp: {}", u * ds * v_t);
-            println!("uu_t, vv_t: {}{}", u * u.conjugate_transpose(), v_t.conjugate_transpose() * v_t);
+                        if m.len() > 0 {
+                            let svd = m.clone().svd(true, true);
+                            let pinv = svd.pseudo_inverse(1.0e-10).unwrap();

-            s.iter().all(|e| *e >= 0.0) &&
-            relative_eq!(m, u * ds * v_t, epsilon = 1.0e-5) &&
-            u.is_orthogonal(1.0e-5) &&
-            v_t.is_orthogonal(1.0e-5)
-        }
+                            if m.nrows() > m.ncols() {
+                                (pinv * m).is_identity(1.0e-5)
+                            }
+                            else {
+                                (m * pinv).is_identity(1.0e-5)
+                            }
+                        }
+                        else {
+                            true
+                        }
+                    }

-        fn svd_pseudo_inverse(m: DMatrix<RandComplex<f64>>) -> bool {
-            let m = m.map(|e| e.0);
+                    fn svd_solve(n: usize, nb: usize) -> bool {
+                        let n = cmp::max(1, cmp::min(n, 10));
+                        let nb = cmp::min(nb, 10);
+                        let m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0);

-            if m.len() > 0 {
-                let svd = m.clone().svd(true, true);
-                let pinv = svd.pseudo_inverse(1.0e-10).unwrap();
+                        let svd = m.clone().svd(true, true);

-                if m.nrows() > m.ncols() {
-                    println!("{}", &pinv * &m);
-                    (pinv * m).is_identity(1.0e-5)
-                }
-                else {
-                    println!("{}", &m * &pinv);
-                    (m * pinv).is_identity(1.0e-5)
+                        if svd.rank(1.0e-7) == n {
+                            let b1 = DVector::<$scalar>::new_random(n).map(|e| e.0);
+                            let b2 = DMatrix::<$scalar>::new_random(n, nb).map(|e| e.0);
+
+                            let sol1 = svd.solve(&b1, 1.0e-7).unwrap();
+                            let sol2 = svd.solve(&b2, 1.0e-7).unwrap();
+
+                            let recomp = svd.recompose().unwrap();
+                            if !relative_eq!(m, recomp, epsilon = 1.0e-6) {
+                                println!("{}{}", m, recomp);
+                            }
+
+                            if !relative_eq!(&m * &sol1, b1, epsilon = 1.0e-6) {
+                                println!("Problem 1: {:.6}{:.6}", b1, &m * sol1);
+                                return false;
+                            }
+                            if !relative_eq!(&m * &sol2, b2, epsilon = 1.0e-6) {
+                                println!("Problem 2: {:.6}{:.6}", b2, &m * sol2);
+                                return false;
+                            }
+                        }
+
+                        true
+                    }
                 }
             }
-            else {
-                true
-            }
         }
+    );

-        fn svd_solve(n: usize, nb: usize) -> bool {
-            let n = cmp::max(1, cmp::min(n, 10));
-            let nb = cmp::min(nb, 10);
-            let m = DMatrix::<RandComplex<f64>>::new_random(n, n).map(|e| e.0);
-
-            let svd = m.clone().svd(true, true);
-
-            if svd.rank(1.0e-7) == n {
-                let b1 = DVector::<RandComplex<f64>>::new_random(n).map(|e| e.0);
-                let b2 = DMatrix::<RandComplex<f64>>::new_random(n, nb).map(|e| e.0);
-
-                let sol1 = svd.solve(&b1, 1.0e-7).unwrap();
-                let sol2 = svd.solve(&b2, 1.0e-7).unwrap();
-
-                let recomp = svd.recompose().unwrap();
-                if !relative_eq!(m, recomp, epsilon = 1.0e-6) {
-                    println!("{}{}", m, recomp);
-                }
-
-                if !relative_eq!(&m * &sol1, b1, epsilon = 1.0e-6) {
-                    println!("Problem 1: {:.6}{:.6}", b1, &m * sol1);
-                    return false;
-                }
-                if !relative_eq!(&m * &sol2, b2, epsilon = 1.0e-6) {
-                    println!("Problem 2: {:.6}{:.6}", b2, &m * sol2);
-                    return false;
-                }
-            }
-
-            true
-        }
-    }
+    gen_tests!(complex, RandComplex<f64>);
+    gen_tests!(f64, RandScalar<f64>);
 }
@@ -194,8 +194,6 @@ fn svd_singular() {
     let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap());
     let ds = DMatrix::from_diagonal(&s);

-    println!("{:.5}", &u * &ds * &v_t);
-
     assert!(s.iter().all(|e| *e >= 0.0));
     assert!(u.is_orthogonal(1.0e-5));
     assert!(v_t.is_orthogonal(1.0e-5));
@@ -345,11 +343,7 @@ fn svd_fail() {
         0.12293810556077789, 0.6617084679545999, 0.9002240700227326, 0.027153062135304884,
         0.3630189466989524, 0.18207502727558866, 0.843196731466686, 0.08951878746549924,
         0.7533450877576973, 0.009558876499740077, 0.9429679490873482, 0.9355764454129878);
     let svd = m.clone().svd(true, true);
-    println!("Singular values: {}", svd.singular_values);
-    println!("u: {:.5}", svd.u.unwrap());
-    println!("v: {:.5}", svd.v_t.unwrap());
     let recomp = svd.recompose().unwrap();
-    println!("{:.5}{:.5}", m, recomp);
     assert_relative_eq!(m, recomp, epsilon = 1.0e-5);
 }
diff --git a/tests/linalg/tridiagonal.rs b/tests/linalg/tridiagonal.rs
index fee0176a..368aba13 100644
--- a/tests/linalg/tridiagonal.rs
+++ b/tests/linalg/tridiagonal.rs
@@ -1,47 +1,56 @@
 #![cfg(feature = "arbitrary")]

-use std::cmp;
-use na::{DMatrix, Matrix2, Matrix4};
-use core::helper::{RandScalar, RandComplex};
+macro_rules! gen_tests(
+    ($module: ident, $scalar: ty) => {
+        mod $module {
+            use std::cmp;

-quickcheck! {
-    fn symm_tridiagonal(n: usize) -> bool {
-        let n = cmp::max(1, cmp::min(n, 50));
-        let m = DMatrix::<RandComplex<f64>>::new_random(n, n).map(|e| e.0).hermitian_part();
-        let tri = m.clone().symmetric_tridiagonalize();
-        let recomp = tri.recompose();
+            use na::{DMatrix, Matrix2, Matrix4};
+            #[allow(unused_imports)]
+            use core::helper::{RandScalar, RandComplex};

-        relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-7)
+            quickcheck! {
+                fn symm_tridiagonal(n: usize) -> bool {
+                    let n = cmp::max(1, cmp::min(n, 50));
+                    let m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0).hermitian_part();
+                    let tri = m.clone().symmetric_tridiagonalize();
+                    let recomp = tri.recompose();
+
+                    relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-7)
+                }
+
+                fn symm_tridiagonal_singular(n: usize) -> bool {
+                    let n = cmp::max(1, cmp::min(n, 4));
+                    let mut m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0).hermitian_part();
+                    m.row_mut(n / 2).fill(na::zero());
+                    m.column_mut(n / 2).fill(na::zero());
+                    let tri = m.clone().symmetric_tridiagonalize();
+                    let recomp = tri.recompose();
+
+                    relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-7)
+                }
+
+                fn symm_tridiagonal_static_square(m: Matrix4<$scalar>) -> bool {
+                    let m = m.map(|e| e.0).hermitian_part();
+                    let tri = m.symmetric_tridiagonalize();
+                    let recomp = tri.recompose();
+
+                    relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-7)
+                }
+
+                fn symm_tridiagonal_static_square_2x2(m: Matrix2<$scalar>) -> bool {
+                    let m = m.map(|e| e.0).hermitian_part();
+                    let tri = m.symmetric_tridiagonalize();
+                    let recomp = tri.recompose();
+
+                    relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-7)
+                }
+            }
+        }
     }
-
-    fn symm_tridiagonal_singular(n: usize) -> bool {
-        let n = cmp::max(1, cmp::min(n, 4));
-        let mut m = DMatrix::<RandComplex<f64>>::new_random(n, n).map(|e| e.0).hermitian_part();
-        m.row_mut(n / 2).fill(na::zero());
-        m.column_mut(n / 2).fill(na::zero());
-        let tri = m.clone().symmetric_tridiagonalize();
-        println!("Tri: {:?}", tri);
-        let recomp = tri.recompose();
-        println!("Recomp: {:?}", recomp);
+);

-        relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-7)
-    }
-
-    fn symm_tridiagonal_static_square(m: Matrix4<RandComplex<f64>>) -> bool {
-        let m = m.map(|e| e.0).hermitian_part();
-        let tri = m.symmetric_tridiagonalize();
-        let recomp = tri.recompose();
-
-        relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-7)
-    }
-
-    fn symm_tridiagonal_static_square_2x2(m: Matrix2<RandComplex<f64>>) -> bool {
-        let m = m.map(|e| e.0).hermitian_part();
-        let tri = m.symmetric_tridiagonalize();
-        let recomp = tri.recompose();
-
-        relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-7)
-    }
-}
+gen_tests!(complex, RandComplex<f64>);
+gen_tests!(f64, RandScalar<f64>);