Cleanup warnings and rename RealSchur -> Schur

sebcrozet 2019-03-23 11:46:56 +01:00
parent fd65738738
commit 1001e8ee0f
33 changed files with 714 additions and 691 deletions

View File

@ -1,28 +1,28 @@
use na::{Matrix4, RealSchur};
use na::{Matrix4, Schur};
use test::{self, Bencher};
#[bench]
fn schur_decompose_4x4(bh: &mut Bencher) {
let m = Matrix4::<f64>::new_random();
bh.iter(|| test::black_box(RealSchur::new(m.clone())))
bh.iter(|| test::black_box(Schur::new(m.clone())))
}
#[bench]
fn schur_decompose_10x10(bh: &mut Bencher) {
let m = ::reproductible_dmatrix(10, 10);
bh.iter(|| test::black_box(RealSchur::new(m.clone())))
bh.iter(|| test::black_box(Schur::new(m.clone())))
}
#[bench]
fn schur_decompose_100x100(bh: &mut Bencher) {
let m = ::reproductible_dmatrix(100, 100);
bh.iter(|| test::black_box(RealSchur::new(m.clone())))
bh.iter(|| test::black_box(Schur::new(m.clone())))
}
#[bench]
fn schur_decompose_200x200(bh: &mut Bencher) {
let m = ::reproductible_dmatrix(200, 200);
bh.iter(|| test::black_box(RealSchur::new(m.clone())))
bh.iter(|| test::black_box(Schur::new(m.clone())))
}
#[bench]

View File

@ -98,7 +98,7 @@ pub use self::eigen::Eigen;
pub use self::hessenberg::Hessenberg;
pub use self::lu::{LUScalar, LU};
pub use self::qr::QR;
pub use self::schur::RealSchur;
pub use self::schur::Schur;
pub use self::svd::SVD;
pub use self::symmetric_eigen::SymmetricEigen;

View File

@ -33,7 +33,7 @@ use lapack;
))
)]
#[derive(Clone, Debug)]
pub struct RealSchur<N: Scalar, D: Dim>
pub struct Schur<N: Scalar, D: Dim>
where DefaultAllocator: Allocator<N, D> + Allocator<N, D, D>
{
re: VectorN<N, D>,
@ -42,21 +42,21 @@ where DefaultAllocator: Allocator<N, D> + Allocator<N, D, D>
q: MatrixN<N, D>,
}
impl<N: Scalar, D: Dim> Copy for RealSchur<N, D>
impl<N: Scalar, D: Dim> Copy for Schur<N, D>
where
DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>,
MatrixN<N, D>: Copy,
VectorN<N, D>: Copy,
{}
impl<N: RealSchurScalar + Real, D: Dim> RealSchur<N, D>
impl<N: SchurScalar + Real, D: Dim> Schur<N, D>
where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
{
/// Computes the eigenvalues and real Schur form of the matrix `m`.
///
/// Panics if the method did not converge.
pub fn new(m: MatrixN<N, D>) -> Self {
Self::try_new(m).expect("RealSchur decomposition: convergence failed.")
Self::try_new(m).expect("Schur decomposition: convergence failed.")
}
/// Computes the eigenvalues and real Schur form of the matrix `m`.
@ -118,7 +118,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
);
lapack_check!(info);
Some(RealSchur {
Some(Schur {
re: wr,
im: wi,
t: m,
@ -162,7 +162,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, D>
*
*/
/// Trait implemented by scalars for which Lapack implements the Real Schur decomposition.
pub trait RealSchurScalar: Scalar {
pub trait SchurScalar: Scalar {
#[allow(missing_docs)]
fn xgees(
jobvs: u8,
@ -202,7 +202,7 @@ pub trait RealSchurScalar: Scalar {
macro_rules! real_eigensystem_scalar_impl (
($N: ty, $xgees: path) => (
impl RealSchurScalar for $N {
impl SchurScalar for $N {
#[inline]
fn xgees(jobvs: u8,
sort: u8,

View File

@ -2,6 +2,6 @@ mod cholesky;
mod lu;
mod qr;
mod real_eigensystem;
mod real_schur;
mod schur;
mod svd;
mod symmetric_eigen;

View File

@ -1,5 +1,5 @@
use na::{DMatrix, Matrix4};
use nl::RealSchur;
use nl::Schur;
use std::cmp;
quickcheck! {
@ -7,13 +7,13 @@ quickcheck! {
let n = cmp::max(1, cmp::min(n, 10));
let m = DMatrix::<f64>::new_random(n, n);
let (vecs, vals) = RealSchur::new(m.clone()).unpack();
let (vecs, vals) = Schur::new(m.clone()).unpack();
relative_eq!(&vecs * vals * vecs.transpose(), m, epsilon = 1.0e-7)
}
fn schur_static(m: Matrix4<f64>) -> bool {
let (vecs, vals) = RealSchur::new(m.clone()).unpack();
let (vecs, vals) = Schur::new(m.clone()).unpack();
relative_eq!(vecs * vals * vecs.transpose(), m, epsilon = 1.0e-7)
}
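
The quickcheck properties above give the whole nalgebra-lapack usage pattern. A minimal self-contained sketch of the same round trip, assuming the crate is imported as `nl` (as in these tests) and that a LAPACK backend is available at build time:

extern crate nalgebra as na;
extern crate nalgebra_lapack as nl;

use na::Matrix4;
use nl::Schur;

fn main() {
    let m = Matrix4::<f64>::new_random();
    // `Schur::new` panics if LAPACK's ?gees routine does not converge;
    // `Schur::try_new` returns an `Option` instead.
    let (q, t) = Schur::new(m.clone()).unpack();
    // The decomposition satisfies M = Q * T * Q^T.
    assert!((&q * &t * q.transpose() - m).norm() < 1.0e-7);
}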

View File

@ -21,6 +21,8 @@ impl<N: Complex, D: Dim, S: Storage<N, D>> Vector<N, D, S> {
/// # Examples:
///
/// ```
/// # extern crate num_complex;
/// # extern crate nalgebra;
/// # use num_complex::Complex;
/// # use nalgebra::Vector3;
/// let vec = Vector3::new(Complex::new(11.0, 3.0), Complex::new(-15.0, 0.0), Complex::new(13.0, 5.0));
@ -197,11 +199,13 @@ impl<N: Complex, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// # Examples:
///
/// ```
/// # extern crate num_complex;
/// # extern crate nalgebra;
/// # use num_complex::Complex;
/// # use nalgebra::Matrix2x3;
/// let mat = Matrix2x3::new(Complex::new(11.0, 1.0), Complex::new(-12.0, 2.0), Complex::new(13.0, 3.0),
/// Complex::new(21.0, 43.0), Complex::new(22.0, 5.0), Complex::new(-23.0, 0.0));
/// assert_eq!(mat.iamax_full(), (1, 0));
/// assert_eq!(mat.icamax_full(), (1, 0));
/// ```
#[inline]
pub fn icamax_full(&self) -> (usize, usize) {

View File

@ -773,7 +773,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
// FIXME: rename `apply` to `apply_mut` and `apply_into` to `apply`?
/// Returns `self` with each of its components replaced by the result of a closure `f` applied on it.
#[inline]
pub fn apply_into<F: FnMut(N) -> N>(mut self, mut f: F) -> Self{
pub fn apply_into<F: FnMut(N) -> N>(mut self, f: F) -> Self{
self.apply(f);
self
}
@ -1093,16 +1093,13 @@ impl<N: Complex, D: Dim, S: Storage<N, D, D>> SquareMatrix<N, D, S> {
pub fn hermitian_part(&self) -> MatrixMN<N, D, D>
where DefaultAllocator: Allocator<N, D, D> {
assert!(self.is_square(), "Cannot compute the hermitian part of a non-square matrix.");
let nrows = self.data.shape().0;
unsafe {
let mut tr = self.conjugate_transpose();
tr += self;
tr *= ::convert::<_, N>(0.5);
tr
}
}
}
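
For context, the hermitian part computed above is H = (A + A^H) / 2. A minimal sketch using the method names as they appear in this commit (`hermitian_part` and `conjugate_transpose`):

extern crate nalgebra as na;
extern crate num_complex;

use na::Matrix2;
use num_complex::Complex;

fn main() {
    let a = Matrix2::new(
        Complex::new(1.0, 2.0), Complex::new(3.0, -1.0),
        Complex::new(0.0, 4.0), Complex::new(5.0, 0.0),
    );
    // H = (A + A^H) * 0.5 equals its own conjugate transpose.
    let h = a.hermitian_part();
    assert!((&h - h.conjugate_transpose()).norm() < 1.0e-12);
}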
impl<N: Scalar + One + Zero, D: DimAdd<U1> + IsNotStaticOne, S: Storage<N, D, D>> Matrix<N, D, D, S> {

View File

@ -7,7 +7,7 @@ use alga::general::{
AbstractGroup, AbstractGroupAbelian, AbstractLoop, AbstractMagma, AbstractModule,
AbstractMonoid, AbstractQuasigroup, AbstractSemigroup, Additive, ClosedAdd, ClosedMul,
ClosedNeg, Field, Identity, TwoSidedInverse, JoinSemilattice, Lattice, MeetSemilattice, Module,
Multiplicative, Real, RingCommutative, Complex
Multiplicative, RingCommutative, Complex
};
use alga::linear::{
FiniteDimInnerSpace, FiniteDimVectorSpace, InnerSpace, NormedSpace, VectorSpace,

View File

@ -1,8 +1,7 @@
use num::{Signed, Zero};
use std::cmp::PartialOrd;
use num::Zero;
use allocator::Allocator;
use ::{Real, Complex, Scalar};
use ::{Real, Complex};
use storage::{Storage, StorageMut};
use base::{DefaultAllocator, Matrix, Dim, MatrixMN};
use constraint::{SameNumberOfRows, SameNumberOfColumns, ShapeConstraint};

View File

@ -13,8 +13,6 @@ use abomonation::Abomonation;
use alga::general::{SubsetOf, Complex};
use alga::linear::NormedSpace;
use ::Real;
/// A wrapper that ensures the underlying algebraic entity has a unit norm.
///
/// Use `.as_ref()` or `.into_inner()` to obtain the underlying value by-reference or by-move.

View File

@ -3,22 +3,22 @@ use base::storage::Owned;
#[cfg(feature = "arbitrary")]
use quickcheck::{Arbitrary, Gen};
use alga::general::Real;
use alga::general::Complex;
use base::Scalar;
use base::allocator::Allocator;
use base::dimension::{Dim, Dynamic, U2};
use base::{DefaultAllocator, MatrixN};
use geometry::UnitComplex;
use num_complex::Complex;
use linalg::givens::GivensRotation;
/// A random orthogonal matrix.
#[derive(Clone, Debug)]
pub struct RandomOrthogonal<N: Real, D: Dim = Dynamic>
pub struct RandomOrthogonal<N: Scalar, D: Dim = Dynamic>
where DefaultAllocator: Allocator<N, D, D>
{
m: MatrixN<N, D>,
}
impl<N: Real, D: Dim> RandomOrthogonal<N, D>
impl<N: Complex, D: Dim> RandomOrthogonal<N, D>
where DefaultAllocator: Allocator<N, D, D>
{
/// Retrieve the generated matrix.
@ -30,10 +30,9 @@ where DefaultAllocator: Allocator<N, D, D>
pub fn new<Rand: FnMut() -> N>(dim: D, mut rand: Rand) -> Self {
let mut res = MatrixN::identity_generic(dim, dim);
// Create an orthogonal matrix by compositing planar 2D rotations.
// Create an orthogonal matrix by composing random Givens rotations.
for i in 0..dim.value() - 1 {
let c = Complex::new(rand(), rand());
let rot: UnitComplex<N> = UnitComplex::from_complex(c);
let rot = GivensRotation::new(rand(), rand()).0;
rot.rotate(&mut res.fixed_rows_mut::<U2>(i));
}
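
The new comment describes the construction: start from the identity and apply a random Givens rotation to each consecutive pair of rows. A minimal sketch of that idea with plain f64 rotations (an illustration only, not the crate's `GivensRotation` type):

extern crate nalgebra as na;
extern crate rand;

use na::DMatrix;

fn random_orthogonal(n: usize) -> DMatrix<f64> {
    let mut q = DMatrix::<f64>::identity(n, n);
    for i in 0..n.saturating_sub(1) {
        // A random planar rotation acting on rows i and i + 1.
        let theta = rand::random::<f64>() * 2.0 * std::f64::consts::PI;
        let (s, c) = theta.sin_cos();
        for j in 0..n {
            let (a, b) = (q[(i, j)], q[(i + 1, j)]);
            q[(i, j)] = c * a - s * b;
            q[(i + 1, j)] = s * a + c * b;
        }
    }
    q
}

fn main() {
    // A product of rotations is orthogonal, so Q^T * Q is the identity.
    let q = random_orthogonal(5);
    assert!((q.transpose() * &q).is_identity(1.0e-10));
}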
@ -42,7 +41,7 @@ where DefaultAllocator: Allocator<N, D, D>
}
#[cfg(feature = "arbitrary")]
impl<N: Real + Arbitrary + Send, D: Dim> Arbitrary for RandomOrthogonal<N, D>
impl<N: Complex + Arbitrary + Send, D: Dim> Arbitrary for RandomOrthogonal<N, D>
where
DefaultAllocator: Allocator<N, D, D>,
Owned<N, D, D>: Clone + Send,

View File

@ -3,7 +3,8 @@ use base::storage::Owned;
#[cfg(feature = "arbitrary")]
use quickcheck::{Arbitrary, Gen};
use alga::general::Real;
use alga::general::Complex;
use base::Scalar;
use base::allocator::Allocator;
use base::dimension::{Dim, Dynamic};
use base::{DefaultAllocator, MatrixN};
@ -12,13 +13,13 @@ use debug::RandomOrthogonal;
/// A random, well-conditioned, symmetric positive-definite matrix.
#[derive(Clone, Debug)]
pub struct RandomSDP<N: Real, D: Dim = Dynamic>
pub struct RandomSDP<N: Scalar, D: Dim = Dynamic>
where DefaultAllocator: Allocator<N, D, D>
{
m: MatrixN<N, D>,
}
impl<N: Real, D: Dim> RandomSDP<N, D>
impl<N: Complex, D: Dim> RandomSDP<N, D>
where DefaultAllocator: Allocator<N, D, D>
{
/// Retrieve the generated matrix.
@ -30,11 +31,11 @@ where DefaultAllocator: Allocator<N, D, D>
/// random reals generators.
pub fn new<Rand: FnMut() -> N>(dim: D, mut rand: Rand) -> Self {
let mut m = RandomOrthogonal::new(dim, || rand()).unwrap();
let mt = m.transpose();
let mt = m.conjugate_transpose();
for i in 0..dim.value() {
let mut col = m.column_mut(i);
let eigenval = N::one() + rand().abs();
let eigenval = N::one() + N::from_real(rand().modulus());
col *= eigenval;
}
@ -43,7 +44,7 @@ where DefaultAllocator: Allocator<N, D, D>
}
#[cfg(feature = "arbitrary")]
impl<N: Real + Arbitrary + Send, D: Dim> Arbitrary for RandomSDP<N, D>
impl<N: Complex + Arbitrary + Send, D: Dim> Arbitrary for RandomSDP<N, D>
where
DefaultAllocator: Allocator<N, D, D>,
Owned<N, D, D>: Clone + Send,

View File

@ -66,7 +66,7 @@ an optimized set of tools for computer graphics and physics. Those features incl
* General transformations that do not have to be invertible, stored as a homogeneous matrix:
`Transform2`, `Transform3`.
* 3D projections for computer graphics: `Perspective3`, `Orthographic3`.
* Matrix factorizations: `Cholesky`, `QR`, `LU`, `FullPivLU`, `SVD`, `RealSchur`, `Hessenberg`, `SymmetricEigen`.
* Matrix factorizations: `Cholesky`, `QR`, `LU`, `FullPivLU`, `SVD`, `Schur`, `Hessenberg`, `SymmetricEigen`.
* Insertion and removal of rows or columns of a matrix.
* Implements traits from the [alga](https://crates.io/crates/alga) crate for
generic programming.
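
As a usage sketch for the renamed `Schur` entry in the factorization list above (assuming the post-rename API introduced by this commit):

extern crate nalgebra as na;

use na::Matrix3;

fn main() {
    let m = Matrix3::new(
        4.0, 1.0, -2.0,
        1.0, 2.0,  0.0,
        1.0, 2.0,  3.0,
    );
    // Real Schur decomposition: M = Q * T * Q^T with Q orthogonal and T quasi-upper-triangular.
    let (q, t) = m.schur().unpack();
    assert!((q * t * q.transpose() - m).norm() < 1.0e-7);
}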

View File

@ -1,7 +1,6 @@
#[cfg(feature = "serde-serialize")]
use serde::{Deserialize, Serialize};
use num::Zero;
use alga::general::Complex;
use allocator::Allocator;
@ -59,23 +58,26 @@ where DefaultAllocator: Allocator<N, D, D>
let mut col_j = col_j.rows_range_mut(j..);
let col_k = col_k.rows_range(j..);
col_j.axpy(factor, &col_k, N::one());
col_j.axpy(factor.conjugate(), &col_k, N::one());
}
let diag = unsafe { *matrix.get_unchecked((j, j)) };
if diag.real() > N::Real::zero() {
let denom = diag.sqrt();
if !diag.is_zero() {
if let Some(denom) = diag.try_sqrt() {
unsafe {
*matrix.get_unchecked_mut((j, j)) = denom;
}
let mut col = matrix.slice_range_mut(j + 1.., j);
col /= denom;
} else {
return None;
continue;
}
}
return None;
}
Some(Cholesky { chol: matrix })
}
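
The loop above is the complex-aware part of the update: the rank-one corrections use the conjugated factor and the pivot goes through `try_sqrt`, so a non-positive-definite input yields `None`. A minimal usage sketch on a real positive-definite matrix:

extern crate nalgebra as na;

use na::Matrix3;

fn main() {
    // A symmetric positive-definite matrix.
    let m = Matrix3::new(
        4.0, 2.0, 1.0,
        2.0, 5.0, 3.0,
        1.0, 3.0, 6.0,
    );
    let chol = m.cholesky().expect("not positive-definite");
    // `unpack` returns the lower-triangular factor L with M = L * L^T.
    let l = chol.unpack();
    assert!((l * l.transpose() - m).norm() < 1.0e-10);
}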
@ -119,7 +121,7 @@ where DefaultAllocator: Allocator<N, D, D>
ShapeConstraint: SameNumberOfRows<R2, D>,
{
let _ = self.chol.solve_lower_triangular_mut(b);
let _ = self.chol.tr_solve_lower_triangular_mut(b);
let _ = self.chol.conjugate().tr_solve_lower_triangular_mut(b);
}
/// Returns the solution of the system `self * x = b` where `self` is the decomposed matrix and

View File

@ -15,7 +15,7 @@ use constraint::{DimEq, ShapeConstraint};
use geometry::{Reflection, UnitComplex};
use linalg::householder;
use linalg::RealSchur;
use linalg::Schur;
/// Eigendecomposition of a real matrix with real eigenvalues (or complex eigenvalues for complex matrices).
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
@ -76,7 +76,7 @@ where
);
let dim = m.nrows();
let (mut eigenvectors, mut eigenvalues) = RealSchur::new(m, 0).unwrap().unpack();
let (mut eigenvectors, mut eigenvalues) = Schur::new(m, 0).unwrap().unpack();
println!("Schur eigenvalues: {}", eigenvalues);

View File

@ -1,15 +1,13 @@
//! Construction of givens rotations.
use alga::general::{Complex, Real};
use alga::general::Complex;
use num::{Zero, One};
use num_complex::Complex as NumComplex;
use base::dimension::{Dim, U2};
use base::constraint::{ShapeConstraint, DimEq};
use base::storage::{Storage, StorageMut};
use base::{Vector, Matrix};
use geometry::UnitComplex;
/// A Givens rotation.
#[derive(Debug, Clone, Copy)]

View File

@ -12,12 +12,14 @@ use base::storage::Storage;
use base::{DefaultAllocator, MatrixN, SquareMatrix, Unit, Vector2, Vector3, VectorN};
use constraint::{DimEq, ShapeConstraint};
use geometry::{Reflection, UnitComplex};
use geometry::Reflection;
use linalg::householder;
use linalg::Hessenberg;
use linalg::givens::GivensRotation;
/// Real Schur decomposition of a square matrix.
/// Schur decomposition of a square matrix.
///
/// If this is a real matrix, this will be a Real Schur decomposition.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[cfg_attr(
feature = "serde-serialize",
@ -34,20 +36,20 @@ use linalg::givens::GivensRotation;
))
)]
#[derive(Clone, Debug)]
pub struct RealSchur<N: Complex, D: Dim>
pub struct Schur<N: Complex, D: Dim>
where DefaultAllocator: Allocator<N, D, D>
{
q: MatrixN<N, D>,
t: MatrixN<N, D>,
}
impl<N: Complex, D: Dim> Copy for RealSchur<N, D>
impl<N: Complex, D: Dim> Copy for Schur<N, D>
where
DefaultAllocator: Allocator<N, D, D>,
MatrixN<N, D>: Copy,
{}
impl<N: Complex, D: Dim> RealSchur<N, D>
impl<N: Complex, D: Dim> Schur<N, D>
where
D: DimSub<U1>, // For Hessenberg.
ShapeConstraint: DimEq<Dynamic, DimDiff<D, U1>>, // For Hessenberg.
@ -75,7 +77,7 @@ where
pub fn try_new(m: MatrixN<N, D>, eps: N::Real, max_niter: usize) -> Option<Self> {
let mut work = unsafe { VectorN::new_uninitialized_generic(m.data.shape().0, U1) };
Self::do_decompose(m, &mut work, eps, max_niter, true).map(|(q, t)| RealSchur {
Self::do_decompose(m, &mut work, eps, max_niter, true).map(|(q, t)| Schur {
q: q.unwrap(),
t: t,
})
@ -474,8 +476,6 @@ fn compute_2x2_basis<N: Complex, S: Storage<N, U2, U2>>(
let x1 = eigval1 - m[(1, 1)];
let x2 = eigval2 - m[(1, 1)];
println!("eigval1: {}, eigval2: {}, h10: {}", eigval1, eigval2, h10);
// NOTE: Choose the one that yields a larger x component.
// This is necessary for numerical stability of the normalization of the complex
// number.
@ -499,8 +499,8 @@ where
+ Allocator<N, D>,
{
/// Computes the Schur decomposition of a square matrix.
pub fn real_schur(self) -> RealSchur<N, D> {
RealSchur::new(self.into_owned())
pub fn schur(self) -> Schur<N, D> {
Schur::new(self.into_owned())
}
/// Attempts to compute the Schur decomposition of a square matrix.
@ -514,8 +514,8 @@ where
/// * `max_niter` maximum total number of iterations performed by the algorithm. If this
/// number of iterations is exceeded, `None` is returned. If `max_niter == 0`, then the algorithm
/// continues indefinitely until convergence.
pub fn try_real_schur(self, eps: N::Real, max_niter: usize) -> Option<RealSchur<N, D>> {
RealSchur::try_new(self.into_owned(), eps, max_niter)
pub fn try_schur(self, eps: N::Real, max_niter: usize) -> Option<Schur<N, D>> {
Schur::try_new(self.into_owned(), eps, max_niter)
}
/// Computes the eigenvalues of this matrix.
@ -543,7 +543,7 @@ where
}
// FIXME: add balancing?
let schur = RealSchur::do_decompose(
let schur = Schur::do_decompose(
self.clone_owned(),
&mut work,
N::Real::default_epsilon(),
@ -551,7 +551,7 @@ where
false,
)
.unwrap();
if RealSchur::do_eigenvalues(&schur.1, &mut work) {
if Schur::do_eigenvalues(&schur.1, &mut work) {
Some(work)
} else {
None
@ -566,7 +566,7 @@ where
let dim = self.data.shape().0;
let mut work = unsafe { VectorN::new_uninitialized_generic(dim, U1) };
let schur = RealSchur::do_decompose(
let schur = Schur::do_decompose(
self.clone_owned(),
&mut work,
N::default_epsilon(),
@ -575,7 +575,7 @@ where
)
.unwrap();
let mut eig = unsafe { VectorN::new_uninitialized_generic(dim, U1) };
RealSchur::do_complex_eigenvalues(&schur.1, &mut eig);
Schur::do_complex_eigenvalues(&schur.1, &mut eig);
eig
}
}
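
Putting the renamed entry points of this file together, a minimal sketch of `schur`, `try_schur` and the eigenvalue helper (the `complex_eigenvalues` name is assumed from the surrounding code):

extern crate nalgebra as na;

use na::Matrix4;

fn main() {
    let m = Matrix4::<f64>::new_random();

    // `try_schur` lets the caller bound the iteration count instead of panicking.
    if let Some(schur) = m.try_schur(1.0e-9, 10_000) {
        let (q, t) = schur.unpack();
        assert!((&q * &t * q.transpose() - &m).norm() < 1.0e-7);
    }

    // Complex eigenvalues are always available, even when the real Schur form
    // keeps 2x2 blocks for complex conjugate pairs.
    println!("eigenvalues: {}", m.complex_eigenvalues());
}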

View File

@ -1,9 +1,7 @@
#[cfg(feature = "serde-serialize")]
use serde::{Deserialize, Serialize};
use num_complex::Complex as NumComplex;
use num::{Zero, One};
use std::ops::MulAssign;
use approx::AbsDiffEq;
use alga::general::{Real, Complex};
@ -13,7 +11,6 @@ use constraint::{SameNumberOfRows, ShapeConstraint};
use dimension::{Dim, DimDiff, DimMin, DimMinimum, DimSub, U1, U2};
use storage::Storage;
use linalg::givens;
use linalg::symmetric_eigen;
use linalg::Bidiagonal;
use linalg::givens::GivensRotation;
@ -116,7 +113,7 @@ where
matrix.unscale_mut(m_amax);
}
let mut b = Bidiagonal::new(matrix);
let b = Bidiagonal::new(matrix);
let mut u = if compute_u { Some(b.u()) } else { None };
let mut v_t = if compute_v { Some(b.v_t()) } else { None };
let mut diagonal = b.diagonal();

View File

@ -1,10 +1,8 @@
#[cfg(feature = "serde-serialize")]
use serde::{Deserialize, Serialize};
use num::{Zero, One};
use num_complex::Complex as NumComplex;
use num::Zero;
use approx::AbsDiffEq;
use std::ops::MulAssign;
use alga::general::Complex;
use allocator::Allocator;
@ -12,7 +10,6 @@ use base::{DefaultAllocator, Matrix2, MatrixN, SquareMatrix, Vector2, VectorN};
use dimension::{Dim, DimDiff, DimSub, U1, U2};
use storage::Storage;
use geometry::UnitComplex;
use linalg::givens::GivensRotation;
use linalg::SymmetricTridiagonal;
@ -121,8 +118,6 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N::Real, D>
q = Some(res.0);
diag = res.1;
off_diag = res.2;
println!("Tridiagonalization q: {:.5?}", q);
} else {
let res = SymmetricTridiagonal::new(m).unpack_tridiagonal();
q = None;
@ -154,7 +149,6 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N::Real, D>
let j = i + 1;
if let Some((rot, norm)) = GivensRotation::cancel_y(&v) {
println!("Canceling: {:.5?} with norm: {:.5?}", rot, norm);
if i > start {
// Not the first iteration.
off_diag[i - 1] = norm;
@ -204,10 +198,6 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N::Real, D>
diag[start + 0] = eigvals[0];
diag[start + 1] = eigvals[1];
println!("Eigvals: {:.5?}", eigvals);
println!("m: {:.5}", m);
println!("Curr q: {:.5?}", q);
if let Some(ref mut q) = q {
if let Some((rot, _)) = GivensRotation::try_new(basis.x, basis.y, eps) {
let rot = GivensRotation::new_unchecked(rot.c(), N::from_real(rot.s()));
@ -372,7 +362,6 @@ mod test {
let expected = expected_shift(m);
let computed = super::wilkinson_shift(m.m11, m.m22, m.m12);
println!("{} {}", expected, computed);
assert!(relative_eq!(expected, computed, epsilon = 1.0e-7));
}
}

View File

@ -53,8 +53,6 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>
pub fn new(mut m: MatrixN<N, D>) -> Self {
let dim = m.data.shape().0;
println!("Input m: {}", m.index((0.., 0..)));
assert!(
m.is_square(),
"Unable to compute the symmetric tridiagonal decomposition of a non-square matrix."
@ -84,7 +82,6 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>
m.ger_symm(-N::one(), &p, &axis.conjugate(), N::one());
m.ger_symm(-N::one(), &axis, &p.conjugate(), N::one());
m.ger_symm(dot * ::convert(2.0), &axis, &axis.conjugate(), N::one());
println!("The m: {}", m);
}
}
@ -112,7 +109,7 @@ where DefaultAllocator: Allocator<N, D, D> + Allocator<N, DimDiff<D, U1>>
}
/// Retrieve the diagonal and off-diagonal elements of this decomposition.
pub fn unpack_tridiagonal(mut self) -> (VectorN<N::Real, D>, VectorN<N::Real, DimDiff<D, U1>>)
pub fn unpack_tridiagonal(self) -> (VectorN<N::Real, D>, VectorN<N::Real, DimDiff<D, U1>>)
where DefaultAllocator: Allocator<N::Real, D>
+ Allocator<N::Real, DimDiff<D, U1>> {
(self.diagonal(), self.off_diagonal.map(N::modulus))
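
For context, `unpack_tridiagonal` above drops the orthogonal transformation and keeps only the real diagonal and off-diagonal. A minimal usage sketch on a real symmetric matrix (assuming the decomposition type is `Clone`, as elsewhere in `linalg`):

extern crate nalgebra as na;

use na::Matrix4;

fn main() {
    let m = Matrix4::new(
        4.0, 1.0, 0.0, 0.0,
        1.0, 3.0, 1.0, 0.0,
        0.0, 1.0, 2.0, 1.0,
        0.0, 0.0, 1.0, 5.0,
    );
    let tri = m.symmetric_tridiagonalize();
    // Recomposing Q * T * Q^T recovers the original symmetric matrix.
    assert!((tri.clone().recompose() - m).norm() < 1.0e-7);
    // Only the tridiagonal part, as real vectors.
    let (diag, off_diag) = tri.unpack_tridiagonal();
    println!("diag = {} off-diag = {}", diag, off_diag);
}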

View File

@ -40,7 +40,7 @@ fn assert_encode_and_decode<T: Abomonation + PartialEq + Clone>(original_data: T
// Encode
let mut bytes = Vec::new();
unsafe {
encode(&original_data, &mut bytes);
let _ = encode(&original_data, &mut bytes);
}
// Drop the original, so that dangling pointers are revealed by the test

View File

@ -1,11 +1,14 @@
#![cfg(feature = "arbitrary")]
macro_rules! gen_tests(
($module: ident, $scalar: ty) => {
mod $module {
use na::{DMatrix, Matrix2, Matrix3x5, Matrix4, Matrix5x3};
#[allow(unused_imports)]
use core::helper::{RandScalar, RandComplex};
quickcheck! {
fn bidiagonal(m: DMatrix<RandComplex<f64>>) -> bool {
fn bidiagonal(m: DMatrix<$scalar>) -> bool {
let m = m.map(|e| e.0);
if m.len() == 0 {
return true;
@ -14,76 +17,62 @@ quickcheck! {
let bidiagonal = m.clone().bidiagonalize();
let (u, d, v_t) = bidiagonal.unpack();
println!("{}{}{}", &u, &d, &v_t);
println!("{:.7}{:.7}", &u * &d * &v_t, m);
relative_eq!(m, &u * d * &v_t, epsilon = 1.0e-7)
}
fn bidiagonal_static_5_3(m: Matrix5x3<RandComplex<f64>>) -> bool {
fn bidiagonal_static_5_3(m: Matrix5x3<$scalar>) -> bool {
let m = m.map(|e| e.0);
let bidiagonal = m.bidiagonalize();
let (u, d, v_t) = bidiagonal.unpack();
println!("{}{}{}", &u, &d, &v_t);
println!("{:.7}{:.7}", &u * &d * &v_t, m);
relative_eq!(m, &u * d * &v_t, epsilon = 1.0e-7)
}
fn bidiagonal_static_3_5(m: Matrix3x5<RandComplex<f64>>) -> bool {
fn bidiagonal_static_3_5(m: Matrix3x5<$scalar>) -> bool {
let m = m.map(|e| e.0);
let bidiagonal = m.bidiagonalize();
let (u, d, v_t) = bidiagonal.unpack();
println!("{}{}{}", &u, &d, &v_t);
println!("{:.7}{:.7}", &u * &d * &v_t, m);
relative_eq!(m, &u * d * &v_t, epsilon = 1.0e-7)
}
fn bidiagonal_static_square(m: Matrix4<RandComplex<f64>>) -> bool {
fn bidiagonal_static_square(m: Matrix4<$scalar>) -> bool {
let m = m.map(|e| e.0);
let bidiagonal = m.bidiagonalize();
let (u, d, v_t) = bidiagonal.unpack();
println!("{}{}{}", &u, &d, &v_t);
println!("{:.7}{:.7}", &u * &d * &v_t, m);
relative_eq!(m, &u * d * &v_t, epsilon = 1.0e-7)
}
fn bidiagonal_static_square_2x2(m: Matrix2<RandComplex<f64>>) -> bool {
fn bidiagonal_static_square_2x2(m: Matrix2<$scalar>) -> bool {
let m = m.map(|e| e.0);
let bidiagonal = m.bidiagonalize();
let (u, d, v_t) = bidiagonal.unpack();
println!("{}{}{}", &u, &d, &v_t);
println!("{:.7}{:.7}", &u * &d * &v_t, m);
relative_eq!(m, &u * d * &v_t, epsilon = 1.0e-7)
}
}
}
}
);
gen_tests!(complex, RandComplex<f64>);
gen_tests!(f64, RandScalar<f64>);
#[test]
fn bidiagonal_identity() {
let m = DMatrix::<f64>::identity(10, 10);
let m = na::DMatrix::<f64>::identity(10, 10);
let bidiagonal = m.clone().bidiagonalize();
let (u, d, v_t) = bidiagonal.unpack();
println!("u, s, v_t: {}{}{}", u, d, v_t);
println!("recomp: {}", &u * &d * &v_t);
assert_eq!(m, &u * d * &v_t);
let m = DMatrix::<f64>::identity(10, 15);
let m = na::DMatrix::<f64>::identity(10, 15);
let bidiagonal = m.clone().bidiagonalize();
let (u, d, v_t) = bidiagonal.unpack();
println!("u, s, v_t: {}{}{}", u, d, v_t);
assert_eq!(m, &u * d * &v_t);
let m = DMatrix::<f64>::identity(15, 10);
let m = na::DMatrix::<f64>::identity(15, 10);
let bidiagonal = m.clone().bidiagonalize();
let (u, d, v_t) = bidiagonal.unpack();
println!("u, s, v_t: {}{}{}", u, d, v_t);
assert_eq!(m, &u * d * &v_t);
}

View File

@ -1,28 +1,30 @@
#![cfg(all(feature = "arbitrary", feature = "debug"))]
macro_rules! gen_tests(
($module: ident, $scalar: ty) => {
mod $module {
use na::debug::RandomSDP;
use na::dimension::U4;
use na::dimension::{U4, Dynamic};
use na::{DMatrix, DVector, Matrix4x3, Vector4};
use rand::random;
#[allow(unused_imports)]
use core::helper::{RandScalar, RandComplex};
use std::cmp;
quickcheck! {
fn cholesky(m: RandomSDP<f64>) -> bool {
let mut m = m.unwrap();
// Put garbage on the upper triangle to make sure it is not read by the decomposition.
m.fill_upper_triangle(23.0, 1);
fn cholesky(n: usize) -> bool {
let m = RandomSDP::new(Dynamic::new(n.max(1).min(50)), || random::<$scalar>().0).unwrap();
let l = m.clone().cholesky().unwrap().unpack();
m.fill_upper_triangle_with_lower_triangle();
relative_eq!(m, &l * l.transpose(), epsilon = 1.0e-7)
relative_eq!(m, &l * l.conjugate_transpose(), epsilon = 1.0e-7)
}
fn cholesky_static(m: RandomSDP<f64, U4>) -> bool {
let m = m.unwrap();
fn cholesky_static(_m: RandomSDP<f64, U4>) -> bool {
let m = RandomSDP::new(U4, || random::<$scalar>().0).unwrap();
let chol = m.cholesky().unwrap();
let l = chol.unpack();
if !relative_eq!(m, &l * l.transpose(), epsilon = 1.0e-7) {
if !relative_eq!(m, &l * l.conjugate_transpose(), epsilon = 1.0e-7) {
false
}
else {
@ -30,15 +32,14 @@ quickcheck! {
}
}
fn cholesky_solve(m: RandomSDP<f64>, nb: usize) -> bool {
let m = m.unwrap();
let n = m.nrows();
fn cholesky_solve(n: usize, nb: usize) -> bool {
let n = n.max(1).min(50);
let m = RandomSDP::new(Dynamic::new(n), || random::<$scalar>().0).unwrap();
let nb = cmp::min(nb, 50); // To avoid slowing down the test too much.
let chol = m.clone().cholesky().unwrap();
let b1 = DVector::new_random(n);
let b2 = DMatrix::new_random(n, nb);
let b1 = DVector::<$scalar>::new_random(n).map(|e| e.0);
let b2 = DMatrix::<$scalar>::new_random(n, nb).map(|e| e.0);
let sol1 = chol.solve(&b1);
let sol2 = chol.solve(&b2);
@ -47,11 +48,11 @@ quickcheck! {
relative_eq!(&m * &sol2, b2, epsilon = 1.0e-7)
}
fn cholesky_solve_static(m: RandomSDP<f64, U4>) -> bool {
let m = m.unwrap();
fn cholesky_solve_static(_n: usize) -> bool {
let m = RandomSDP::new(U4, || random::<$scalar>().0).unwrap();
let chol = m.clone().cholesky().unwrap();
let b1 = Vector4::new_random();
let b2 = Matrix4x3::new_random();
let b1 = Vector4::<$scalar>::new_random().map(|e| e.0);
let b2 = Matrix4x3::<$scalar>::new_random().map(|e| e.0);
let sol1 = chol.solve(&b1);
let sol2 = chol.solve(&b2);
@ -60,9 +61,8 @@ quickcheck! {
relative_eq!(m * sol2, b2, epsilon = 1.0e-7)
}
fn cholesky_inverse(m: RandomSDP<f64>) -> bool {
let m = m.unwrap();
fn cholesky_inverse(n: usize) -> bool {
let m = RandomSDP::new(Dynamic::new(n.max(1).min(50)), || random::<$scalar>().0).unwrap();
let m1 = m.clone().cholesky().unwrap().inverse();
let id1 = &m * &m1;
let id2 = &m1 * &m;
@ -70,8 +70,8 @@ quickcheck! {
id1.is_identity(1.0e-7) && id2.is_identity(1.0e-7)
}
fn cholesky_inverse_static(m: RandomSDP<f64, U4>) -> bool {
let m = m.unwrap();
fn cholesky_inverse_static(_n: usize) -> bool {
let m = RandomSDP::new(U4, || random::<$scalar>().0).unwrap();
let m1 = m.clone().cholesky().unwrap().inverse();
let id1 = &m * &m1;
let id2 = &m1 * &m;
@ -79,3 +79,9 @@ quickcheck! {
id1.is_identity(1.0e-7) && id2.is_identity(1.0e-7)
}
}
}
}
);
gen_tests!(complex, RandComplex<f64>);
gen_tests!(f64, RandScalar<f64>);

View File

@ -4,69 +4,66 @@ use na::DMatrix;
#[cfg(feature = "arbitrary")]
mod quickcheck_tests {
macro_rules! gen_tests(
($module: ident, $scalar: ty) => {
mod $module {
use na::{DMatrix, Matrix2, Matrix3, Matrix4};
#[allow(unused_imports)]
use core::helper::{RandScalar, RandComplex};
use std::cmp;
quickcheck! {
fn symmetric_eigen(n: usize) -> bool {
let n = cmp::max(1, cmp::min(n, 10));
let m = DMatrix::<RandComplex<f64>>::new_random(n, n).map(|e| e.0).hermitian_part();
let m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0).hermitian_part();
let eig = m.clone().symmetric_eigen();
let recomp = eig.recompose();
println!("{}{}", m.lower_triangle(), recomp.lower_triangle());
relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5)
}
fn symmetric_eigen_singular(n: usize) -> bool {
let n = cmp::max(1, cmp::min(n, 10));
let mut m = DMatrix::<RandComplex<f64>>::new_random(n, n).map(|e| e.0).hermitian_part();
let mut m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0).hermitian_part();
m.row_mut(n / 2).fill(na::zero());
m.column_mut(n / 2).fill(na::zero());
let eig = m.clone().symmetric_eigen();
let recomp = eig.recompose();
println!("{}{}", m.lower_triangle(), recomp.lower_triangle());
relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5)
}
fn symmetric_eigen_static_square_4x4(m: Matrix4<RandComplex<f64>>) -> bool {
fn symmetric_eigen_static_square_4x4(m: Matrix4<$scalar>) -> bool {
let m = m.map(|e| e.0).hermitian_part();
let eig = m.symmetric_eigen();
let recomp = eig.recompose();
println!("{}{}", m.lower_triangle(), recomp.lower_triangle());
relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5)
}
fn symmetric_eigen_static_square_3x3(m: Matrix3<RandComplex<f64>>) -> bool {
fn symmetric_eigen_static_square_3x3(m: Matrix3<$scalar>) -> bool {
let m = m.map(|e| e.0).hermitian_part();
let eig = m.symmetric_eigen();
let recomp = eig.recompose();
println!("Eigenvectors: {}", eig.eigenvectors);
println!("Eigenvalues: {}", eig.eigenvalues);
println!("{}{}", m.lower_triangle(), recomp.lower_triangle());
relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5)
}
fn symmetric_eigen_static_square_2x2(m: Matrix2<RandComplex<f64>>) -> bool {
fn symmetric_eigen_static_square_2x2(m: Matrix2<$scalar>) -> bool {
let m = m.map(|e| e.0).hermitian_part();
let eig = m.symmetric_eigen();
let recomp = eig.recompose();
println!("Eigenvectors: {}", eig.eigenvectors);
println!("{}{}", m.lower_triangle(), recomp.lower_triangle());
relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5)
}
}
}
}
);
gen_tests!(complex, RandComplex<f64>);
gen_tests!(f64, RandScalar<f64>);
}
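
The properties above all go through `hermitian_part()` and the Rand* wrappers; a minimal self-contained sketch of the eigendecomposition on a plain real symmetric matrix:

extern crate nalgebra as na;

use na::Matrix3;

fn main() {
    let m = Matrix3::new(
        2.0, 1.0, 0.0,
        1.0, 3.0, 1.0,
        0.0, 1.0, 4.0,
    );
    let eig = m.symmetric_eigen();
    println!("eigenvalues: {}", eig.eigenvalues);
    // Recompose V * D * V^T and compare against the input.
    assert!((eig.recompose() - m).norm() < 1.0e-7);
}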
// Test proposed on the issue #176 of rulinalg.
#[test]

View File

@ -42,14 +42,20 @@ fn full_piv_lu_simple_with_pivot() {
#[cfg(feature = "arbitrary")]
mod quickcheck_tests {
macro_rules! gen_tests(
($module: ident, $scalar: ty) => {
mod $module {
use std::cmp;
use num::One;
use na::{DMatrix, Matrix4, Matrix4x3, Matrix5x3, Matrix3x5, DVector, Vector4};
#[allow(unused_imports)]
use core::helper::{RandScalar, RandComplex};
quickcheck! {
fn full_piv_lu(m: DMatrix<f64>) -> bool {
let mut m = m;
fn full_piv_lu(m: DMatrix<$scalar>) -> bool {
let mut m = m.map(|e| e.0);
if m.len() == 0 {
m = DMatrix::new_random(1, 1);
m = DMatrix::<$scalar>::new_random(1, 1).map(|e| e.0);
}
let lu = m.clone().full_piv_lu();
@ -61,7 +67,8 @@ mod quickcheck_tests {
relative_eq!(m, lu, epsilon = 1.0e-7)
}
fn full_piv_lu_static_3_5(m: Matrix3x5<f64>) -> bool {
fn full_piv_lu_static_3_5(m: Matrix3x5<$scalar>) -> bool {
let m = m.map(|e| e.0);
let lu = m.full_piv_lu();
let (p, l, u, q) = lu.unpack();
let mut lu = l * u;
@ -71,7 +78,8 @@ mod quickcheck_tests {
relative_eq!(m, lu, epsilon = 1.0e-7)
}
fn full_piv_lu_static_5_3(m: Matrix5x3<f64>) -> bool {
fn full_piv_lu_static_5_3(m: Matrix5x3<$scalar>) -> bool {
let m = m.map(|e| e.0);
let lu = m.full_piv_lu();
let (p, l, u, q) = lu.unpack();
let mut lu = l * u;
@ -81,7 +89,8 @@ mod quickcheck_tests {
relative_eq!(m, lu, epsilon = 1.0e-7)
}
fn full_piv_lu_static_square(m: Matrix4<f64>) -> bool {
fn full_piv_lu_static_square(m: Matrix4<$scalar>) -> bool {
let m = m.map(|e| e.0);
let lu = m.full_piv_lu();
let (p, l, u, q) = lu.unpack();
let mut lu = l * u;
@ -95,11 +104,11 @@ mod quickcheck_tests {
if n != 0 && nb != 0 {
let n = cmp::min(n, 50); // To avoid slowing down the test too much.
let nb = cmp::min(nb, 50); // To avoid slowing down the test too much.
let m = DMatrix::<f64>::new_random(n, n);
let m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0);
let lu = m.clone().full_piv_lu();
let b1 = DVector::new_random(n);
let b2 = DMatrix::new_random(n, nb);
let b1 = DVector::<$scalar>::new_random(n).map(|e| e.0);
let b2 = DMatrix::<$scalar>::new_random(n, nb).map(|e| e.0);
let sol1 = lu.solve(&b1);
let sol2 = lu.solve(&b2);
@ -111,10 +120,11 @@ mod quickcheck_tests {
return true;
}
fn full_piv_lu_solve_static(m: Matrix4<f64>) -> bool {
fn full_piv_lu_solve_static(m: Matrix4<$scalar>) -> bool {
let m = m.map(|e| e.0);
let lu = m.full_piv_lu();
let b1 = Vector4::new_random();
let b2 = Matrix4x3::new_random();
let b1 = Vector4::<$scalar>::new_random().map(|e| e.0);
let b2 = Matrix4x3::<$scalar>::new_random().map(|e| e.0);
let sol1 = lu.solve(&b1);
let sol2 = lu.solve(&b2);
@ -125,14 +135,14 @@ mod quickcheck_tests {
fn full_piv_lu_inverse(n: usize) -> bool {
let n = cmp::max(1, cmp::min(n, 15)); // To avoid slowing down the test too much.
let m = DMatrix::<f64>::new_random(n, n);
let m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0);
let mut l = m.lower_triangle();
let mut u = m.upper_triangle();
// Ensure the matrix is well conditioned for inversion.
l.fill_diagonal(1.0);
u.fill_diagonal(1.0);
l.fill_diagonal(One::one());
u.fill_diagonal(One::one());
let m = l * u;
let m1 = m.clone().full_piv_lu().try_inverse().unwrap();
@ -142,7 +152,8 @@ mod quickcheck_tests {
return id1.is_identity(1.0e-5) && id2.is_identity(1.0e-5);
}
fn full_piv_lu_inverse_static(m: Matrix4<f64>) -> bool {
fn full_piv_lu_inverse_static(m: Matrix4<$scalar>) -> bool {
let m = m.map(|e| e.0);
let lu = m.full_piv_lu();
if let Some(m1) = lu.try_inverse() {
@ -157,6 +168,13 @@ mod quickcheck_tests {
}
}
}
}
);
gen_tests!(complex, RandComplex<f64>);
gen_tests!(f64, RandScalar<f64>);
}
/*
#[test]

View File

@ -1,8 +1,6 @@
#![cfg(feature = "arbitrary")]
use na::{DMatrix, Matrix2, Matrix4};
use core::helper::{RandScalar, RandComplex};
use std::cmp;
use na::Matrix2;
#[test]
@ -13,27 +11,42 @@ fn hessenberg_simple() {
assert!(relative_eq!(m, p * h * p.transpose(), epsilon = 1.0e-7))
}
macro_rules! gen_tests(
($module: ident, $scalar: ty) => {
mod $module {
use na::{DMatrix, Matrix2, Matrix4};
use std::cmp;
#[allow(unused_imports)]
use core::helper::{RandScalar, RandComplex};
quickcheck! {
fn hessenberg(n: usize) -> bool {
let n = cmp::max(1, cmp::min(n, 50));
let m = DMatrix::<RandComplex<f64>>::new_random(n, n).map(|e| e.0);
let m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0);
let hess = m.clone().hessenberg();
let (p, h) = hess.unpack();
relative_eq!(m, &p * h * p.conjugate_transpose(), epsilon = 1.0e-7)
}
fn hessenberg_static_mat2(m: Matrix2<RandComplex<f64>>) -> bool {
fn hessenberg_static_mat2(m: Matrix2<$scalar>) -> bool {
let m = m.map(|e| e.0);
let hess = m.hessenberg();
let (p, h) = hess.unpack();
relative_eq!(m, p * h * p.conjugate_transpose(), epsilon = 1.0e-7)
}
fn hessenberg_static(m: Matrix4<RandComplex<f64>>) -> bool {
fn hessenberg_static(m: Matrix4<$scalar>) -> bool {
let m = m.map(|e| e.0);
let hess = m.hessenberg();
let (p, h) = hess.unpack();
relative_eq!(m, p * h * p.conjugate_transpose(), epsilon = 1.0e-7)
}
}
}
}
);
gen_tests!(complex, RandComplex<f64>);
gen_tests!(f64, RandScalar<f64>);

View File

@ -40,6 +40,7 @@ fn lu_simple_with_pivot() {
#[cfg(feature = "arbitrary")]
mod quickcheck_tests {
#[allow(unused_imports)]
use core::helper::{RandScalar, RandComplex};
macro_rules! gen_tests(

View File

@ -7,7 +7,7 @@ mod hessenberg;
mod inverse;
mod lu;
mod qr;
mod real_schur;
mod schur;
mod solve;
mod svd;
mod tridiagonal;

View File

@ -1,12 +1,12 @@
#![cfg(feature = "arbitrary")]
use core::helper::{RandScalar, RandComplex};
macro_rules! gen_tests(
($module: ident, $scalar: ty) => {
mod $module {
use na::{DMatrix, DVector, Matrix3x5, Matrix4, Matrix4x3, Matrix5x3, Vector4};
use std::cmp;
#[allow(unused_imports)]
use core::helper::{RandScalar, RandComplex};
quickcheck! {

View File

@ -8,7 +8,7 @@ fn schur_simpl_mat3() {
-2.0, 1.0, 2.0,
4.0, 2.0, 5.0);
let schur = m.real_schur();
let schur = m.schur();
let (vecs, vals) = schur.unpack();
assert!(relative_eq!(vecs * vals * vecs.transpose(), m, epsilon = 1.0e-7));
@ -16,16 +16,20 @@ fn schur_simpl_mat3() {
#[cfg(feature = "arbitrary")]
mod quickcheck_tests {
macro_rules! gen_tests(
($module: ident, $scalar: ty) => {
mod $module {
use std::cmp;
use na::{DMatrix, Matrix2, Matrix3, Matrix4};
#[allow(unused_imports)]
use core::helper::{RandScalar, RandComplex};
quickcheck! {
fn schur(n: usize) -> bool {
let n = cmp::max(1, cmp::min(n, 10));
let m = DMatrix::<RandComplex<f64>>::new_random(n, n).map(|e| e.0);
let m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0);
let (vecs, vals) = m.clone().real_schur().unpack();
let (vecs, vals) = m.clone().schur().unpack();
if !relative_eq!(&vecs * &vals * vecs.conjugate_transpose(), m, epsilon = 1.0e-7) {
println!("{:.5}{:.5}", m, &vecs * &vals * vecs.conjugate_transpose());
@ -34,9 +38,9 @@ mod quickcheck_tests {
relative_eq!(&vecs * vals * vecs.conjugate_transpose(), m, epsilon = 1.0e-7)
}
fn schur_static_mat2(m: Matrix2<RandComplex<f64>>) -> bool {
fn schur_static_mat2(m: Matrix2<$scalar>) -> bool {
let m = m.map(|e| e.0);
let (vecs, vals) = m.clone().real_schur().unpack();
let (vecs, vals) = m.clone().schur().unpack();
let ok = relative_eq!(vecs * vals * vecs.conjugate_transpose(), m, epsilon = 1.0e-7);
if !ok {
@ -46,9 +50,9 @@ mod quickcheck_tests {
ok
}
fn schur_static_mat3(m: Matrix3<RandComplex<f64>>) -> bool {
fn schur_static_mat3(m: Matrix3<$scalar>) -> bool {
let m = m.map(|e| e.0);
let (vecs, vals) = m.clone().real_schur().unpack();
let (vecs, vals) = m.clone().schur().unpack();
let ok = relative_eq!(vecs * vals * vecs.conjugate_transpose(), m, epsilon = 1.0e-7);
if !ok {
@ -58,18 +62,25 @@ mod quickcheck_tests {
ok
}
fn schur_static_mat4(m: Matrix4<RandComplex<f64>>) -> bool {
fn schur_static_mat4(m: Matrix4<$scalar>) -> bool {
let m = m.map(|e| e.0);
let (vecs, vals) = m.clone().real_schur().unpack();
let (vecs, vals) = m.clone().schur().unpack();
let ok = relative_eq!(vecs * vals * vecs.conjugate_transpose(), m, epsilon = 1.0e-7);
if !ok {
println!("{:.5}{:.5}", m, &vecs * &vals * vecs.conjugate_transpose());
}
ok
}
}
}
}
);
gen_tests!(complex, RandComplex<f64>);
gen_tests!(f64, RandScalar<f64>);
}
#[test]
fn schur_static_mat4_fail() {
@ -79,8 +90,7 @@ fn schur_static_mat4_fail() {
-94.61793793643038, -18.64216213611094, 88.32376703241675, -99.30169870309795,
90.62661897246733, 96.74200696130146, 34.7421322611369, 84.86773307198098);
let (vecs, vals) = m.clone().real_schur().unpack();
println!("{:.6}{:.6}", m, &vecs * &vals * vecs.transpose());
let (vecs, vals) = m.clone().schur().unpack();
assert!(relative_eq!(vecs * vals * vecs.transpose(), m, epsilon = 1.0e-7))
}
@ -92,8 +102,7 @@ fn schur_static_mat4_fail2() {
27.932377940728202, 82.94220150938, -35.5898884705951, 67.56447552434219,
55.66754906908682, -42.14328890569226, -20.684709585152206, -87.9456949841046);
let (vecs, vals) = m.clone().real_schur().unpack();
println!("{:.6}{:.6}", m, &vecs * &vals * vecs.transpose());
let (vecs, vals) = m.clone().schur().unpack();
assert!(relative_eq!(vecs * vals * vecs.transpose(), m, epsilon = 1.0e-7))
}
@ -104,8 +113,7 @@ fn schur_static_mat3_fail() {
-7.525423104386547, -17.827350599642287, 11.297377444555849,
38.080736654870464, -84.27428302131528, -95.88198590331922);
let (vecs, vals) = m.clone().real_schur().unpack();
println!("{:.6}{:.6}", m, &vecs * &vals * vecs.transpose());
let (vecs, vals) = m.clone().schur().unpack();
assert!(relative_eq!(vecs * vals * vecs.transpose(), m, epsilon = 1.0e-7))
}
@ -138,7 +146,6 @@ fn schur_singular() {
0.0, 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0]);
let (vecs, vals) = m.clone().real_schur().unpack();
println!("{:.6}{:.6}", m, &vecs * &vals * vecs.transpose());
let (vecs, vals) = m.clone().schur().unpack();
assert!(relative_eq!(&vecs * vals * vecs.transpose(), m, epsilon = 1.0e-7))
}

View File

@ -1,57 +1,65 @@
#![cfg(feature = "arbitrary")]
use na::{Matrix4, Matrix4x5};
fn unzero_diagonal(a: &mut Matrix4<f64>) {
macro_rules! gen_tests(
($module: ident, $scalar: ty) => {
mod $module {
use na::{Matrix4, Matrix4x5, Complex};
#[allow(unused_imports)]
use core::helper::{RandScalar, RandComplex};
fn unzero_diagonal<N: Complex>(a: &mut Matrix4<N>) {
for i in 0..4 {
if a[(i, i)] < 1.0e-7 {
a[(i, i)] = 1.0;
if a[(i, i)].asum() < na::convert(1.0e-7) {
a[(i, i)] = N::one();
}
}
}
quickcheck! {
fn solve_lower_triangular(a: Matrix4<f64>, b: Matrix4x5<f64>) -> bool {
let mut a = a;
fn solve_lower_triangular(a: Matrix4<$scalar>, b: Matrix4x5<$scalar>) -> bool {
let b = b.map(|e| e.0);
let mut a = a.map(|e| e.0);
unzero_diagonal(&mut a);
let tri = a.lower_triangle();
let x = a.solve_lower_triangular(&b).unwrap();
println!("{}\n{}\n{}\n{}", tri, x, tri * x, b);
relative_eq!(tri * x, b, epsilon = 1.0e-7)
}
fn solve_upper_triangular(a: Matrix4<f64>, b: Matrix4x5<f64>) -> bool {
let mut a = a;
fn solve_upper_triangular(a: Matrix4<$scalar>, b: Matrix4x5<$scalar>) -> bool {
let b = b.map(|e| e.0);
let mut a = a.map(|e| e.0);
unzero_diagonal(&mut a);
let tri = a.upper_triangle();
let x = a.solve_upper_triangular(&b).unwrap();
println!("{}\n{}\n{}\n{}", tri, x, tri * x, b);
relative_eq!(tri * x, b, epsilon = 1.0e-7)
}
fn tr_solve_lower_triangular(a: Matrix4<f64>, b: Matrix4x5<f64>) -> bool {
let mut a = a;
fn tr_solve_lower_triangular(a: Matrix4<$scalar>, b: Matrix4x5<$scalar>) -> bool {
let b = b.map(|e| e.0);
let mut a = a.map(|e| e.0);
unzero_diagonal(&mut a);
let tri = a.lower_triangle();
let x = a.tr_solve_lower_triangular(&b).unwrap();
println!("{}\n{}\n{}\n{}", tri, x, tri * x, b);
relative_eq!(tri.transpose() * x, b, epsilon = 1.0e-7)
}
fn tr_solve_upper_triangular(a: Matrix4<f64>, b: Matrix4x5<f64>) -> bool {
let mut a = a;
fn tr_solve_upper_triangular(a: Matrix4<$scalar>, b: Matrix4x5<$scalar>) -> bool {
let b = b.map(|e| e.0);
let mut a = a.map(|e| e.0);
unzero_diagonal(&mut a);
let tri = a.upper_triangle();
let x = a.tr_solve_upper_triangular(&b).unwrap();
println!("{}\n{}\n{}\n{}", tri, x, tri * x, b);
relative_eq!(tri.transpose() * x, b, epsilon = 1.0e-7)
}
}
}
}
);
gen_tests!(complex, RandComplex<f64>);
gen_tests!(f64, RandScalar<f64>);

View File

@ -3,16 +3,19 @@ use na::{DMatrix, Matrix6};
#[cfg(feature = "arbitrary")]
mod quickcheck_tests {
macro_rules! gen_tests(
($module: ident, $scalar: ty) => {
mod $module {
use na::{
DMatrix, DVector, Matrix2, Matrix2x5, Matrix3, Matrix3x5, Matrix4, Matrix5x2, Matrix5x3,
Complex
};
use std::cmp;
#[allow(unused_imports)]
use core::helper::{RandScalar, RandComplex};
quickcheck! {
fn svd(m: DMatrix<RandComplex<f64>>) -> bool {
fn svd(m: DMatrix<$scalar>) -> bool {
let m = m.map(|e| e.0);
if m.len() > 0 {
let svd = m.clone().svd(true, true);
@ -20,8 +23,6 @@ mod quickcheck_tests {
let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap());
let ds = DMatrix::from_diagonal(&s.map(|e| Complex::from_real(e)));
println!("{}{}", &m, &u * &ds * &v_t);
s.iter().all(|e| *e >= 0.0) &&
relative_eq!(&u * ds * &v_t, recomp_m, epsilon = 1.0e-5) &&
relative_eq!(m, recomp_m, epsilon = 1.0e-5)
@ -31,7 +32,7 @@ mod quickcheck_tests {
}
}
fn svd_static_5_3(m: Matrix5x3<RandComplex<f64>>) -> bool {
fn svd_static_5_3(m: Matrix5x3<$scalar>) -> bool {
let m = m.map(|e| e.0);
let svd = m.svd(true, true);
let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap());
@ -43,7 +44,7 @@ mod quickcheck_tests {
v_t.is_orthogonal(1.0e-5)
}
fn svd_static_5_2(m: Matrix5x2<RandComplex<f64>>) -> bool {
fn svd_static_5_2(m: Matrix5x2<$scalar>) -> bool {
let m = m.map(|e| e.0);
let svd = m.svd(true, true);
let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap());
@ -55,7 +56,7 @@ mod quickcheck_tests {
v_t.is_orthogonal(1.0e-5)
}
fn svd_static_3_5(m: Matrix3x5<RandComplex<f64>>) -> bool {
fn svd_static_3_5(m: Matrix3x5<$scalar>) -> bool {
let m = m.map(|e| e.0);
let svd = m.svd(true, true);
let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap());
@ -66,7 +67,7 @@ mod quickcheck_tests {
relative_eq!(m, u * ds * v_t, epsilon = 1.0e-5)
}
fn svd_static_2_5(m: Matrix2x5<RandComplex<f64>>) -> bool {
fn svd_static_2_5(m: Matrix2x5<$scalar>) -> bool {
let m = m.map(|e| e.0);
let svd = m.svd(true, true);
let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap());
@ -76,7 +77,7 @@ mod quickcheck_tests {
relative_eq!(m, u * ds * v_t, epsilon = 1.0e-5)
}
fn svd_static_square(m: Matrix4<RandComplex<f64>>) -> bool {
fn svd_static_square(m: Matrix4<$scalar>) -> bool {
let m = m.map(|e| e.0);
let svd = m.svd(true, true);
let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap());
@ -88,24 +89,19 @@ mod quickcheck_tests {
v_t.is_orthogonal(1.0e-5)
}
fn svd_static_square_2x2(m: Matrix2<RandComplex<f64>>) -> bool {
fn svd_static_square_2x2(m: Matrix2<$scalar>) -> bool {
let m = m.map(|e| e.0);
let svd = m.svd(true, true);
let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap());
let ds = Matrix2::from_diagonal(&s.map(|e| Complex::from_real(e)));
println!("u, s, v_t: {}{}{}", u, s, v_t);
println!("m: {}", m);
println!("recomp: {}", u * ds * v_t);
println!("uu_t, vv_t: {}{}", u * u.conjugate_transpose(), v_t.conjugate_transpose() * v_t);
s.iter().all(|e| *e >= 0.0) &&
relative_eq!(m, u * ds * v_t, epsilon = 1.0e-5) &&
u.is_orthogonal(1.0e-5) &&
v_t.is_orthogonal(1.0e-5)
}
fn svd_pseudo_inverse(m: DMatrix<RandComplex<f64>>) -> bool {
fn svd_pseudo_inverse(m: DMatrix<$scalar>) -> bool {
let m = m.map(|e| e.0);
if m.len() > 0 {
@ -113,11 +109,9 @@ mod quickcheck_tests {
let pinv = svd.pseudo_inverse(1.0e-10).unwrap();
if m.nrows() > m.ncols() {
println!("{}", &pinv * &m);
(pinv * m).is_identity(1.0e-5)
}
else {
println!("{}", &m * &pinv);
(m * pinv).is_identity(1.0e-5)
}
}
@ -129,13 +123,13 @@ mod quickcheck_tests {
fn svd_solve(n: usize, nb: usize) -> bool {
let n = cmp::max(1, cmp::min(n, 10));
let nb = cmp::min(nb, 10);
let m = DMatrix::<RandComplex<f64>>::new_random(n, n).map(|e| e.0);
let m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0);
let svd = m.clone().svd(true, true);
if svd.rank(1.0e-7) == n {
let b1 = DVector::<RandComplex<f64>>::new_random(n).map(|e| e.0);
let b2 = DMatrix::<RandComplex<f64>>::new_random(n, nb).map(|e| e.0);
let b1 = DVector::<$scalar>::new_random(n).map(|e| e.0);
let b2 = DMatrix::<$scalar>::new_random(n, nb).map(|e| e.0);
let sol1 = svd.solve(&b1, 1.0e-7).unwrap();
let sol2 = svd.solve(&b2, 1.0e-7).unwrap();
@ -159,6 +153,12 @@ mod quickcheck_tests {
}
}
}
}
);
gen_tests!(complex, RandComplex<f64>);
gen_tests!(f64, RandScalar<f64>);
}
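
Outside the macro, a minimal self-contained sketch of the same SVD round trip on a plain real matrix:

extern crate nalgebra as na;

use na::{Matrix3, Matrix3x4};

fn main() {
    let m = Matrix3x4::<f64>::new_random();
    let svd = m.svd(true, true);
    let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap());
    // Rebuild U * S * V^T from the thin factors.
    let ds = Matrix3::from_diagonal(&s);
    assert!((u * ds * v_t - m).norm() < 1.0e-7);
    // Singular values are always non-negative.
    assert!(s.iter().all(|e| *e >= 0.0));
}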
// Test proposed on the issue #176 of rulinalg.
@ -194,8 +194,6 @@ fn svd_singular() {
let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap());
let ds = DMatrix::from_diagonal(&s);
println!("{:.5}", &u * &ds * &v_t);
assert!(s.iter().all(|e| *e >= 0.0));
assert!(u.is_orthogonal(1.0e-5));
assert!(v_t.is_orthogonal(1.0e-5));
@ -345,11 +343,7 @@ fn svd_fail() {
0.12293810556077789, 0.6617084679545999, 0.9002240700227326, 0.027153062135304884, 0.3630189466989524, 0.18207502727558866,
0.843196731466686, 0.08951878746549924, 0.7533450877576973, 0.009558876499740077, 0.9429679490873482, 0.9355764454129878);
let svd = m.clone().svd(true, true);
println!("Singular values: {}", svd.singular_values);
println!("u: {:.5}", svd.u.unwrap());
println!("v: {:.5}", svd.v_t.unwrap());
let recomp = svd.recompose().unwrap();
println!("{:.5}{:.5}", m, recomp);
assert_relative_eq!(m, recomp, epsilon = 1.0e-5);
}

View File

@ -1,14 +1,19 @@
#![cfg(feature = "arbitrary")]
macro_rules! gen_tests(
($module: ident, $scalar: ty) => {
mod $module {
use std::cmp;
use na::{DMatrix, Matrix2, Matrix4};
#[allow(unused_imports)]
use core::helper::{RandScalar, RandComplex};
quickcheck! {
fn symm_tridiagonal(n: usize) -> bool {
let n = cmp::max(1, cmp::min(n, 50));
let m = DMatrix::<RandComplex<f64>>::new_random(n, n).map(|e| e.0).hermitian_part();
let m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0).hermitian_part();
let tri = m.clone().symmetric_tridiagonalize();
let recomp = tri.recompose();
@ -17,19 +22,16 @@ quickcheck! {
fn symm_tridiagonal_singular(n: usize) -> bool {
let n = cmp::max(1, cmp::min(n, 4));
let mut m = DMatrix::<RandComplex<f64>>::new_random(n, n).map(|e| e.0).hermitian_part();
let mut m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0).hermitian_part();
m.row_mut(n / 2).fill(na::zero());
m.column_mut(n / 2).fill(na::zero());
let tri = m.clone().symmetric_tridiagonalize();
println!("Tri: {:?}", tri);
let recomp = tri.recompose();
println!("Recomp: {:?}", recomp);
relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-7)
}
fn symm_tridiagonal_static_square(m: Matrix4<RandComplex<f64>>) -> bool {
fn symm_tridiagonal_static_square(m: Matrix4<$scalar>) -> bool {
let m = m.map(|e| e.0).hermitian_part();
let tri = m.symmetric_tridiagonalize();
let recomp = tri.recompose();
@ -37,7 +39,7 @@ quickcheck! {
relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-7)
}
fn symm_tridiagonal_static_square_2x2(m: Matrix2<RandComplex<f64>>) -> bool {
fn symm_tridiagonal_static_square_2x2(m: Matrix2<$scalar>) -> bool {
let m = m.map(|e| e.0).hermitian_part();
let tri = m.symmetric_tridiagonalize();
let recomp = tri.recompose();
@ -45,3 +47,10 @@ quickcheck! {
relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-7)
}
}
}
}
);
gen_tests!(complex, RandComplex<f64>);
gen_tests!(f64, RandScalar<f64>);