From 02767fa8947e7ade9becf48b2c3a99d4bab15791 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Crozet?= Date: Wed, 2 Aug 2017 19:38:28 +0200 Subject: [PATCH] Add nalgebra-lapack as a crate on this workspace. --- nalgebra-lapack/CHANGELOG.md | 22 ++ nalgebra-lapack/Cargo.toml | 40 +++ nalgebra-lapack/LICENSE.txt | 21 ++ nalgebra-lapack/Makefile | 11 + nalgebra-lapack/README.md | 53 ++++ nalgebra-lapack/benches/lib.rs | 8 + nalgebra-lapack/benches/linalg/hessenberg.rs | 21 ++ nalgebra-lapack/benches/linalg/lu.rs | 34 +++ nalgebra-lapack/benches/linalg/mod.rs | 3 + nalgebra-lapack/benches/linalg/qr.rs | 33 +++ nalgebra-lapack/src/cholesky.rs | 130 +++++++++ nalgebra-lapack/src/eigen.rs | 182 +++++++++++++ nalgebra-lapack/src/hessenberg.rs | 155 +++++++++++ nalgebra-lapack/src/lapack_check.rs | 21 ++ nalgebra-lapack/src/lib.rs | 81 ++++++ nalgebra-lapack/src/lu.rs | 244 +++++++++++++++++ nalgebra-lapack/src/qr.rs | 171 ++++++++++++ nalgebra-lapack/src/svd.rs | 247 ++++++++++++++++++ nalgebra-lapack/tests/lib.rs | 9 + nalgebra-lapack/tests/linalg/cholesky.rs | 101 +++++++ nalgebra-lapack/tests/linalg/hessenberg.rs | 38 +++ nalgebra-lapack/tests/linalg/lu.rs | 107 ++++++++ nalgebra-lapack/tests/linalg/mod.rs | 5 + nalgebra-lapack/tests/linalg/qr.rs | 20 ++ .../tests/linalg/real_eigensystem.rs | 48 ++++ nalgebra-lapack/tests/linalg/svd.rs | 57 ++++ 26 files changed, 1862 insertions(+) create mode 100644 nalgebra-lapack/CHANGELOG.md create mode 100644 nalgebra-lapack/Cargo.toml create mode 100644 nalgebra-lapack/LICENSE.txt create mode 100644 nalgebra-lapack/Makefile create mode 100644 nalgebra-lapack/README.md create mode 100644 nalgebra-lapack/benches/lib.rs create mode 100644 nalgebra-lapack/benches/linalg/hessenberg.rs create mode 100644 nalgebra-lapack/benches/linalg/lu.rs create mode 100644 nalgebra-lapack/benches/linalg/mod.rs create mode 100644 nalgebra-lapack/benches/linalg/qr.rs create mode 100644 nalgebra-lapack/src/cholesky.rs create mode 100644 nalgebra-lapack/src/eigen.rs create mode 100644 nalgebra-lapack/src/hessenberg.rs create mode 100644 nalgebra-lapack/src/lapack_check.rs create mode 100644 nalgebra-lapack/src/lib.rs create mode 100644 nalgebra-lapack/src/lu.rs create mode 100644 nalgebra-lapack/src/qr.rs create mode 100644 nalgebra-lapack/src/svd.rs create mode 100644 nalgebra-lapack/tests/lib.rs create mode 100644 nalgebra-lapack/tests/linalg/cholesky.rs create mode 100644 nalgebra-lapack/tests/linalg/hessenberg.rs create mode 100644 nalgebra-lapack/tests/linalg/lu.rs create mode 100644 nalgebra-lapack/tests/linalg/mod.rs create mode 100644 nalgebra-lapack/tests/linalg/qr.rs create mode 100644 nalgebra-lapack/tests/linalg/real_eigensystem.rs create mode 100644 nalgebra-lapack/tests/linalg/svd.rs diff --git a/nalgebra-lapack/CHANGELOG.md b/nalgebra-lapack/CHANGELOG.md new file mode 100644 index 00000000..c003fcf7 --- /dev/null +++ b/nalgebra-lapack/CHANGELOG.md @@ -0,0 +1,22 @@ +# Change Log + +## [0.4.0] - 2016-09-07 + +* Made all traits use associated types for their output type parameters. This + simplifies usage of the traits and is consistent with the concept of + associated types used as output type parameters (not input type parameters) as + described in [the associated type + RFC](https://github.com/rust-lang/rfcs/blob/master/text/0195-associated-items.md). +* Implemented `check_info!` macro to check all LAPACK calls. +* Implemented error handling with [error_chain](https://crates.io/crates/error-chain). 
+ +## [0.3.0] - 2016-09-06 + +* Documentation is hosted at https://docs.rs/nalgebra-lapack/ +* Updated `nalgebra` to 0.10. +* Rename traits `HasSVD` to `SVD` and `HasEigensystem` to `Eigensystem`. +* Added `Solve` trait for solving a linear matrix equation. +* Added `Inverse` for computing the multiplicative inverse of a matrix. +* Added `Cholesky` for decomposing a positive-definite matrix. +* The `Eigensystem` and `SVD` traits are now generic over types. The + associated types have been removed. diff --git a/nalgebra-lapack/Cargo.toml b/nalgebra-lapack/Cargo.toml new file mode 100644 index 00000000..e60a7010 --- /dev/null +++ b/nalgebra-lapack/Cargo.toml @@ -0,0 +1,40 @@ +[package] +name = "nalgebra-lapack" +version = "0.11.2" +authors = [ "Sébastien Crozet " ] + +description = "Linear algebra library with transformations and satically-sized or dynamically-sized matrices." +documentation = "http://nalgebra.org/doc/nalgebra/index.html" +homepage = "http://nalgebra.org" +repository = "https://github.com/sebcrozet/nalgebra" +readme = "README.md" +keywords = [ "linear", "algebra", "matrix", "vector" ] +license = "BSD-3-Clause" + +[features] +serde-serialize = [ "serde", "serde_derive" ] + +# For BLAS/LAPACK +default = ["openblas"] +openblas = ["lapack/openblas"] +netlib = ["lapack/netlib"] +accelerate = ["lapack/accelerate"] + +[dependencies] +nalgebra = { version = "0.12", path = ".." } +num-traits = "0.1" +num-complex = "0.1" +alga = "0.5" +serde = { version = "0.9", optional = true } +serde_derive = { version = "0.9", optional = true } +# clippy = "*" + +[dependencies.lapack] +version = "0.11" +default-features = false + +[dev-dependencies] +nalgebra = { version = "0.12", path = "..", features = [ "arbitrary" ] } +quickcheck = "0.4" +approx = "0.1" +rand = "0.3" diff --git a/nalgebra-lapack/LICENSE.txt b/nalgebra-lapack/LICENSE.txt new file mode 100644 index 00000000..ec8f0063 --- /dev/null +++ b/nalgebra-lapack/LICENSE.txt @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Andrew D. Straw + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/nalgebra-lapack/Makefile b/nalgebra-lapack/Makefile new file mode 100644 index 00000000..7ff57c89 --- /dev/null +++ b/nalgebra-lapack/Makefile @@ -0,0 +1,11 @@ +all: + cargo build + +test: + cargo test + +doc: + cargo doc --all --no-deps + +bench: + cargo bench diff --git a/nalgebra-lapack/README.md b/nalgebra-lapack/README.md new file mode 100644 index 00000000..0ae829df --- /dev/null +++ b/nalgebra-lapack/README.md @@ -0,0 +1,53 @@ +# nalgebra-lapack [![Version][version-img]][version-url] [![Status][status-img]][status-url] [![Doc][doc-img]][doc-url] + +Rust library for linear algebra using nalgebra and LAPACK + +## Documentation + +Documentation is available [here](https://docs.rs/nalgebra-lapack/). + +## License + +MIT + +## Cargo features to select lapack provider + +Like the [lapack crate](https://crates.io/crates/lapack) from which this +behavior is inherited, nalgebra-lapack uses [cargo +features](http://doc.crates.io/manifest.html#the-[features]-section) to select +which lapack provider (or implementation) is used. Command line arguments to +cargo are the easiest way to do this, and the best provider depends on your +particular system. In some cases, the providers can be further tuned with +environment variables. + +Below are given examples of how to invoke `cargo build` on two different systems +using two different providers. The `--no-default-features --features "provider"` +arguments will be consistent for other `cargo` commands. + +### Ubuntu + +As tested on Ubuntu 12.04, do this to build the lapack package against +the system installation of netlib without LAPACKE (note the E) or +CBLAS: + + sudo apt-get install gfortran libblas3gf liblapack3gf + export CARGO_FEATURE_SYSTEM_NETLIB=1 + export CARGO_FEATURE_EXCLUDE_LAPACKE=1 + export CARGO_FEATURE_EXCLUDE_CBLAS=1 + + export CARGO_FEATURES='--no-default-features --features netlib' + cargo build ${CARGO_FEATURES} + +### Mac OS X + +On Mac OS X, do this to use Apple's Accelerate framework: + + export CARGO_FEATURES='--no-default-features --features accelerate' + cargo build ${CARGO_FEATURES} + +[version-img]: https://img.shields.io/crates/v/nalgebra-lapack.svg +[version-url]: https://crates.io/crates/nalgebra-lapack +[status-img]: https://travis-ci.org/strawlab/nalgebra-lapack.svg?branch=master +[status-url]: https://travis-ci.org/strawlab/nalgebra-lapack +[doc-img]: https://docs.rs/nalgebra-lapack/badge.svg +[doc-url]: https://docs.rs/nalgebra-lapack/ diff --git a/nalgebra-lapack/benches/lib.rs b/nalgebra-lapack/benches/lib.rs new file mode 100644 index 00000000..de0c170a --- /dev/null +++ b/nalgebra-lapack/benches/lib.rs @@ -0,0 +1,8 @@ +#![feature(test)] + +extern crate test; +extern crate rand; +extern crate nalgebra as na; +extern crate nalgebra_lapack as nl; + +mod linalg; diff --git a/nalgebra-lapack/benches/linalg/hessenberg.rs b/nalgebra-lapack/benches/linalg/hessenberg.rs new file mode 100644 index 00000000..90c97b8b --- /dev/null +++ b/nalgebra-lapack/benches/linalg/hessenberg.rs @@ -0,0 +1,21 @@ +use test::{self, Bencher}; +use na::{DMatrix, Matrix4}; +use nl::Hessenberg; + +#[bench] +fn hessenberg_decompose_100x100(bh: &mut Bencher) { + let m = DMatrix::::new_random(100, 100); + bh.iter(|| test::black_box(Hessenberg::new(m.clone()))) +} + +#[bench] +fn hessenberg_decompose_4x4(bh: &mut Bencher) { + let m = Matrix4::::new_random(); + bh.iter(|| test::black_box(Hessenberg::new(m.clone()))) +} + +#[bench] +fn hessenberg_decompose_500x500(bh: &mut Bencher) { + let m = DMatrix::::new_random(500, 500); + 
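+    // Note: each iteration clones `m` before factorizing, so the measured time
+    // also includes the cost of that copy, not just the LAPACK call.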
bh.iter(|| test::black_box(Hessenberg::new(m.clone()))) +} diff --git a/nalgebra-lapack/benches/linalg/lu.rs b/nalgebra-lapack/benches/linalg/lu.rs new file mode 100644 index 00000000..572c26b4 --- /dev/null +++ b/nalgebra-lapack/benches/linalg/lu.rs @@ -0,0 +1,34 @@ +use test::{self, Bencher}; +use na::{DMatrix, Matrix4}; +use nl::LU; + + +#[bench] +fn lu_decompose_100x100(bh: &mut Bencher) { + let m = DMatrix::::new_random(100, 100); + bh.iter(|| test::black_box(LU::new(m.clone()))) +} + +#[bench] +fn lu_decompose_100x500(bh: &mut Bencher) { + let m = DMatrix::::new_random(100, 500); + bh.iter(|| test::black_box(LU::new(m.clone()))) +} + +#[bench] +fn lu_decompose_4x4(bh: &mut Bencher) { + let m = Matrix4::::new_random(); + bh.iter(|| test::black_box(LU::new(m.clone()))) +} + +#[bench] +fn lu_decompose_500x100(bh: &mut Bencher) { + let m = DMatrix::::new_random(500, 100); + bh.iter(|| test::black_box(LU::new(m.clone()))) +} + +#[bench] +fn lu_decompose_500x500(bh: &mut Bencher) { + let m = DMatrix::::new_random(500, 500); + bh.iter(|| test::black_box(LU::new(m.clone()))) +} diff --git a/nalgebra-lapack/benches/linalg/mod.rs b/nalgebra-lapack/benches/linalg/mod.rs new file mode 100644 index 00000000..f42ec321 --- /dev/null +++ b/nalgebra-lapack/benches/linalg/mod.rs @@ -0,0 +1,3 @@ +mod qr; +mod lu; +mod hessenberg; diff --git a/nalgebra-lapack/benches/linalg/qr.rs b/nalgebra-lapack/benches/linalg/qr.rs new file mode 100644 index 00000000..07b830d9 --- /dev/null +++ b/nalgebra-lapack/benches/linalg/qr.rs @@ -0,0 +1,33 @@ +use test::{self, Bencher}; +use na::{DMatrix, Matrix4}; +use nl::QR; + +#[bench] +fn qr_decompose_100x100(bh: &mut Bencher) { + let m = DMatrix::::new_random(100, 100); + bh.iter(|| test::black_box(QR::new(m.clone()))) +} + +#[bench] +fn qr_decompose_100x500(bh: &mut Bencher) { + let m = DMatrix::::new_random(100, 500); + bh.iter(|| test::black_box(QR::new(m.clone()))) +} + +#[bench] +fn qr_decompose_4x4(bh: &mut Bencher) { + let m = Matrix4::::new_random(); + bh.iter(|| test::black_box(QR::new(m.clone()))) +} + +#[bench] +fn qr_decompose_500x100(bh: &mut Bencher) { + let m = DMatrix::::new_random(500, 100); + bh.iter(|| test::black_box(QR::new(m.clone()))) +} + +#[bench] +fn qr_decompose_500x500(bh: &mut Bencher) { + let m = DMatrix::::new_random(500, 500); + bh.iter(|| test::black_box(QR::new(m.clone()))) +} diff --git a/nalgebra-lapack/src/cholesky.rs b/nalgebra-lapack/src/cholesky.rs new file mode 100644 index 00000000..4b3c8a39 --- /dev/null +++ b/nalgebra-lapack/src/cholesky.rs @@ -0,0 +1,130 @@ +use num::Zero; +use num_complex::Complex; + +use na::{Scalar, DefaultAllocator, MatrixN, MatrixMN}; +use na::dimension::Dim; +use na::allocator::Allocator; + +use lapack::fortran as interface; + +/// The cholesky decomposion of a symmetric-definite-positive matrix. +pub struct Cholesky + where DefaultAllocator: Allocator { + l: MatrixN +} + +impl Cholesky + where DefaultAllocator: Allocator { + + /// Complutes the cholesky decomposition of the given symmetric-definite-positive square + /// matrix. + /// + /// Only the lower-triangular part of the input matrix is considered. + #[inline] + pub fn new(mut m: MatrixN) -> Option { + // FIXME: check symmetry as well? 
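+        // `?potrf` overwrites the lower triangle of `m` with the factor `L` in place;
+        // a nonzero `info` means the matrix is not positive-definite, and
+        // `lapack_check!` makes this constructor return `None` in that case.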
+ assert!(m.is_square(), "Unable to compute the cholesky decomposition of a non-square matrix."); + + let uplo = b'L'; + let dim = m.nrows() as i32; + let mut info = 0; + + N::xpotrf(uplo, dim, m.as_mut_slice(), dim, &mut info); + lapack_check!(info); + + Some(Cholesky { l: m }) + } + + pub fn unpack(mut self) -> MatrixN { + self.l.fill_upper_triangle(Zero::zero(), 1); + self.l + } + + pub fn l(&self) -> MatrixN { + let mut res = self.l.clone(); + res.fill_upper_triangle(Zero::zero(), 1); + res + } + + /// Solves the symmetric-definite-positive linear system `self * x = b`, where `x` is the + /// unknown to be determined. + pub fn solve(&self, mut b: MatrixMN) + -> Option> + where DefaultAllocator: Allocator { + + let dim = self.l.nrows(); + + assert!(b.nrows() == dim, "The number of rows of `b` must be equal to the dimension of the matrix `a`."); + + let nrhs = b.ncols() as i32; + let lda = dim as i32; + let ldb = dim as i32; + let mut info = 0; + + N::xpotrs(b'L', dim as i32, nrhs, self.l.as_slice(), lda, b.as_mut_slice(), ldb, &mut info); + lapack_check!(info); + + Some(b) + } + + /// Computes the inverse of the decomposed matrix. + pub fn inverse(mut self) -> Option> { + let dim = self.l.nrows(); + let mut info = 0; + + N::xpotri(b'L', dim as i32, self.l.as_mut_slice(), dim as i32, &mut info); + lapack_check!(info); + + // Copy lower triangle to upper triangle. + for i in 0 .. dim { + for j in i + 1 .. dim { + unsafe { *self.l.get_unchecked_mut(i, j) = *self.l.get_unchecked(j, i) }; + } + } + + Some(self.l) + } +} + + + + +/* + * + * Lapack functions dispatch. + * + */ +/// Trait implemented by floats (`f32`, `f64`) and complex floats (`Complex`, `Complex`) +/// supported by the cholesky decompotition. +pub trait CholeskyScalar: Scalar { + fn xpotrf(uplo: u8, n: i32, a: &mut [Self], lda: i32, info: &mut i32); + fn xpotrs(uplo: u8, n: i32, nrhs: i32, a: &[Self], lda: i32, b: &mut [Self], ldb: i32, info: &mut i32); + fn xpotri(uplo: u8, n: i32, a: &mut [Self], lda: i32, info: &mut i32); +} + +macro_rules! cholesky_scalar_impl( + ($N: ty, $xpotrf: path, $xpotrs: path, $xpotri: path) => ( + impl CholeskyScalar for $N { + #[inline] + fn xpotrf(uplo: u8, n: i32, a: &mut [Self], lda: i32, info: &mut i32) { + $xpotrf(uplo, n, a, lda, info) + } + + #[inline] + fn xpotrs(uplo: u8, n: i32, nrhs: i32, a: &[Self], lda: i32, + b: &mut [Self], ldb: i32, info: &mut i32) { + $xpotrs(uplo, n, nrhs, a, lda, b, ldb, info) + } + + #[inline] + fn xpotri(uplo: u8, n: i32, a: &mut [Self], lda: i32, info: &mut i32) { + $xpotri(uplo, n, a, lda, info) + } + } + ) +); + +cholesky_scalar_impl!(f32, interface::spotrf, interface::spotrs, interface::spotri); +cholesky_scalar_impl!(f64, interface::dpotrf, interface::dpotrs, interface::dpotri); +cholesky_scalar_impl!(Complex, interface::cpotrf, interface::cpotrs, interface::cpotri); +cholesky_scalar_impl!(Complex, interface::zpotrf, interface::zpotrs, interface::zpotri); diff --git a/nalgebra-lapack/src/eigen.rs b/nalgebra-lapack/src/eigen.rs new file mode 100644 index 00000000..648fd88a --- /dev/null +++ b/nalgebra-lapack/src/eigen.rs @@ -0,0 +1,182 @@ +use num::Zero; + +use alga::general::Real; + +use ::ComplexHelper; +use na::{Scalar, DefaultAllocator, Matrix, VectorN, MatrixN}; +use na::dimension::{Dim, U1}; +use na::storage::Storage; +use na::allocator::Allocator; + +use lapack::fortran as interface; + +/// Eigendecomposition of a real square matrix with real eigenvalues. 
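+///
+/// The decomposition is computed with LAPACK's `?geev`. It is returned only when all
+/// eigenvalues are real: if any eigenvalue has a nonzero imaginary part, `new`
+/// returns `None`.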
+pub struct RealEigensystem + where DefaultAllocator: Allocator + + Allocator { + pub eigenvalues: VectorN, + pub eigenvectors: Option>, + pub left_eigenvectors: Option> +} + + +impl RealEigensystem + where DefaultAllocator: Allocator + + Allocator { + /// Computes the eigenvalues and eigenvectors of the square matrix `m`. + /// + /// If `eigenvectors` is `false` then, the eigenvectors are not computed explicitly. + pub fn new(mut m: MatrixN, left_eigenvectors: bool, eigenvectors: bool) + -> Option> { + + assert!(m.is_square(), "Unable to compute the eigenvalue decomposition of a non-square matrix."); + + let ljob = if left_eigenvectors { b'V' } else { b'N' }; + let rjob = if eigenvectors { b'V' } else { b'N' }; + + let (nrows, ncols) = m.data.shape(); + let n = nrows.value(); + + let lda = n as i32; + + let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1) }; + // FIXME: Tap into the workspace. + let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1) }; + + + let mut info = 0; + let mut placeholder1 = [ N::zero() ]; + let mut placeholder2 = [ N::zero() ]; + + let lwork = N::xgeev_work_size(ljob, rjob, n as i32, m.as_mut_slice(), lda, + wr.as_mut_slice(), wi.as_mut_slice(), &mut placeholder1, + n as i32, &mut placeholder2, n as i32, &mut info); + + lapack_check!(info); + + let mut work = unsafe { ::uninitialized_vec(lwork as usize) }; + + match (left_eigenvectors, eigenvectors) { + (true, true) => { + let mut vl = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) }; + let mut vr = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) }; + + N::xgeev(ljob, rjob, n as i32, m.as_mut_slice(), lda, wr.as_mut_slice(), + wi.as_mut_slice(), &mut vl.as_mut_slice(), n as i32, &mut vr.as_mut_slice(), + n as i32, &mut work, lwork, &mut info); + lapack_check!(info); + + if wi.iter().all(|e| e.is_zero()) { + return Some(RealEigensystem { + eigenvalues: wr, left_eigenvectors: Some(vl), eigenvectors: Some(vr) + }) + } + }, + (true, false) => { + let mut vl = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) }; + + N::xgeev(ljob, rjob, n as i32, m.as_mut_slice(), lda, wr.as_mut_slice(), + wi.as_mut_slice(), &mut vl.as_mut_slice(), n as i32, &mut placeholder2, + 1 as i32, &mut work, lwork, &mut info); + lapack_check!(info); + + if wi.iter().all(|e| e.is_zero()) { + return Some(RealEigensystem { + eigenvalues: wr, left_eigenvectors: Some(vl), eigenvectors: None + }); + } + }, + (false, true) => { + let mut vr = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) }; + + N::xgeev(ljob, rjob, n as i32, m.as_mut_slice(), lda, wr.as_mut_slice(), + wi.as_mut_slice(), &mut placeholder1, 1 as i32, &mut vr.as_mut_slice(), + n as i32, &mut work, lwork, &mut info); + lapack_check!(info); + + if wi.iter().all(|e| e.is_zero()) { + return Some(RealEigensystem { + eigenvalues: wr, left_eigenvectors: None, eigenvectors: Some(vr) + }); + } + }, + (false, false) => { + N::xgeev(ljob, rjob, n as i32, m.as_mut_slice(), lda, wr.as_mut_slice(), + wi.as_mut_slice(), &mut placeholder1, 1 as i32, &mut placeholder2, + 1 as i32, &mut work, lwork, &mut info); + lapack_check!(info); + + if wi.iter().all(|e| e.is_zero()) { + return Some(RealEigensystem { + eigenvalues: wr, left_eigenvectors: None, eigenvectors: None + }); + } + } + } + + None + } + + /// The determinant of the decomposed matrix. + #[inline] + pub fn determinant(&self) -> N { + let mut det = N::one(); + for e in self.eigenvalues.iter() { + det *= *e; + } + + det + } +} + + + + + +/* + * + * Lapack functions dispatch. 
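+ * The trait below is implemented once per supported scalar type; the `_work_size`
+ * variant performs the usual LAPACK workspace query by calling `?geev` with
+ * `lwork = -1` and reading the optimal workspace size back from `work[0]`.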
+ * + */ +pub trait RealEigensystemScalar: Scalar { + fn xgeev(jobvl: u8, jobvr: u8, n: i32, a: &mut [Self], lda: i32, + wr: &mut [Self], wi: &mut [Self], + vl: &mut [Self], ldvl: i32, vr: &mut [Self], ldvr: i32, + work: &mut [Self], lwork: i32, info: &mut i32); + fn xgeev_work_size(jobvl: u8, jobvr: u8, n: i32, a: &mut [Self], lda: i32, + wr: &mut [Self], wi: &mut [Self], vl: &mut [Self], ldvl: i32, + vr: &mut [Self], ldvr: i32, info: &mut i32) -> i32; +} + +macro_rules! real_eigensystem_scalar_impl ( + ($N: ty, $xgeev: path) => ( + impl RealEigensystemScalar for $N { + #[inline] + fn xgeev(jobvl: u8, jobvr: u8, n: i32, a: &mut [Self], lda: i32, + wr: &mut [Self], wi: &mut [Self], + vl: &mut [Self], ldvl: i32, vr: &mut [Self], ldvr: i32, + work: &mut [Self], lwork: i32, info: &mut i32) { + $xgeev(jobvl, jobvr, n, a, lda, wr, wi, vl, ldvl, vr, ldvr, work, lwork, info) + } + + + #[inline] + fn xgeev_work_size(jobvl: u8, jobvr: u8, n: i32, a: &mut [Self], lda: i32, + wr: &mut [Self], wi: &mut [Self], vl: &mut [Self], ldvl: i32, + vr: &mut [Self], ldvr: i32, info: &mut i32) -> i32 { + let mut work = [ Zero::zero() ]; + let lwork = -1 as i32; + + $xgeev(jobvl, jobvr, n, a, lda, wr, wi, vl, ldvl, vr, ldvr, &mut work, lwork, info); + ComplexHelper::real_part(work[0]) as i32 + } + } + ) +); + +real_eigensystem_scalar_impl!(f32, interface::sgeev); +real_eigensystem_scalar_impl!(f64, interface::dgeev); + +//// FIXME: decomposition of complex matrix and matrices with complex eigenvalues. +// eigensystem_complex_impl!(f32, interface::cgeev); +// eigensystem_complex_impl!(f64, interface::zgeev); diff --git a/nalgebra-lapack/src/hessenberg.rs b/nalgebra-lapack/src/hessenberg.rs new file mode 100644 index 00000000..68a9c433 --- /dev/null +++ b/nalgebra-lapack/src/hessenberg.rs @@ -0,0 +1,155 @@ +use num::Zero; +use num_complex::Complex; + +use ::ComplexHelper; +use na::{Scalar, Matrix, DefaultAllocator, VectorN, MatrixN}; +use na::dimension::{DimSub, DimDiff, U1}; +use na::storage::Storage; +use na::allocator::Allocator; + +use lapack::fortran as interface; + + +/// The Hessenberg decomposition of a general matrix. +pub struct Hessenberg> + where DefaultAllocator: Allocator + + Allocator> { + h: MatrixN, + tau: VectorN> +} + + +impl> Hessenberg + where DefaultAllocator: Allocator + + Allocator> { + /// Computes the hessenberg decomposition of the matrix `m`. + pub fn new(mut m: MatrixN) -> Hessenberg { + let nrows = m.data.shape().0; + let n = nrows.value() as i32; + + assert!(m.is_square(), "Unable to compute the hessenberg decomposition of a non-square matrix."); + assert!(!m.is_empty(), "Unable to compute the hessenberg decomposition of an empty matrix."); + + let mut tau = unsafe { Matrix::new_uninitialized_generic(nrows.sub(U1), U1) }; + + let mut info = 0; + let lwork = N::xgehrd_work_size(n, 1, n, m.as_mut_slice(), n, tau.as_mut_slice(), &mut info); + let mut work = unsafe { ::uninitialized_vec(lwork as usize) }; + + lapack_panic!(info); + + N::xgehrd(n, 1, n, m.as_mut_slice(), n, tau.as_mut_slice(), &mut work, lwork, &mut info); + lapack_panic!(info); + + Hessenberg { h: m, tau: tau } + } + + /// Computes the hessenberg matrix of this decomposition. + #[inline] + pub fn h(&self) -> MatrixN { + let mut h = self.h.clone_owned(); + h.fill_lower_triangle(N::zero(), 2); + + h + } +} + +impl> Hessenberg + where DefaultAllocator: Allocator + + Allocator> { + /// Computes the matrices `(Q, H)` of this decomposition. 
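+    ///
+    /// The decomposed matrix `m` satisfies `m == q * h * q.transpose()`.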
+ #[inline] + pub fn unpack(self) -> (MatrixN, MatrixN) { + (self.q(), self.h()) + } + + /// Computes the unitary matrix `Q` of this decomposition. + #[inline] + pub fn q(&self) -> MatrixN { + let n = self.h.nrows() as i32; + let mut q = self.h.clone_owned(); + let mut info = 0; + + let lwork = N::xorghr_work_size(n, 1, n, q.as_mut_slice(), n, self.tau.as_slice(), &mut info); + let mut work = vec![ N::zero(); lwork as usize ]; + + N::xorghr(n, 1, n, q.as_mut_slice(), n, self.tau.as_slice(), &mut work, lwork, &mut info); + + q + } +} + + + + +/* + * + * Lapack functions dispatch. + * + */ +pub trait HessenbergScalar: Scalar { + fn xgehrd(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32, + tau: &mut [Self], work: &mut [Self], lwork: i32, info: &mut i32); + fn xgehrd_work_size(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32, + tau: &mut [Self], info: &mut i32) -> i32; +} + +pub trait HessenbergReal: HessenbergScalar { + fn xorghr(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32, tau: &[Self], + work: &mut [Self], lwork: i32, info: &mut i32); + fn xorghr_work_size(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32, + tau: &[Self], info: &mut i32) -> i32; +} + +macro_rules! hessenberg_scalar_impl( + ($N: ty, $xgehrd: path) => ( + impl HessenbergScalar for $N { + #[inline] + fn xgehrd(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32, + tau: &mut [Self], work: &mut [Self], lwork: i32, info: &mut i32) { + $xgehrd(n, ilo, ihi, a, lda, tau, work, lwork, info) + } + + #[inline] + fn xgehrd_work_size(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32, + tau: &mut [Self], info: &mut i32) -> i32 { + let mut work = [ Zero::zero() ]; + let lwork = -1 as i32; + + $xgehrd(n, ilo, ihi, a, lda, tau, &mut work, lwork, info); + ComplexHelper::real_part(work[0]) as i32 + } + } + ) +); + +macro_rules! hessenberg_real_impl( + ($N: ty, $xorghr: path) => ( + impl HessenbergReal for $N { + #[inline] + fn xorghr(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32, tau: &[Self], + work: &mut [Self], lwork: i32, info: &mut i32) { + $xorghr(n, ilo, ihi, a, lda, tau, work, lwork, info) + } + + #[inline] + fn xorghr_work_size(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32, + tau: &[Self], info: &mut i32) -> i32 { + let mut work = [ Zero::zero() ]; + let lwork = -1 as i32; + + $xorghr(n, ilo, ihi, a, lda, tau, &mut work, lwork, info); + ComplexHelper::real_part(work[0]) as i32 + } + } + ) +); + +hessenberg_scalar_impl!(f32, interface::sgehrd); +hessenberg_scalar_impl!(f64, interface::dgehrd); +hessenberg_scalar_impl!(Complex, interface::cgehrd); +hessenberg_scalar_impl!(Complex, interface::zgehrd); + +hessenberg_real_impl!(f32, interface::sorghr); +hessenberg_real_impl!(f64, interface::dorghr); + diff --git a/nalgebra-lapack/src/lapack_check.rs b/nalgebra-lapack/src/lapack_check.rs new file mode 100644 index 00000000..ee777706 --- /dev/null +++ b/nalgebra-lapack/src/lapack_check.rs @@ -0,0 +1,21 @@ +#![macro_use] + +macro_rules! lapack_check( + ($info: expr) => ( + // FIXME: return a richer error. + if $info != 0 { + return None; + } + // if $info < 0 { + // return Err(Error::from(ErrorKind::LapackIllegalArgument(-$info))); + // } else if $info > 0 { + // return Err(Error::from(ErrorKind::LapackFailure($info))); + // } + ); +); + +macro_rules! 
lapack_panic( + ($info: expr) => ( + assert!($info == 0, "Lapack error."); + ); +); diff --git a/nalgebra-lapack/src/lib.rs b/nalgebra-lapack/src/lib.rs new file mode 100644 index 00000000..9ad8f41e --- /dev/null +++ b/nalgebra-lapack/src/lib.rs @@ -0,0 +1,81 @@ + +#![deny(non_camel_case_types)] +#![deny(unused_parens)] +#![deny(non_upper_case_globals)] +#![deny(unused_qualifications)] +#![deny(unused_results)] +#![warn(missing_docs)] +#![doc(html_root_url = "http://nalgebra.org/rustdoc")] + +extern crate num_traits as num; +extern crate num_complex; +extern crate lapack; +extern crate alga; +extern crate nalgebra as na; + +mod lapack_check; +mod svd; +mod eigen; +mod cholesky; +mod lu; +mod qr; +mod hessenberg; + +use num_complex::Complex; + +pub use self::svd::SVD; +pub use self::cholesky::{Cholesky, CholeskyScalar}; +pub use self::lu::{LU, LUScalar}; +pub use self::eigen::RealEigensystem; +pub use self::qr::QR; +pub use self::hessenberg::Hessenberg; + + +trait ComplexHelper { + type RealPart; + + fn real_part(self) -> Self::RealPart; +} + +impl ComplexHelper for f32 { + type RealPart = f32; + + #[inline] + fn real_part(self) -> Self::RealPart { + self + } +} + +impl ComplexHelper for f64 { + type RealPart = f64; + + #[inline] + fn real_part(self) -> Self::RealPart { + self + } +} + +impl ComplexHelper for Complex { + type RealPart = f32; + + #[inline] + fn real_part(self) -> Self::RealPart { + self.re + } +} + +impl ComplexHelper for Complex { + type RealPart = f64; + + #[inline] + fn real_part(self) -> Self::RealPart { + self.re + } +} + +unsafe fn uninitialized_vec(n: usize) -> Vec { + let mut res = Vec::new(); + res.reserve_exact(n); + res.set_len(n); + res +} diff --git a/nalgebra-lapack/src/lu.rs b/nalgebra-lapack/src/lu.rs new file mode 100644 index 00000000..cc3337b0 --- /dev/null +++ b/nalgebra-lapack/src/lu.rs @@ -0,0 +1,244 @@ +use num::{Zero, One}; +use num_complex::Complex; + +use ::ComplexHelper; +use na::{Scalar, DefaultAllocator, Matrix, MatrixMN, MatrixN, VectorN}; +use na::dimension::{Dim, DimMin, DimMinimum, U1}; +use na::storage::Storage; +use na::allocator::Allocator; + +use lapack::fortran as interface; + +/// LU decomposition with partial pivoting. +/// +/// This decomposes a matrix `M` with m rows and n columns into three parts: +/// * `L` which is a `m × min(m, n)` lower-triangular matrix. +/// * `U` which is a `min(m, n) × n` upper-triangular matrix. +/// * `P` which is a `m * m` permutation matrix. +/// +/// Those are such that `M == P * L * U`. +pub struct LU, C: Dim> + where DefaultAllocator: Allocator> + + Allocator { + lu: MatrixMN, + p: VectorN> +} + +impl LU + where N: Zero + One, + R: DimMin, + DefaultAllocator: Allocator + + Allocator + + Allocator> + + Allocator, C> + + Allocator> { + + pub fn new(mut m: MatrixMN) -> Self { + let (nrows, ncols) = m.data.shape(); + let min_nrows_ncols = nrows.min(ncols); + let nrows = nrows.value() as i32; + let ncols = ncols.value() as i32; + + let mut ipiv: VectorN = Matrix::zeros_generic(min_nrows_ncols, U1); + + let mut info = 0; + + N::xgetrf(nrows, ncols, m.as_mut_slice(), nrows, ipiv.as_mut_slice(), &mut info); + lapack_panic!(info); + + LU { lu: m, p: ipiv } + } + + /// Gets the lower-triangular matrix part of the decomposition. 
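+    ///
+    /// The diagonal of the returned matrix is filled with ones, since LAPACK only
+    /// stores the strict lower triangle of `L` in the factored matrix.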
+ #[inline] + pub fn l(&self) -> MatrixMN> { + let (nrows, ncols) = self.lu.data.shape(); + let mut res = self.lu.columns_generic(0, nrows.min(ncols)).into_owned(); + + res.fill_upper_triangle(Zero::zero(), 1); + res.fill_diagonal(One::one()); + + res + } + + /// Gets the upper-triangular matrix part of the decomposition. + #[inline] + pub fn u(&self) -> MatrixMN, C> { + let (nrows, ncols) = self.lu.data.shape(); + let mut res = self.lu.rows_generic(0, nrows.min(ncols)).into_owned(); + + res.fill_lower_triangle(Zero::zero(), 1); + + res + } + + /// Gets the row permutation matrix of this decomposition. + /// + /// Computing the permutation matrix explicitly is costly and usually not necessary. + /// To permute rows of a matrix or vector, use the method `self.permute(...)` instead. + #[inline] + pub fn p(&self) -> MatrixN { + let (dim, _) = self.lu.data.shape(); + let mut id = Matrix::identity_generic(dim, dim); + self.permute(&mut id); + + id + } + + // FIXME: when we support resizing a matrix, we could add unwrap_u/unwrap_l that would + // re-use the memory from the internal matrix! + + /// Gets the LAPACK permutation indices. + #[inline] + pub fn permutation_indices(&self) -> &VectorN> { + &self.p + } + + /// Applies the permutation matrix to a given matrix or vector in-place. + #[inline] + pub fn permute(&self, rhs: &mut MatrixMN) + where DefaultAllocator: Allocator { + + let (nrows, ncols) = rhs.shape(); + + N::xlaswp(ncols as i32, rhs.as_mut_slice(), nrows as i32, + 1, self.p.len() as i32, self.p.as_slice(), -1); + } + + fn generic_solve(&self, trans: u8, mut b: MatrixMN) + -> Option> + where DefaultAllocator: Allocator + + Allocator { + + let dim = self.lu.nrows(); + + assert!(self.lu.is_square(), "Unable to solve a set of under/over-determined equations."); + assert!(b.nrows() == dim, "The number of rows of `b` must be equal to the dimension of the matrix `a`."); + + let nrhs = b.ncols() as i32; + let lda = dim as i32; + let ldb = dim as i32; + let mut info = 0; + + N::xgetrs(trans, dim as i32, nrhs, self.lu.as_slice(), lda, self.p.as_slice(), + b.as_mut_slice(), ldb, &mut info); + lapack_check!(info); + + Some(b) + } + + /// Solves the linear system `self * x = b`, where `x` is the unknown to be determined. + pub fn solve(&self, b: MatrixMN) + -> Option> + where DefaultAllocator: Allocator + + Allocator { + + self.generic_solve(b'N', b) + } + + /// Solves the linear system `self.transpose() * x = b`, where `x` is the unknown to be + /// determined. + pub fn solve_transpose(&self, b: MatrixMN) + -> Option> + where DefaultAllocator: Allocator + + Allocator { + + self.generic_solve(b'T', b) + } + + /// Solves the linear system `self.conjugate_transpose() * x = b`, where `x` is the unknown to + /// be determined. + pub fn solve_conjugate_transpose(&self, b: MatrixMN) + -> Option> + where DefaultAllocator: Allocator + + Allocator { + + self.generic_solve(b'T', b) + } +} + +impl LU + where N: Zero + One, + D: DimMin, + DefaultAllocator: Allocator + + Allocator { + /// Computes the inverse of the decomposed matrix. 
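+    ///
+    /// Returns `None` if `?getri` fails, e.g., because the decomposed matrix is
+    /// singular.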
+ pub fn inverse(mut self) -> Option> { + let dim = self.lu.nrows() as i32; + let mut info = 0; + let lwork = N::xgetri_work_size(dim, self.lu.as_mut_slice(), + dim, self.p.as_mut_slice(), + &mut info); + lapack_check!(info); + + let mut work = unsafe { ::uninitialized_vec(lwork as usize) }; + + N::xgetri(dim, self.lu.as_mut_slice(), dim, self.p.as_mut_slice(), + &mut work, lwork, &mut info); + lapack_check!(info); + + Some(self.lu) + } +} + + + + +/* + * + * Lapack functions dispatch. + * + */ +pub trait LUScalar: Scalar { + fn xgetrf(m: i32, n: i32, a: &mut [Self], lda: i32, ipiv: &mut [i32], info: &mut i32); + fn xlaswp(n: i32, a: &mut [Self], lda: i32, k1: i32, k2: i32, ipiv: &[i32], incx: i32); + fn xgetrs(trans: u8, n: i32, nrhs: i32, a: &[Self], lda: i32, ipiv: &[i32], + b: &mut [Self], ldb: i32, info: &mut i32); + fn xgetri(n: i32, a: &mut [Self], lda: i32, ipiv: &[i32], + work: &mut [Self], lwork: i32, info: &mut i32); + fn xgetri_work_size(n: i32, a: &mut [Self], lda: i32, ipiv: &[i32], info: &mut i32) -> i32; +} + + +macro_rules! lup_scalar_impl( + ($N: ty, $xgetrf: path, $xlaswp: path, $xgetrs: path, $xgetri: path) => ( + impl LUScalar for $N { + #[inline] + fn xgetrf(m: i32, n: i32, a: &mut [Self], lda: i32, ipiv: &mut [i32], info: &mut i32) { + $xgetrf(m, n, a, lda, ipiv, info) + } + + #[inline] + fn xlaswp(n: i32, a: &mut [Self], lda: i32, k1: i32, k2: i32, ipiv: &[i32], incx: i32) { + $xlaswp(n, a, lda, k1, k2, ipiv, incx) + } + + #[inline] + fn xgetrs(trans: u8, n: i32, nrhs: i32, a: &[Self], lda: i32, ipiv: &[i32], + b: &mut [Self], ldb: i32, info: &mut i32) { + $xgetrs(trans, n, nrhs, a, lda, ipiv, b, ldb, info) + } + + #[inline] + fn xgetri(n: i32, a: &mut [Self], lda: i32, ipiv: &[i32], + work: &mut [Self], lwork: i32, info: &mut i32) { + $xgetri(n, a, lda, ipiv, work, lwork, info) + } + + #[inline] + fn xgetri_work_size(n: i32, a: &mut [Self], lda: i32, ipiv: &[i32], info: &mut i32) -> i32 { + let mut work = [ Zero::zero() ]; + let lwork = -1 as i32; + + $xgetri(n, a, lda, ipiv, &mut work, lwork, info); + ComplexHelper::real_part(work[0]) as i32 + } + } + ) +); + + +lup_scalar_impl!(f32, interface::sgetrf, interface::slaswp, interface::sgetrs, interface::sgetri); +lup_scalar_impl!(f64, interface::dgetrf, interface::dlaswp, interface::dgetrs, interface::dgetri); +lup_scalar_impl!(Complex, interface::cgetrf, interface::claswp, interface::cgetrs, interface::cgetri); +lup_scalar_impl!(Complex, interface::zgetrf, interface::zlaswp, interface::zgetrs, interface::zgetri); diff --git a/nalgebra-lapack/src/qr.rs b/nalgebra-lapack/src/qr.rs new file mode 100644 index 00000000..5a21babf --- /dev/null +++ b/nalgebra-lapack/src/qr.rs @@ -0,0 +1,171 @@ +use num_complex::Complex; +use num::Zero; + +use ::ComplexHelper; +use na::{Scalar, DefaultAllocator, Matrix, VectorN, MatrixMN}; +use na::dimension::{Dim, DimMin, DimMinimum, U1}; +use na::storage::Storage; +use na::allocator::Allocator; + +use lapack::fortran as interface; + + +/// The QR decomposition of a general matrix. +pub struct QR, C: Dim> + where DefaultAllocator: Allocator + + Allocator> { + qr: MatrixMN, + tau: VectorN> +} + +impl, C: Dim> QR + where DefaultAllocator: Allocator + + Allocator> + + Allocator, C> + + Allocator> { + /// Computes the QR decomposition of the matrix `m`. 
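+    ///
+    /// This performs a LAPACK workspace query followed by a call to `?geqrf`. An
+    /// empty matrix is returned unchanged, without calling LAPACK.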
+ pub fn new(mut m: MatrixMN) -> QR { + let (nrows, ncols) = m.data.shape(); + + let mut info = 0; + let mut tau = unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), U1) }; + + if nrows.value() == 0 || ncols.value() == 0 { + return QR { qr: m, tau: tau }; + } + + let lwork = N::xgeqrf_work_size(nrows.value() as i32, ncols.value() as i32, + m.as_mut_slice(), nrows.value() as i32, + tau.as_mut_slice(), &mut info); + + let mut work = unsafe { ::uninitialized_vec(lwork as usize) }; + + N::xgeqrf(nrows.value() as i32, ncols.value() as i32, m.as_mut_slice(), + nrows.value() as i32, tau.as_mut_slice(), &mut work, lwork, &mut info); + + QR { qr: m, tau: tau } + } + + /// Retrieves the upper trapezoidal submatrix `R` of this decomposition. + #[inline] + pub fn r(&self) -> MatrixMN, C> { + let (nrows, ncols) = self.qr.data.shape(); + self.qr.rows_generic(0, nrows.min(ncols)).upper_triangle() + } +} + +impl, C: Dim> QR + where DefaultAllocator: Allocator + + Allocator> + + Allocator, C> + + Allocator> { + /// Retrieves the matrices `(Q, R)` of this decompositions. + pub fn unpack(self) -> (MatrixMN>, MatrixMN, C>) { + (self.q(), self.r()) + } + + + /// Computes the orthogonal matrix `Q` of this decomposition. + #[inline] + pub fn q(&self) -> MatrixMN> { + let (nrows, ncols) = self.qr.data.shape(); + let min_nrows_ncols = nrows.min(ncols); + + if min_nrows_ncols.value() == 0 { + return MatrixMN::from_element_generic(nrows, min_nrows_ncols, N::zero()); + } + + let mut q = self.qr.generic_slice((0, 0), (nrows, min_nrows_ncols)).into_owned(); + + let mut info = 0; + let nrows = nrows.value() as i32; + + let lwork = N::xorgqr_work_size(nrows, min_nrows_ncols.value() as i32, + self.tau.len() as i32, q.as_mut_slice(), nrows, + self.tau.as_slice(), &mut info); + + let mut work = vec![ N::zero(); lwork as usize ]; + + N::xorgqr(nrows, min_nrows_ncols.value() as i32, self.tau.len() as i32, q.as_mut_slice(), + nrows, self.tau.as_slice(), &mut work, lwork, &mut info); + + q + } +} + + + + + +/* + * + * Lapack functions dispatch. + * + */ +pub trait QRScalar: Scalar { + fn xgeqrf(m: i32, n: i32, a: &mut [Self], lda: i32, tau: &mut [Self], + work: &mut [Self], lwork: i32, info: &mut i32); + + fn xgeqrf_work_size(m: i32, n: i32, a: &mut [Self], lda: i32, + tau: &mut [Self], info: &mut i32) -> i32; +} + +pub trait QRReal: QRScalar { + fn xorgqr(m: i32, n: i32, k: i32, a: &mut [Self], lda: i32, tau: &[Self], work: &mut [Self], + lwork: i32, info: &mut i32); + + fn xorgqr_work_size(m: i32, n: i32, k: i32, a: &mut [Self], lda: i32, + tau: &[Self], info: &mut i32) -> i32; +} + +macro_rules! qr_scalar_impl( + ($N: ty, $xgeqrf: path) => ( + impl QRScalar for $N { + #[inline] + fn xgeqrf(m: i32, n: i32, a: &mut [Self], lda: i32, tau: &mut [Self], + work: &mut [Self], lwork: i32, info: &mut i32) { + $xgeqrf(m, n, a, lda, tau, work, lwork, info) + } + + #[inline] + fn xgeqrf_work_size(m: i32, n: i32, a: &mut [Self], lda: i32, tau: &mut [Self], + info: &mut i32) -> i32 { + let mut work = [ Zero::zero() ]; + let lwork = -1 as i32; + + $xgeqrf(m, n, a, lda, tau, &mut work, lwork, info); + ComplexHelper::real_part(work[0]) as i32 + } + } + ) +); + +macro_rules! 
qr_real_impl( + ($N: ty, $xorgqr: path) => ( + impl QRReal for $N { + #[inline] + fn xorgqr(m: i32, n: i32, k: i32, a: &mut [Self], lda: i32, tau: &[Self], + work: &mut [Self], lwork: i32, info: &mut i32) { + $xorgqr(m, n, k, a, lda, tau, work, lwork, info) + } + + #[inline] + fn xorgqr_work_size(m: i32, n: i32, k: i32, a: &mut [Self], lda: i32, tau: &[Self], + info: &mut i32) -> i32 { + let mut work = [ Zero::zero() ]; + let lwork = -1 as i32; + + $xorgqr(m, n, k, a, lda, tau, &mut work, lwork, info); + ComplexHelper::real_part(work[0]) as i32 + } + } + ) +); + +qr_scalar_impl!(f32, interface::sgeqrf); +qr_scalar_impl!(f64, interface::dgeqrf); +qr_scalar_impl!(Complex, interface::cgeqrf); +qr_scalar_impl!(Complex, interface::zgeqrf); + +qr_real_impl!(f32, interface::sorgqr); +qr_real_impl!(f64, interface::dorgqr); diff --git a/nalgebra-lapack/src/svd.rs b/nalgebra-lapack/src/svd.rs new file mode 100644 index 00000000..932cb3ad --- /dev/null +++ b/nalgebra-lapack/src/svd.rs @@ -0,0 +1,247 @@ +use std::cmp; +use num::Signed; + +use na::{Scalar, Matrix, VectorN, MatrixN, MatrixMN, + DefaultAllocator}; +use na::dimension::{Dim, DimMin, DimMinimum, U1}; +use na::storage::Storage; +use na::allocator::Allocator; + +use lapack::fortran as interface; + + +/// The SVD decomposition of a general matrix. +pub struct SVD, C: Dim> + where DefaultAllocator: Allocator + + Allocator> + + Allocator { + pub u: MatrixN, + pub s: VectorN>, + pub vt: MatrixN +} + + +/// Trait implemented by floats (`f32`, `f64`) and complex floats (`Complex`, `Complex`) +/// supported by the Singular Value Decompotition. +pub trait SVDScalar, C: Dim>: Scalar + where DefaultAllocator: Allocator + + Allocator + + Allocator> + + Allocator { + /// Computes the SVD decomposition of `m`. + fn compute(m: MatrixMN) -> Option>; +} + +impl, R: DimMin, C: Dim> SVD + where DefaultAllocator: Allocator + + Allocator + + Allocator> + + Allocator { + pub fn new(m: MatrixMN) -> Option { + N::compute(m) + } +} + +macro_rules! 
svd_impl( + ($t: ty, $lapack_func: path) => ( + impl SVDScalar for $t + where R: DimMin, + DefaultAllocator: Allocator<$t, R, C> + + Allocator<$t, R, R> + + Allocator<$t, C, C> + + Allocator<$t, DimMinimum> { + + fn compute(mut m: MatrixMN<$t, R, C>) -> Option> { + let (nrows, ncols) = m.data.shape(); + + if nrows.value() == 0 || ncols.value() == 0 { + return None; + } + + let job = b'A'; + + let lda = nrows.value() as i32; + + let mut u = unsafe { Matrix::new_uninitialized_generic(nrows, nrows) }; + let mut s = unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), U1) }; + let mut vt = unsafe { Matrix::new_uninitialized_generic(ncols, ncols) }; + + let ldu = nrows.value(); + let ldvt = ncols.value(); + + let mut work = [ 0.0 ]; + let mut lwork = -1 as i32; + let mut info = 0; + let mut iwork = unsafe { ::uninitialized_vec(8 * cmp::min(nrows.value(), ncols.value())) }; + + $lapack_func(job, nrows.value() as i32, ncols.value() as i32, m.as_mut_slice(), + lda, &mut s.as_mut_slice(), u.as_mut_slice(), ldu as i32, vt.as_mut_slice(), + ldvt as i32, &mut work, lwork, &mut iwork, &mut info); + lapack_check!(info); + + lwork = work[0] as i32; + let mut work = unsafe { ::uninitialized_vec(lwork as usize) }; + + $lapack_func(job, nrows.value() as i32, ncols.value() as i32, m.as_mut_slice(), + lda, &mut s.as_mut_slice(), u.as_mut_slice(), ldu as i32, vt.as_mut_slice(), + ldvt as i32, &mut work, lwork, &mut iwork, &mut info); + lapack_check!(info); + + Some(SVD { u: u, s: s, vt: vt }) + } + } + + impl, C: Dim> SVD<$t, R, C> + // FIXME: All those bounds… + where DefaultAllocator: Allocator<$t, R, C> + + Allocator<$t, C, R> + + Allocator<$t, U1, R> + + Allocator<$t, U1, C> + + Allocator<$t, R, R> + + Allocator<$t, DimMinimum> + + Allocator<$t, DimMinimum, R> + + Allocator<$t, DimMinimum, C> + + Allocator<$t, R, DimMinimum> + + Allocator<$t, C, C> { + /// Reconstructs the matrix from its decomposition. + /// + /// Useful if some components (e.g. some singular values) of this decomposition have + /// been manually changed by the user. + #[inline] + pub fn matrix(&self) -> MatrixMN<$t, R, C> { + let nrows = self.u.data.shape().0; + let ncols = self.vt.data.shape().1; + let min_nrows_ncols = nrows.min(ncols); + + let mut res: MatrixMN<_, R, C> = Matrix::zeros_generic(nrows, ncols); + + { + let mut sres = res.generic_slice_mut((0, 0), (min_nrows_ncols, ncols)); + sres.copy_from(&self.vt.rows_generic(0, min_nrows_ncols)); + + for i in 0 .. min_nrows_ncols.value() { + let eigval = self.s[i]; + let mut row = sres.row_mut(i); + row *= eigval; + } + } + + &self.u * res + } + + /// Computes the pseudo-inverse of the decomposed matrix. + /// + /// All singular value bellow epsilon will be set to zero instead of being inverted. + #[inline] + pub fn pseudo_inverse(&self, epsilon: $t) -> MatrixMN<$t, C, R> { + let nrows = self.u.data.shape().0; + let ncols = self.vt.data.shape().1; + let min_nrows_ncols = nrows.min(ncols); + + let mut res: MatrixMN<_, C, R> = Matrix::zeros_generic(ncols, nrows); + + { + let mut sres = res.generic_slice_mut((0, 0), (min_nrows_ncols, nrows)); + self.u.columns_generic(0, min_nrows_ncols).transpose_to(&mut sres); + + for i in 0 .. min_nrows_ncols.value() { + let eigval = self.s[i]; + let mut row = sres.row_mut(i); + + if eigval.abs() > epsilon { + row /= eigval + } + else { + row.fill(0.0); + } + } + } + + self.vt.tr_mul(&res) + } + + /// The rank of the decomposed matrix. + /// + /// This is the number of singular values that are not too small (i.e. 
greater than + /// the given `epsilon`). + #[inline] + pub fn rank(&self, epsilon: $t) -> usize { + let mut i = 0; + + for e in self.s.as_slice().iter() { + if e.abs() > epsilon { + i += 1; + } + } + + i + } + + // FIXME: add methods to retrieve the null-space and column-space? (Respectively + // corresponding to the zero and non-zero singular values). + } + ); +); + +/* +macro_rules! svd_complex_impl( + ($name: ident, $t: ty, $lapack_func: path) => ( + impl SVDScalar for Complex<$t> { + fn compute(mut m: Matrix<$t, R, C, S>) -> Option> + Option<(MatrixN, R, S::Alloc>, + VectorN<$t, DimMinimum, S::Alloc>, + MatrixN, C, S::Alloc>)> + where R: DimMin, + S: ContiguousStorage, R, C>, + S::Alloc: OwnedAllocator, R, C, S> + + Allocator, R, R> + + Allocator, C, C> + + Allocator<$t, DimMinimum> { + let (nrows, ncols) = m.data.shape(); + + if nrows.value() == 0 || ncols.value() == 0 { + return None; + } + + let jobu = b'A'; + let jobvt = b'A'; + + let lda = nrows.value() as i32; + let min_nrows_ncols = nrows.min(ncols); + + + let mut u = unsafe { Matrix::new_uninitialized_generic(nrows, nrows) }; + let mut s = unsafe { Matrix::new_uninitialized_generic(min_nrows_ncols, U1) }; + let mut vt = unsafe { Matrix::new_uninitialized_generic(ncols, ncols) }; + + let ldu = nrows.value(); + let ldvt = ncols.value(); + + let mut work = [ Complex::new(0.0, 0.0) ]; + let mut lwork = -1 as i32; + let mut rwork = vec![ 0.0; (5 * min_nrows_ncols.value()) ]; + let mut info = 0; + + $lapack_func(jobu, jobvt, nrows.value() as i32, ncols.value() as i32, m.as_mut_slice(), + lda, s.as_mut_slice(), u.as_mut_slice(), ldu as i32, vt.as_mut_slice(), + ldvt as i32, &mut work, lwork, &mut rwork, &mut info); + lapack_check!(info); + + lwork = work[0].re as i32; + let mut work = vec![Complex::new(0.0, 0.0); lwork as usize]; + + $lapack_func(jobu, jobvt, nrows.value() as i32, ncols.value() as i32, m.as_mut_slice(), + lda, s.as_mut_slice(), u.as_mut_slice(), ldu as i32, vt.as_mut_slice(), + ldvt as i32, &mut work, lwork, &mut rwork, &mut info); + lapack_check!(info); + + Some((u, s, vt)) + } + ); +); +*/ + +svd_impl!(f32, interface::sgesdd); +svd_impl!(f64, interface::dgesdd); +// svd_complex_impl!(lapack_svd_complex_f32, f32, interface::cgesvd); +// svd_complex_impl!(lapack_svd_complex_f64, f64, interface::zgesvd); diff --git a/nalgebra-lapack/tests/lib.rs b/nalgebra-lapack/tests/lib.rs new file mode 100644 index 00000000..4e5ceac2 --- /dev/null +++ b/nalgebra-lapack/tests/lib.rs @@ -0,0 +1,9 @@ +#[macro_use] +extern crate quickcheck; +#[macro_use] +extern crate approx; +extern crate nalgebra as na; +extern crate nalgebra_lapack as nl; + + +mod linalg; diff --git a/nalgebra-lapack/tests/linalg/cholesky.rs b/nalgebra-lapack/tests/linalg/cholesky.rs new file mode 100644 index 00000000..9f945c59 --- /dev/null +++ b/nalgebra-lapack/tests/linalg/cholesky.rs @@ -0,0 +1,101 @@ +use std::cmp; + +use nl::Cholesky; +use na::{DMatrix, DVector, Vector4, Matrix3, Matrix4x3, Matrix4}; + +quickcheck!{ + fn cholesky(m: DMatrix) -> bool { + if m.len() != 0 { + let m = &m * m.transpose(); + if let Some(chol) = Cholesky::new(m.clone()) { + let l = chol.unpack(); + let reconstructed_m = &l * l.transpose(); + + return relative_eq!(reconstructed_m, m, epsilon = 1.0e-7) + } + } + return true + } + + fn cholesky_static(m: Matrix3) -> bool { + let m = &m * m.transpose(); + if let Some(chol) = Cholesky::new(m) { + let l = chol.unpack(); + let reconstructed_m = &l * l.transpose(); + + relative_eq!(reconstructed_m, m, epsilon = 1.0e-7) + } + else { + false 
+ } + } + + fn cholesky_solve(n: usize, nb: usize) -> bool { + if n != 0 { + let n = cmp::min(n, 15); // To avoid slowing down the test too much. + let nb = cmp::min(nb, 15); // To avoid slowing down the test too much. + let m = DMatrix::::new_random(n, n); + let m = &m * m.transpose(); + + if let Some(chol) = Cholesky::new(m.clone()) { + let b1 = DVector::new_random(n); + let b2 = DMatrix::new_random(n, nb); + + let sol1 = chol.solve(b1.clone()).unwrap(); + let sol2 = chol.solve(b2.clone()).unwrap(); + + return relative_eq!(&m * sol1, b1, epsilon = 1.0e-6) && + relative_eq!(&m * sol2, b2, epsilon = 1.0e-6) + } + } + + return true; + } + + fn cholesky_solve_static(m: Matrix4) -> bool { + let m = &m * m.transpose(); + match Cholesky::new(m) { + Some(chol) => { + let b1 = Vector4::new_random(); + let b2 = Matrix4x3::new_random(); + + let sol1 = chol.solve(b1).unwrap(); + let sol2 = chol.solve(b2).unwrap(); + + relative_eq!(m * sol1, b1, epsilon = 1.0e-7) && + relative_eq!(m * sol2, b2, epsilon = 1.0e-7) + }, + None => true + } + } + + fn cholesky_inverse(n: usize) -> bool { + if n != 0 { + let n = cmp::min(n, 15); // To avoid slowing down the test too much. + let m = DMatrix::::new_random(n, n); + let m = &m * m.transpose(); + + if let Some(m1) = Cholesky::new(m.clone()).unwrap().inverse() { + let id1 = &m * &m1; + let id2 = &m1 * &m; + + return id1.is_identity(1.0e-6) && id2.is_identity(1.0e-6); + } + } + + return true; + } + + fn cholesky_inverse_static(m: Matrix4) -> bool { + let m = m * m.transpose(); + match Cholesky::new(m.clone()).unwrap().inverse() { + Some(m1) => { + let id1 = &m * &m1; + let id2 = &m1 * &m; + + id1.is_identity(1.0e-5) && id2.is_identity(1.0e-5) + }, + None => true + } + } +} diff --git a/nalgebra-lapack/tests/linalg/hessenberg.rs b/nalgebra-lapack/tests/linalg/hessenberg.rs new file mode 100644 index 00000000..bb48633e --- /dev/null +++ b/nalgebra-lapack/tests/linalg/hessenberg.rs @@ -0,0 +1,38 @@ +use std::cmp; + +use nl::Hessenberg; +use na::{DMatrix, Matrix4}; + +quickcheck!{ + fn hessenberg(n: usize) -> bool { + if n != 0 { + let n = cmp::min(n, 25); + let m = DMatrix::::new_random(n, n); + + match Hessenberg::new(m.clone()) { + Some(hess) => { + let h = hess.h(); + let p = hess.p(); + + relative_eq!(m, &p * h * p.transpose(), epsilon = 1.0e-7) + }, + None => true + } + } + else { + true + } + } + + fn hessenberg_static(m: Matrix4) -> bool { + match Hessenberg::new(m) { + Some(hess) => { + let h = hess.h(); + let p = hess.p(); + + relative_eq!(m, p * h * p.transpose(), epsilon = 1.0e-7) + }, + None => true + } + } +} diff --git a/nalgebra-lapack/tests/linalg/lu.rs b/nalgebra-lapack/tests/linalg/lu.rs new file mode 100644 index 00000000..667bc860 --- /dev/null +++ b/nalgebra-lapack/tests/linalg/lu.rs @@ -0,0 +1,107 @@ +use std::cmp; + +use nl::LU; +use na::{DMatrix, DVector, Matrix4, Matrix4x3, Matrix3x4, Vector4}; + +quickcheck!{ + fn lup(m: DMatrix) -> bool { + if m.len() != 0 { + let lup = LU::new(m.clone()); + let l = lup.l(); + let u = lup.u(); + let mut computed1 = &l * &u; + lup.permute(&mut computed1); + + let computed2 = lup.p() * l * u; + + relative_eq!(computed1, m, epsilon = 1.0e-7) && + relative_eq!(computed2, m, epsilon = 1.0e-7) + } + else { + true + } + } + + fn lu_static(m: Matrix3x4) -> bool { + let lup = LU::new(m); + let l = lup.l(); + let u = lup.u(); + let mut computed1 = l * u; + lup.permute(&mut computed1); + + let computed2 = lup.p() * l * u; + + relative_eq!(computed1, m, epsilon = 1.0e-7) && + relative_eq!(computed2, m, epsilon = 
1.0e-7) + } + + fn lu_solve(n: usize, nb: usize) -> bool { + if n != 0 { + let n = cmp::min(n, 25); // To avoid slowing down the test too much. + let nb = cmp::min(nb, 25); // To avoid slowing down the test too much. + let m = DMatrix::::new_random(n, n); + + let lup = LU::new(m.clone()); + let b1 = DVector::new_random(n); + let b2 = DMatrix::new_random(n, nb); + + let sol1 = lup.solve(b1.clone()).unwrap(); + let sol2 = lup.solve(b2.clone()).unwrap(); + + let tr_sol1 = lup.solve_transpose(b1.clone()).unwrap(); + let tr_sol2 = lup.solve_transpose(b2.clone()).unwrap(); + + relative_eq!(&m * sol1, b1, epsilon = 1.0e-7) && + relative_eq!(&m * sol2, b2, epsilon = 1.0e-7) && + relative_eq!(m.transpose() * tr_sol1, b1, epsilon = 1.0e-7) && + relative_eq!(m.transpose() * tr_sol2, b2, epsilon = 1.0e-7) + } + else { + true + } + } + + fn lu_solve_static(m: Matrix4) -> bool { + let lup = LU::new(m); + let b1 = Vector4::new_random(); + let b2 = Matrix4x3::new_random(); + + let sol1 = lup.solve(b1).unwrap(); + let sol2 = lup.solve(b2).unwrap(); + let tr_sol1 = lup.solve_transpose(b1).unwrap(); + let tr_sol2 = lup.solve_transpose(b2).unwrap(); + + relative_eq!(m * sol1, b1, epsilon = 1.0e-7) && + relative_eq!(m * sol2, b2, epsilon = 1.0e-7) && + relative_eq!(m.transpose() * tr_sol1, b1, epsilon = 1.0e-7) && + relative_eq!(m.transpose() * tr_sol2, b2, epsilon = 1.0e-7) + } + + fn lu_inverse(n: usize) -> bool { + if n != 0 { + let n = cmp::min(n, 25); // To avoid slowing down the test too much. + let m = DMatrix::::new_random(n, n); + + if let Some(m1) = LU::new(m.clone()).inverse() { + let id1 = &m * &m1; + let id2 = &m1 * &m; + + return id1.is_identity(1.0e-7) && id2.is_identity(1.0e-7); + } + } + + return true; + } + + fn lu_inverse_static(m: Matrix4) -> bool { + match LU::new(m.clone()).inverse() { + Some(m1) => { + let id1 = &m * &m1; + let id2 = &m1 * &m; + + id1.is_identity(1.0e-5) && id2.is_identity(1.0e-5) + }, + None => true + } + } +} diff --git a/nalgebra-lapack/tests/linalg/mod.rs b/nalgebra-lapack/tests/linalg/mod.rs new file mode 100644 index 00000000..8caadde1 --- /dev/null +++ b/nalgebra-lapack/tests/linalg/mod.rs @@ -0,0 +1,5 @@ +mod real_eigensystem; +mod cholesky; +mod lu; +mod qr; +mod svd; diff --git a/nalgebra-lapack/tests/linalg/qr.rs b/nalgebra-lapack/tests/linalg/qr.rs new file mode 100644 index 00000000..baac445b --- /dev/null +++ b/nalgebra-lapack/tests/linalg/qr.rs @@ -0,0 +1,20 @@ +use nl::QR; +use na::{DMatrix, Matrix4x3}; + +quickcheck!{ + fn qr(m: DMatrix) -> bool { + let qr = QR::new(m.clone()); + let q = qr.q(); + let r = qr.r(); + + relative_eq!(m, q * r, epsilon = 1.0e-7) + } + + fn qr_static(m: Matrix4x3) -> bool { + let qr = QR::new(m); + let q = qr.q(); + let r = qr.r(); + + relative_eq!(m, q * r, epsilon = 1.0e-7) + } +} diff --git a/nalgebra-lapack/tests/linalg/real_eigensystem.rs b/nalgebra-lapack/tests/linalg/real_eigensystem.rs new file mode 100644 index 00000000..a616bbfd --- /dev/null +++ b/nalgebra-lapack/tests/linalg/real_eigensystem.rs @@ -0,0 +1,48 @@ +use std::cmp; + +use nl::RealEigensystem; +use na::{DMatrix, Matrix4}; + +quickcheck!{ + fn eigensystem(n: usize) -> bool { + if n != 0 { + let n = cmp::min(n, 25); + let m = DMatrix::::new_random(n, n); + + match RealEigensystem::new(m.clone(), true, true) { + Some(eig) => { + let eigvals = DMatrix::from_diagonal(&eig.eigenvalues); + let transformed_eigvectors = &m * eig.eigenvectors.as_ref().unwrap(); + let scaled_eigvectors = eig.eigenvectors.as_ref().unwrap() * &eigvals; + + let 
transformed_left_eigvectors = m.transpose() * eig.left_eigenvectors.as_ref().unwrap(); + let scaled_left_eigvectors = eig.left_eigenvectors.as_ref().unwrap() * &eigvals; + + relative_eq!(transformed_eigvectors, scaled_eigvectors, epsilon = 1.0e-7) && + relative_eq!(transformed_left_eigvectors, scaled_left_eigvectors, epsilon = 1.0e-7) + }, + None => true + } + } + else { + true + } + } + + fn eigensystem_static(m: Matrix4) -> bool { + match RealEigensystem::new(m, true, true) { + Some(eig) => { + let eigvals = Matrix4::from_diagonal(&eig.eigenvalues); + let transformed_eigvectors = m * eig.eigenvectors.unwrap(); + let scaled_eigvectors = eig.eigenvectors.unwrap() * eigvals; + + let transformed_left_eigvectors = m.transpose() * eig.left_eigenvectors.unwrap(); + let scaled_left_eigvectors = eig.left_eigenvectors.unwrap() * eigvals; + + relative_eq!(transformed_eigvectors, scaled_eigvectors, epsilon = 1.0e-7) && + relative_eq!(transformed_left_eigvectors, scaled_left_eigvectors, epsilon = 1.0e-7) + }, + None => true + } + } +} diff --git a/nalgebra-lapack/tests/linalg/svd.rs b/nalgebra-lapack/tests/linalg/svd.rs new file mode 100644 index 00000000..7a76d445 --- /dev/null +++ b/nalgebra-lapack/tests/linalg/svd.rs @@ -0,0 +1,57 @@ +use nl::SVD; +use na::{DMatrix, Matrix3x4}; + +quickcheck!{ + fn svd(m: DMatrix) -> bool { + if m.nrows() != 0 && m.ncols() != 0 { + let svd = SVD::new(m.clone()).unwrap(); + let sm = DMatrix::from_partial_diagonal(m.nrows(), m.ncols(), svd.s.as_slice()); + + let reconstructed_m = &svd.u * sm * &svd.vt; + let reconstructed_m2 = svd.matrix(); + + relative_eq!(reconstructed_m, m, epsilon = 1.0e-7) && + relative_eq!(reconstructed_m2, reconstructed_m, epsilon = 1.0e-7) + } + else { + true + } + } + + fn svd_static(m: Matrix3x4) -> bool { + let svd = SVD::new(m).unwrap(); + let sm = Matrix3x4::from_partial_diagonal(svd.s.as_slice()); + + let reconstructed_m = &svd.u * &sm * &svd.vt; + let reconstructed_m2 = svd.matrix(); + + relative_eq!(reconstructed_m, m, epsilon = 1.0e-7) && + relative_eq!(reconstructed_m2, m, epsilon = 1.0e-7) + } + + fn pseudo_inverse(m: DMatrix) -> bool { + if m.nrows() == 0 || m.ncols() == 0 { + return true; + } + + let svd = SVD::new(m.clone()).unwrap(); + let im = svd.pseudo_inverse(1.0e-7); + + if m.nrows() <= m.ncols() { + return (&m * &im).is_identity(1.0e-7) + } + + if m.nrows() >= m.ncols() { + return (im * m).is_identity(1.0e-7) + } + + return true; + } + + fn pseudo_inverse_static(m: Matrix3x4) -> bool { + let svd = SVD::new(m).unwrap(); + let im = svd.pseudo_inverse(1.0e-7); + + (m * im).is_identity(1.0e-7) + } +}
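
For reference, here is a minimal usage sketch of the new crate. It is not part of the patch itself; it assumes the default `openblas` provider, `f64` matrices, and the illustrative names `a` and `b`, and it only exercises the `LU` and `SVD` wrappers added above:

    // Usage sketch (not part of the patch): LU solve and SVD via nalgebra-lapack.
    extern crate nalgebra as na;
    extern crate nalgebra_lapack as nl;

    use na::{DMatrix, DVector};
    use nl::{LU, SVD};

    fn main() {
        // Solve `a * x = b` with the LAPACK-backed LU decomposition (?getrf / ?getrs).
        let a = DMatrix::<f64>::new_random(4, 4);
        let b = DVector::<f64>::new_random(4);

        let lu = LU::new(a.clone());
        if let Some(x) = lu.solve(b.clone()) {
            println!("residual: {}", (&a * x - b).norm());
        }

        // Singular values of the same matrix, computed by ?gesdd.
        if let Some(svd) = SVD::new(a) {
            println!("singular values: {}", svd.s);
        }
    }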