Add nalgebra-lapack as a crate in this workspace.

Sébastien Crozet 2017-08-02 19:38:28 +02:00 committed by Sébastien Crozet
parent 3f70af97dd
commit 02767fa894
26 changed files with 1862 additions and 0 deletions


@ -0,0 +1,22 @@
# Change Log
## [0.4.0] - 2016-09-07
* Made all traits use associated types for their output type parameters. This
simplifies usage of the traits and is consistent with the concept of
associated types used as output type parameters (not input type parameters) as
described in [the associated type
RFC](https://github.com/rust-lang/rfcs/blob/master/text/0195-associated-items.md).
* Implemented `check_info!` macro to check all LAPACK calls.
* Implemented error handling with [error_chain](https://crates.io/crates/error-chain).
## [0.3.0] - 2016-09-06
* Documentation is hosted at https://docs.rs/nalgebra-lapack/
* Updated `nalgebra` to 0.10.
* Renamed the traits `HasSVD` to `SVD` and `HasEigensystem` to `Eigensystem`.
* Added `Solve` trait for solving a linear matrix equation.
* Added `Inverse` for computing the multiplicative inverse of a matrix.
* Added `Cholesky` for decomposing a positive-definite matrix.
* The `Eigensystem` and `SVD` traits are now generic over types. The
associated types have been removed.


@ -0,0 +1,40 @@
[package]
name = "nalgebra-lapack"
version = "0.11.2"
authors = [ "Sébastien Crozet <developer@crozet.re>" ]
description = "Linear algebra library with transformations and statically-sized or dynamically-sized matrices."
documentation = "http://nalgebra.org/doc/nalgebra/index.html"
homepage = "http://nalgebra.org"
repository = "https://github.com/sebcrozet/nalgebra"
readme = "README.md"
keywords = [ "linear", "algebra", "matrix", "vector" ]
license = "BSD-3-Clause"
[features]
serde-serialize = [ "serde", "serde_derive" ]
# For BLAS/LAPACK
default = ["openblas"]
openblas = ["lapack/openblas"]
netlib = ["lapack/netlib"]
accelerate = ["lapack/accelerate"]
[dependencies]
nalgebra = { version = "0.12", path = ".." }
num-traits = "0.1"
num-complex = "0.1"
alga = "0.5"
serde = { version = "0.9", optional = true }
serde_derive = { version = "0.9", optional = true }
# clippy = "*"
[dependencies.lapack]
version = "0.11"
default-features = false
[dev-dependencies]
nalgebra = { version = "0.12", path = "..", features = [ "arbitrary" ] }
quickcheck = "0.4"
approx = "0.1"
rand = "0.3"


@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015 Andrew D. Straw
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

nalgebra-lapack/Makefile

@ -0,0 +1,11 @@
all:
	cargo build

test:
	cargo test

doc:
	cargo doc --all --no-deps

bench:
	cargo bench

nalgebra-lapack/README.md

@ -0,0 +1,53 @@
# nalgebra-lapack [![Version][version-img]][version-url] [![Status][status-img]][status-url] [![Doc][doc-img]][doc-url]
Rust library for linear algebra using nalgebra and LAPACK
## Documentation
Documentation is available [here](https://docs.rs/nalgebra-lapack/).
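## Example
The snippet below is only a usage sketch; the matrix size, the tolerance passed to `rank`, and reliance on the default `openblas` provider are arbitrary choices, and the `na`/`nl` crate aliases follow the convention used by the tests in this repository. See the files under `tests/` for more complete examples.

```rust
extern crate nalgebra as na;
extern crate nalgebra_lapack as nl;

use na::DMatrix;
use nl::SVD;

fn main() {
    let m = DMatrix::<f64>::new_random(10, 10);

    // Every decomposition follows the same pattern: build the factorization,
    // then query its parts or use it to solve systems.
    let svd = SVD::new(m).expect("LAPACK reported a failure");
    println!("singular values: {}", svd.s);
    println!("rank (eps = 1e-9): {}", svd.rank(1.0e-9));
}
```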
## License
MIT
## Cargo features to select lapack provider
Like the [lapack crate](https://crates.io/crates/lapack) from which this
behavior is inherited, nalgebra-lapack uses [cargo
features](http://doc.crates.io/manifest.html#the-[features]-section) to select
which lapack provider (or implementation) is used. Command line arguments to
cargo are the easiest way to do this, and the best provider depends on your
particular system. In some cases, the providers can be further tuned with
environment variables.
Below are examples of how to invoke `cargo build` on two different systems
using two different providers. The same `--no-default-features --features "provider"`
arguments work for other `cargo` commands as well.
### Ubuntu
As tested on Ubuntu 12.04, do this to build the lapack package against
the system installation of netlib without LAPACKE (note the E) or
CBLAS:
sudo apt-get install gfortran libblas3gf liblapack3gf
export CARGO_FEATURE_SYSTEM_NETLIB=1
export CARGO_FEATURE_EXCLUDE_LAPACKE=1
export CARGO_FEATURE_EXCLUDE_CBLAS=1
export CARGO_FEATURES='--no-default-features --features netlib'
cargo build ${CARGO_FEATURES}
### Mac OS X
On Mac OS X, do this to use Apple's Accelerate framework:
export CARGO_FEATURES='--no-default-features --features accelerate'
cargo build ${CARGO_FEATURES}
[version-img]: https://img.shields.io/crates/v/nalgebra-lapack.svg
[version-url]: https://crates.io/crates/nalgebra-lapack
[status-img]: https://travis-ci.org/strawlab/nalgebra-lapack.svg?branch=master
[status-url]: https://travis-ci.org/strawlab/nalgebra-lapack
[doc-img]: https://docs.rs/nalgebra-lapack/badge.svg
[doc-url]: https://docs.rs/nalgebra-lapack/


@ -0,0 +1,8 @@
#![feature(test)]
extern crate test;
extern crate rand;
extern crate nalgebra as na;
extern crate nalgebra_lapack as nl;
mod linalg;


@ -0,0 +1,21 @@
use test::{self, Bencher};
use na::{DMatrix, Matrix4};
use nl::Hessenberg;
#[bench]
fn hessenberg_decompose_100x100(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(100, 100);
bh.iter(|| test::black_box(Hessenberg::new(m.clone())))
}
#[bench]
fn hessenberg_decompose_4x4(bh: &mut Bencher) {
let m = Matrix4::<f64>::new_random();
bh.iter(|| test::black_box(Hessenberg::new(m.clone())))
}
#[bench]
fn hessenberg_decompose_500x500(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(500, 500);
bh.iter(|| test::black_box(Hessenberg::new(m.clone())))
}


@ -0,0 +1,34 @@
use test::{self, Bencher};
use na::{DMatrix, Matrix4};
use nl::LU;
#[bench]
fn lu_decompose_100x100(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(100, 100);
bh.iter(|| test::black_box(LU::new(m.clone())))
}
#[bench]
fn lu_decompose_100x500(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(100, 500);
bh.iter(|| test::black_box(LU::new(m.clone())))
}
#[bench]
fn lu_decompose_4x4(bh: &mut Bencher) {
let m = Matrix4::<f64>::new_random();
bh.iter(|| test::black_box(LU::new(m.clone())))
}
#[bench]
fn lu_decompose_500x100(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(500, 100);
bh.iter(|| test::black_box(LU::new(m.clone())))
}
#[bench]
fn lu_decompose_500x500(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(500, 500);
bh.iter(|| test::black_box(LU::new(m.clone())))
}


@ -0,0 +1,3 @@
mod qr;
mod lu;
mod hessenberg;


@ -0,0 +1,33 @@
use test::{self, Bencher};
use na::{DMatrix, Matrix4};
use nl::QR;
#[bench]
fn qr_decompose_100x100(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(100, 100);
bh.iter(|| test::black_box(QR::new(m.clone())))
}
#[bench]
fn qr_decompose_100x500(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(100, 500);
bh.iter(|| test::black_box(QR::new(m.clone())))
}
#[bench]
fn qr_decompose_4x4(bh: &mut Bencher) {
let m = Matrix4::<f64>::new_random();
bh.iter(|| test::black_box(QR::new(m.clone())))
}
#[bench]
fn qr_decompose_500x100(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(500, 100);
bh.iter(|| test::black_box(QR::new(m.clone())))
}
#[bench]
fn qr_decompose_500x500(bh: &mut Bencher) {
let m = DMatrix::<f64>::new_random(500, 500);
bh.iter(|| test::black_box(QR::new(m.clone())))
}


@ -0,0 +1,130 @@
use num::Zero;
use num_complex::Complex;
use na::{Scalar, DefaultAllocator, MatrixN, MatrixMN};
use na::dimension::Dim;
use na::allocator::Allocator;
use lapack::fortran as interface;
/// The Cholesky decomposition of a symmetric positive-definite matrix.
pub struct Cholesky<N: Scalar, D: Dim>
where DefaultAllocator: Allocator<N, D, D> {
l: MatrixN<N, D>
}
impl<N: CholeskyScalar + Zero, D: Dim> Cholesky<N, D>
where DefaultAllocator: Allocator<N, D, D> {
/// Computes the Cholesky decomposition of the given symmetric positive-definite square
/// matrix.
///
/// Only the lower-triangular part of the input matrix is considered.
#[inline]
pub fn new(mut m: MatrixN<N, D>) -> Option<Self> {
// FIXME: check symmetry as well?
assert!(m.is_square(), "Unable to compute the cholesky decomposition of a non-square matrix.");
let uplo = b'L';
let dim = m.nrows() as i32;
let mut info = 0;
N::xpotrf(uplo, dim, m.as_mut_slice(), dim, &mut info);
lapack_check!(info);
Some(Cholesky { l: m })
}
/// Retrieves the lower-triangular factor of the Cholesky decomposition, consuming `self`.
pub fn unpack(mut self) -> MatrixN<N, D> {
self.l.fill_upper_triangle(Zero::zero(), 1);
self.l
}
/// Retrieves a copy of the lower-triangular factor of the Cholesky decomposition.
pub fn l(&self) -> MatrixN<N, D> {
let mut res = self.l.clone();
res.fill_upper_triangle(Zero::zero(), 1);
res
}
/// Solves the symmetric positive-definite linear system `self * x = b`, where `x` is the
/// unknown to be determined.
pub fn solve<R2: Dim, C2: Dim>(&self, mut b: MatrixMN<N, R2, C2>)
-> Option<MatrixMN<N, R2, C2>>
where DefaultAllocator: Allocator<N, R2, C2> {
let dim = self.l.nrows();
assert!(b.nrows() == dim, "The number of rows of `b` must be equal to the dimension of the matrix `a`.");
let nrhs = b.ncols() as i32;
let lda = dim as i32;
let ldb = dim as i32;
let mut info = 0;
N::xpotrs(b'L', dim as i32, nrhs, self.l.as_slice(), lda, b.as_mut_slice(), ldb, &mut info);
lapack_check!(info);
Some(b)
}
/// Computes the inverse of the decomposed matrix.
pub fn inverse(mut self) -> Option<MatrixN<N, D>> {
let dim = self.l.nrows();
let mut info = 0;
N::xpotri(b'L', dim as i32, self.l.as_mut_slice(), dim as i32, &mut info);
lapack_check!(info);
// Copy lower triangle to upper triangle.
for i in 0 .. dim {
for j in i + 1 .. dim {
unsafe { *self.l.get_unchecked_mut(i, j) = *self.l.get_unchecked(j, i) };
}
}
Some(self.l)
}
}
/*
*
* Lapack functions dispatch.
*
*/
/// Trait implemented by floats (`f32`, `f64`) and complex floats (`Complex<f32>`, `Complex<f64>`)
/// supported by the Cholesky decomposition.
pub trait CholeskyScalar: Scalar {
fn xpotrf(uplo: u8, n: i32, a: &mut [Self], lda: i32, info: &mut i32);
fn xpotrs(uplo: u8, n: i32, nrhs: i32, a: &[Self], lda: i32, b: &mut [Self], ldb: i32, info: &mut i32);
fn xpotri(uplo: u8, n: i32, a: &mut [Self], lda: i32, info: &mut i32);
}
macro_rules! cholesky_scalar_impl(
($N: ty, $xpotrf: path, $xpotrs: path, $xpotri: path) => (
impl CholeskyScalar for $N {
#[inline]
fn xpotrf(uplo: u8, n: i32, a: &mut [Self], lda: i32, info: &mut i32) {
$xpotrf(uplo, n, a, lda, info)
}
#[inline]
fn xpotrs(uplo: u8, n: i32, nrhs: i32, a: &[Self], lda: i32,
b: &mut [Self], ldb: i32, info: &mut i32) {
$xpotrs(uplo, n, nrhs, a, lda, b, ldb, info)
}
#[inline]
fn xpotri(uplo: u8, n: i32, a: &mut [Self], lda: i32, info: &mut i32) {
$xpotri(uplo, n, a, lda, info)
}
}
)
);
cholesky_scalar_impl!(f32, interface::spotrf, interface::spotrs, interface::spotri);
cholesky_scalar_impl!(f64, interface::dpotrf, interface::dpotrs, interface::dpotri);
cholesky_scalar_impl!(Complex<f32>, interface::cpotrf, interface::cpotrs, interface::cpotri);
cholesky_scalar_impl!(Complex<f64>, interface::zpotrf, interface::zpotrs, interface::zpotri);
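A minimal usage sketch of the `Cholesky` API above; the matrix, right-hand side and tolerances are arbitrary example values, and the `na`/`nl` aliases follow the convention used by the benches and tests in this commit.

```rust
extern crate nalgebra as na;
extern crate nalgebra_lapack as nl;

use na::{Matrix3, Vector3};
use nl::Cholesky;

fn main() {
    // An arbitrary symmetric positive-definite matrix.
    let m = Matrix3::new(4.0, 1.0, 0.0,
                         1.0, 3.0, 1.0,
                         0.0, 1.0, 2.0);
    let b = Vector3::new(1.0, 2.0, 3.0);

    // `new` returns `None` if LAPACK reports that `m` is not positive-definite.
    let chol = Cholesky::new(m).expect("m should be positive-definite");

    // Solve m * x = b through the factorization.
    let x = chol.solve(b).expect("solving should succeed");
    assert!((m * x - b).norm() < 1.0e-10);

    // The lower-triangular factor satisfies m ≈ L * Lᵀ.
    let l = chol.l();
    assert!((l * l.transpose() - m).norm() < 1.0e-10);
}
```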


@ -0,0 +1,182 @@
use num::Zero;
use alga::general::Real;
use ::ComplexHelper;
use na::{Scalar, DefaultAllocator, Matrix, VectorN, MatrixN};
use na::dimension::{Dim, U1};
use na::storage::Storage;
use na::allocator::Allocator;
use lapack::fortran as interface;
/// Eigendecomposition of a real square matrix with real eigenvalues.
pub struct RealEigensystem<N: Scalar, D: Dim>
where DefaultAllocator: Allocator<N, D> +
Allocator<N, D, D> {
/// The real eigenvalues of the decomposed matrix.
pub eigenvalues: VectorN<N, D>,
/// The right eigenvectors, if they were requested.
pub eigenvectors: Option<MatrixN<N, D>>,
/// The left eigenvectors, if they were requested.
pub left_eigenvectors: Option<MatrixN<N, D>>
}
impl<N: RealEigensystemScalar + Real, D: Dim> RealEigensystem<N, D>
where DefaultAllocator: Allocator<N, D, D> +
Allocator<N, D> {
/// Computes the eigenvalues and eigenvectors of the square matrix `m`.
///
/// If `eigenvectors` (resp. `left_eigenvectors`) is `false`, the right (resp. left)
/// eigenvectors are not computed explicitly.
pub fn new(mut m: MatrixN<N, D>, left_eigenvectors: bool, eigenvectors: bool)
-> Option<RealEigensystem<N, D>> {
assert!(m.is_square(), "Unable to compute the eigenvalue decomposition of a non-square matrix.");
let ljob = if left_eigenvectors { b'V' } else { b'N' };
let rjob = if eigenvectors { b'V' } else { b'N' };
let (nrows, ncols) = m.data.shape();
let n = nrows.value();
let lda = n as i32;
let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1) };
// FIXME: Tap into the workspace.
let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1) };
let mut info = 0;
let mut placeholder1 = [ N::zero() ];
let mut placeholder2 = [ N::zero() ];
let lwork = N::xgeev_work_size(ljob, rjob, n as i32, m.as_mut_slice(), lda,
wr.as_mut_slice(), wi.as_mut_slice(), &mut placeholder1,
n as i32, &mut placeholder2, n as i32, &mut info);
lapack_check!(info);
let mut work = unsafe { ::uninitialized_vec(lwork as usize) };
match (left_eigenvectors, eigenvectors) {
(true, true) => {
let mut vl = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) };
let mut vr = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) };
N::xgeev(ljob, rjob, n as i32, m.as_mut_slice(), lda, wr.as_mut_slice(),
wi.as_mut_slice(), &mut vl.as_mut_slice(), n as i32, &mut vr.as_mut_slice(),
n as i32, &mut work, lwork, &mut info);
lapack_check!(info);
if wi.iter().all(|e| e.is_zero()) {
return Some(RealEigensystem {
eigenvalues: wr, left_eigenvectors: Some(vl), eigenvectors: Some(vr)
})
}
},
(true, false) => {
let mut vl = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) };
N::xgeev(ljob, rjob, n as i32, m.as_mut_slice(), lda, wr.as_mut_slice(),
wi.as_mut_slice(), &mut vl.as_mut_slice(), n as i32, &mut placeholder2,
1 as i32, &mut work, lwork, &mut info);
lapack_check!(info);
if wi.iter().all(|e| e.is_zero()) {
return Some(RealEigensystem {
eigenvalues: wr, left_eigenvectors: Some(vl), eigenvectors: None
});
}
},
(false, true) => {
let mut vr = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) };
N::xgeev(ljob, rjob, n as i32, m.as_mut_slice(), lda, wr.as_mut_slice(),
wi.as_mut_slice(), &mut placeholder1, 1 as i32, &mut vr.as_mut_slice(),
n as i32, &mut work, lwork, &mut info);
lapack_check!(info);
if wi.iter().all(|e| e.is_zero()) {
return Some(RealEigensystem {
eigenvalues: wr, left_eigenvectors: None, eigenvectors: Some(vr)
});
}
},
(false, false) => {
N::xgeev(ljob, rjob, n as i32, m.as_mut_slice(), lda, wr.as_mut_slice(),
wi.as_mut_slice(), &mut placeholder1, 1 as i32, &mut placeholder2,
1 as i32, &mut work, lwork, &mut info);
lapack_check!(info);
if wi.iter().all(|e| e.is_zero()) {
return Some(RealEigensystem {
eigenvalues: wr, left_eigenvectors: None, eigenvectors: None
});
}
}
}
None
}
/// The determinant of the decomposed matrix.
#[inline]
pub fn determinant(&self) -> N {
let mut det = N::one();
for e in self.eigenvalues.iter() {
det *= *e;
}
det
}
}
/*
*
* Lapack functions dispatch.
*
*/
pub trait RealEigensystemScalar: Scalar {
fn xgeev(jobvl: u8, jobvr: u8, n: i32, a: &mut [Self], lda: i32,
wr: &mut [Self], wi: &mut [Self],
vl: &mut [Self], ldvl: i32, vr: &mut [Self], ldvr: i32,
work: &mut [Self], lwork: i32, info: &mut i32);
fn xgeev_work_size(jobvl: u8, jobvr: u8, n: i32, a: &mut [Self], lda: i32,
wr: &mut [Self], wi: &mut [Self], vl: &mut [Self], ldvl: i32,
vr: &mut [Self], ldvr: i32, info: &mut i32) -> i32;
}
macro_rules! real_eigensystem_scalar_impl (
($N: ty, $xgeev: path) => (
impl RealEigensystemScalar for $N {
#[inline]
fn xgeev(jobvl: u8, jobvr: u8, n: i32, a: &mut [Self], lda: i32,
wr: &mut [Self], wi: &mut [Self],
vl: &mut [Self], ldvl: i32, vr: &mut [Self], ldvr: i32,
work: &mut [Self], lwork: i32, info: &mut i32) {
$xgeev(jobvl, jobvr, n, a, lda, wr, wi, vl, ldvl, vr, ldvr, work, lwork, info)
}
#[inline]
fn xgeev_work_size(jobvl: u8, jobvr: u8, n: i32, a: &mut [Self], lda: i32,
wr: &mut [Self], wi: &mut [Self], vl: &mut [Self], ldvl: i32,
vr: &mut [Self], ldvr: i32, info: &mut i32) -> i32 {
let mut work = [ Zero::zero() ];
let lwork = -1 as i32;
$xgeev(jobvl, jobvr, n, a, lda, wr, wi, vl, ldvl, vr, ldvr, &mut work, lwork, info);
ComplexHelper::real_part(work[0]) as i32
}
}
)
);
real_eigensystem_scalar_impl!(f32, interface::sgeev);
real_eigensystem_scalar_impl!(f64, interface::dgeev);
// FIXME: decomposition of complex matrices and of matrices with complex eigenvalues.
// eigensystem_complex_impl!(f32, interface::cgeev);
// eigensystem_complex_impl!(f64, interface::zgeev);
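A short sketch of how `RealEigensystem` is meant to be used; the symmetric example matrix and the tolerances are arbitrary, and the `na`/`nl` aliases follow the convention used by the tests in this commit.

```rust
extern crate nalgebra as na;
extern crate nalgebra_lapack as nl;

use na::Matrix3;
use nl::RealEigensystem;

fn main() {
    // A symmetric matrix, so all eigenvalues are real and `new` should return `Some(_)`.
    let m = Matrix3::new(2.0, 1.0, 0.0,
                         1.0, 2.0, 1.0,
                         0.0, 1.0, 2.0);

    if let Some(eigen) = RealEigensystem::new(m, false, true) {
        let d = Matrix3::from_diagonal(&eigen.eigenvalues);

        // The determinant is the product of the eigenvalues.
        assert!((eigen.determinant() - m.determinant()).abs() < 1.0e-7);

        // Column-wise, m * V == V * D, i.e. A vᵢ = λᵢ vᵢ up to rounding.
        let v = eigen.eigenvectors.expect("right eigenvectors were requested");
        assert!((m * v - v * d).norm() < 1.0e-7);
    }
}
```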


@ -0,0 +1,155 @@
use num::Zero;
use num_complex::Complex;
use ::ComplexHelper;
use na::{Scalar, Matrix, DefaultAllocator, VectorN, MatrixN};
use na::dimension::{DimSub, DimDiff, U1};
use na::storage::Storage;
use na::allocator::Allocator;
use lapack::fortran as interface;
/// The Hessenberg decomposition of a general matrix.
pub struct Hessenberg<N: Scalar, D: DimSub<U1>>
where DefaultAllocator: Allocator<N, D, D> +
Allocator<N, DimDiff<D, U1>> {
h: MatrixN<N, D>,
tau: VectorN<N, DimDiff<D, U1>>
}
impl<N: HessenbergScalar + Zero, D: DimSub<U1>> Hessenberg<N, D>
where DefaultAllocator: Allocator<N, D, D> +
Allocator<N, DimDiff<D, U1>> {
/// Computes the Hessenberg decomposition of the matrix `m`.
pub fn new(mut m: MatrixN<N, D>) -> Hessenberg<N, D> {
let nrows = m.data.shape().0;
let n = nrows.value() as i32;
assert!(m.is_square(), "Unable to compute the hessenberg decomposition of a non-square matrix.");
assert!(!m.is_empty(), "Unable to compute the hessenberg decomposition of an empty matrix.");
let mut tau = unsafe { Matrix::new_uninitialized_generic(nrows.sub(U1), U1) };
let mut info = 0;
let lwork = N::xgehrd_work_size(n, 1, n, m.as_mut_slice(), n, tau.as_mut_slice(), &mut info);
let mut work = unsafe { ::uninitialized_vec(lwork as usize) };
lapack_panic!(info);
N::xgehrd(n, 1, n, m.as_mut_slice(), n, tau.as_mut_slice(), &mut work, lwork, &mut info);
lapack_panic!(info);
Hessenberg { h: m, tau: tau }
}
/// Computes the upper-Hessenberg matrix `H` of this decomposition.
#[inline]
pub fn h(&self) -> MatrixN<N, D> {
let mut h = self.h.clone_owned();
h.fill_lower_triangle(N::zero(), 2);
h
}
}
impl<N: HessenbergReal + Zero, D: DimSub<U1>> Hessenberg<N, D>
where DefaultAllocator: Allocator<N, D, D> +
Allocator<N, DimDiff<D, U1>> {
/// Computes the matrices `(Q, H)` of this decomposition.
#[inline]
pub fn unpack(self) -> (MatrixN<N, D>, MatrixN<N, D>) {
(self.q(), self.h())
}
/// Computes the unitary matrix `Q` of this decomposition.
#[inline]
pub fn q(&self) -> MatrixN<N, D> {
let n = self.h.nrows() as i32;
let mut q = self.h.clone_owned();
let mut info = 0;
let lwork = N::xorghr_work_size(n, 1, n, q.as_mut_slice(), n, self.tau.as_slice(), &mut info);
let mut work = vec![ N::zero(); lwork as usize ];
N::xorghr(n, 1, n, q.as_mut_slice(), n, self.tau.as_slice(), &mut work, lwork, &mut info);
q
}
}
/*
*
* Lapack functions dispatch.
*
*/
pub trait HessenbergScalar: Scalar {
fn xgehrd(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32,
tau: &mut [Self], work: &mut [Self], lwork: i32, info: &mut i32);
fn xgehrd_work_size(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32,
tau: &mut [Self], info: &mut i32) -> i32;
}
pub trait HessenbergReal: HessenbergScalar {
fn xorghr(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32, tau: &[Self],
work: &mut [Self], lwork: i32, info: &mut i32);
fn xorghr_work_size(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32,
tau: &[Self], info: &mut i32) -> i32;
}
macro_rules! hessenberg_scalar_impl(
($N: ty, $xgehrd: path) => (
impl HessenbergScalar for $N {
#[inline]
fn xgehrd(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32,
tau: &mut [Self], work: &mut [Self], lwork: i32, info: &mut i32) {
$xgehrd(n, ilo, ihi, a, lda, tau, work, lwork, info)
}
#[inline]
fn xgehrd_work_size(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32,
tau: &mut [Self], info: &mut i32) -> i32 {
let mut work = [ Zero::zero() ];
let lwork = -1 as i32;
$xgehrd(n, ilo, ihi, a, lda, tau, &mut work, lwork, info);
ComplexHelper::real_part(work[0]) as i32
}
}
)
);
macro_rules! hessenberg_real_impl(
($N: ty, $xorghr: path) => (
impl HessenbergReal for $N {
#[inline]
fn xorghr(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32, tau: &[Self],
work: &mut [Self], lwork: i32, info: &mut i32) {
$xorghr(n, ilo, ihi, a, lda, tau, work, lwork, info)
}
#[inline]
fn xorghr_work_size(n: i32, ilo: i32, ihi: i32, a: &mut [Self], lda: i32,
tau: &[Self], info: &mut i32) -> i32 {
let mut work = [ Zero::zero() ];
let lwork = -1 as i32;
$xorghr(n, ilo, ihi, a, lda, tau, &mut work, lwork, info);
ComplexHelper::real_part(work[0]) as i32
}
}
)
);
hessenberg_scalar_impl!(f32, interface::sgehrd);
hessenberg_scalar_impl!(f64, interface::dgehrd);
hessenberg_scalar_impl!(Complex<f32>, interface::cgehrd);
hessenberg_scalar_impl!(Complex<f64>, interface::zgehrd);
hessenberg_real_impl!(f32, interface::sorghr);
hessenberg_real_impl!(f64, interface::dorghr);
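A brief sketch of the `Hessenberg` API above; the random 4×4 input and the tolerance are arbitrary, and the `na`/`nl` aliases follow the benches in this commit.

```rust
extern crate nalgebra as na;
extern crate nalgebra_lapack as nl;

use na::Matrix4;
use nl::Hessenberg;

fn main() {
    let m = Matrix4::<f64>::new_random();

    // `unpack` yields `(Q, H)` with `Q` orthogonal and `H` upper-Hessenberg,
    // such that m = Q * H * Qᵀ.
    let (q, h) = Hessenberg::new(m).unpack();
    assert!((q * h * q.transpose() - m).norm() < 1.0e-7);
}
```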


@ -0,0 +1,21 @@
#![macro_use]
macro_rules! lapack_check(
($info: expr) => (
// FIXME: return a richer error.
if $info != 0 {
return None;
}
// if $info < 0 {
// return Err(Error::from(ErrorKind::LapackIllegalArgument(-$info)));
// } else if $info > 0 {
// return Err(Error::from(ErrorKind::LapackFailure($info)));
// }
);
);
macro_rules! lapack_panic(
($info: expr) => (
assert!($info == 0, "Lapack error.");
);
);


@ -0,0 +1,81 @@
#![deny(non_camel_case_types)]
#![deny(unused_parens)]
#![deny(non_upper_case_globals)]
#![deny(unused_qualifications)]
#![deny(unused_results)]
#![warn(missing_docs)]
#![doc(html_root_url = "http://nalgebra.org/rustdoc")]
extern crate num_traits as num;
extern crate num_complex;
extern crate lapack;
extern crate alga;
extern crate nalgebra as na;
mod lapack_check;
mod svd;
mod eigen;
mod cholesky;
mod lu;
mod qr;
mod hessenberg;
use num_complex::Complex;
pub use self::svd::SVD;
pub use self::cholesky::{Cholesky, CholeskyScalar};
pub use self::lu::{LU, LUScalar};
pub use self::eigen::RealEigensystem;
pub use self::qr::QR;
pub use self::hessenberg::Hessenberg;
trait ComplexHelper {
type RealPart;
fn real_part(self) -> Self::RealPart;
}
impl ComplexHelper for f32 {
type RealPart = f32;
#[inline]
fn real_part(self) -> Self::RealPart {
self
}
}
impl ComplexHelper for f64 {
type RealPart = f64;
#[inline]
fn real_part(self) -> Self::RealPart {
self
}
}
impl ComplexHelper for Complex<f32> {
type RealPart = f32;
#[inline]
fn real_part(self) -> Self::RealPart {
self.re
}
}
impl ComplexHelper for Complex<f64> {
type RealPart = f64;
#[inline]
fn real_part(self) -> Self::RealPart {
self.re
}
}
// Safety: the caller must overwrite every element before reading it; the `Copy` bound
// ensures no drop code runs on the uninitialized contents.
unsafe fn uninitialized_vec<T: Copy>(n: usize) -> Vec<T> {
let mut res = Vec::new();
res.reserve_exact(n);
res.set_len(n);
res
}

nalgebra-lapack/src/lu.rs

@ -0,0 +1,244 @@
use num::{Zero, One};
use num_complex::Complex;
use ::ComplexHelper;
use na::{Scalar, DefaultAllocator, Matrix, MatrixMN, MatrixN, VectorN};
use na::dimension::{Dim, DimMin, DimMinimum, U1};
use na::storage::Storage;
use na::allocator::Allocator;
use lapack::fortran as interface;
/// LU decomposition with partial pivoting.
///
/// This decomposes a matrix `M` with m rows and n columns into three parts:
/// * `L`, an `m × min(m, n)` lower-triangular matrix.
/// * `U`, a `min(m, n) × n` upper-triangular matrix.
/// * `P`, an `m × m` permutation matrix.
///
/// Those are such that `M == P * L * U`.
pub struct LU<N: Scalar, R: DimMin<C>, C: Dim>
where DefaultAllocator: Allocator<i32, DimMinimum<R, C>> +
Allocator<N, R, C> {
lu: MatrixMN<N, R, C>,
p: VectorN<i32, DimMinimum<R, C>>
}
impl<N: LUScalar, R: Dim, C: Dim> LU<N, R, C>
where N: Zero + One,
R: DimMin<C>,
DefaultAllocator: Allocator<N, R, C> +
Allocator<N, R, R> +
Allocator<N, R, DimMinimum<R, C>> +
Allocator<N, DimMinimum<R, C>, C> +
Allocator<i32, DimMinimum<R, C>> {
/// Computes the LU decomposition with partial (row) pivoting of the matrix `m`.
pub fn new(mut m: MatrixMN<N, R, C>) -> Self {
let (nrows, ncols) = m.data.shape();
let min_nrows_ncols = nrows.min(ncols);
let nrows = nrows.value() as i32;
let ncols = ncols.value() as i32;
let mut ipiv: VectorN<i32, _> = Matrix::zeros_generic(min_nrows_ncols, U1);
let mut info = 0;
N::xgetrf(nrows, ncols, m.as_mut_slice(), nrows, ipiv.as_mut_slice(), &mut info);
lapack_panic!(info);
LU { lu: m, p: ipiv }
}
/// Gets the lower-triangular matrix part of the decomposition.
#[inline]
pub fn l(&self) -> MatrixMN<N, R, DimMinimum<R, C>> {
let (nrows, ncols) = self.lu.data.shape();
let mut res = self.lu.columns_generic(0, nrows.min(ncols)).into_owned();
res.fill_upper_triangle(Zero::zero(), 1);
res.fill_diagonal(One::one());
res
}
/// Gets the upper-triangular matrix part of the decomposition.
#[inline]
pub fn u(&self) -> MatrixMN<N, DimMinimum<R, C>, C> {
let (nrows, ncols) = self.lu.data.shape();
let mut res = self.lu.rows_generic(0, nrows.min(ncols)).into_owned();
res.fill_lower_triangle(Zero::zero(), 1);
res
}
/// Gets the row permutation matrix of this decomposition.
///
/// Computing the permutation matrix explicitly is costly and usually not necessary.
/// To permute rows of a matrix or vector, use the method `self.permute(...)` instead.
#[inline]
pub fn p(&self) -> MatrixN<N, R> {
let (dim, _) = self.lu.data.shape();
let mut id = Matrix::identity_generic(dim, dim);
self.permute(&mut id);
id
}
// FIXME: when we support resizing a matrix, we could add unwrap_u/unwrap_l that would
// re-use the memory from the internal matrix!
/// Gets the LAPACK permutation indices.
#[inline]
pub fn permutation_indices(&self) -> &VectorN<i32, DimMinimum<R, C>> {
&self.p
}
/// Applies the permutation matrix to a given matrix or vector in-place.
#[inline]
pub fn permute<C2: Dim>(&self, rhs: &mut MatrixMN<N, R, C2>)
where DefaultAllocator: Allocator<N, R, C2> {
let (nrows, ncols) = rhs.shape();
N::xlaswp(ncols as i32, rhs.as_mut_slice(), nrows as i32,
1, self.p.len() as i32, self.p.as_slice(), -1);
}
fn generic_solve<R2: Dim, C2: Dim>(&self, trans: u8, mut b: MatrixMN<N, R2, C2>)
-> Option<MatrixMN<N, R2, C2>>
where DefaultAllocator: Allocator<N, R2, C2> +
Allocator<i32, R2> {
let dim = self.lu.nrows();
assert!(self.lu.is_square(), "Unable to solve a set of under/over-determined equations.");
assert!(b.nrows() == dim, "The number of rows of `b` must be equal to the dimension of the matrix `a`.");
let nrhs = b.ncols() as i32;
let lda = dim as i32;
let ldb = dim as i32;
let mut info = 0;
N::xgetrs(trans, dim as i32, nrhs, self.lu.as_slice(), lda, self.p.as_slice(),
b.as_mut_slice(), ldb, &mut info);
lapack_check!(info);
Some(b)
}
/// Solves the linear system `self * x = b`, where `x` is the unknown to be determined.
pub fn solve<R2: Dim, C2: Dim>(&self, b: MatrixMN<N, R2, C2>)
-> Option<MatrixMN<N, R2, C2>>
where DefaultAllocator: Allocator<N, R2, C2> +
Allocator<i32, R2> {
self.generic_solve(b'N', b)
}
/// Solves the linear system `self.transpose() * x = b`, where `x` is the unknown to be
/// determined.
pub fn solve_transpose<R2: Dim, C2: Dim>(&self, b: MatrixMN<N, R2, C2>)
-> Option<MatrixMN<N, R2, C2>>
where DefaultAllocator: Allocator<N, R2, C2> +
Allocator<i32, R2> {
self.generic_solve(b'T', b)
}
/// Solves the linear system `self.conjugate_transpose() * x = b`, where `x` is the unknown to
/// be determined.
pub fn solve_conjugate_transpose<R2: Dim, C2: Dim>(&self, b: MatrixMN<N, R2, C2>)
-> Option<MatrixMN<N, R2, C2>>
where DefaultAllocator: Allocator<N, R2, C2> +
Allocator<i32, R2> {
// LAPACK uses `C` for the conjugate-transpose (adjoint) system; `T` only transposes.
self.generic_solve(b'C', b)
}
}
impl<N: LUScalar, D: Dim> LU<N, D, D>
where N: Zero + One,
D: DimMin<D, Output = D>,
DefaultAllocator: Allocator<N, D, D> +
Allocator<i32, D> {
/// Computes the inverse of the decomposed matrix.
pub fn inverse(mut self) -> Option<MatrixN<N, D>> {
let dim = self.lu.nrows() as i32;
let mut info = 0;
let lwork = N::xgetri_work_size(dim, self.lu.as_mut_slice(),
dim, self.p.as_mut_slice(),
&mut info);
lapack_check!(info);
let mut work = unsafe { ::uninitialized_vec(lwork as usize) };
N::xgetri(dim, self.lu.as_mut_slice(), dim, self.p.as_mut_slice(),
&mut work, lwork, &mut info);
lapack_check!(info);
Some(self.lu)
}
}
/*
*
* Lapack functions dispatch.
*
*/
pub trait LUScalar: Scalar {
fn xgetrf(m: i32, n: i32, a: &mut [Self], lda: i32, ipiv: &mut [i32], info: &mut i32);
fn xlaswp(n: i32, a: &mut [Self], lda: i32, k1: i32, k2: i32, ipiv: &[i32], incx: i32);
fn xgetrs(trans: u8, n: i32, nrhs: i32, a: &[Self], lda: i32, ipiv: &[i32],
b: &mut [Self], ldb: i32, info: &mut i32);
fn xgetri(n: i32, a: &mut [Self], lda: i32, ipiv: &[i32],
work: &mut [Self], lwork: i32, info: &mut i32);
fn xgetri_work_size(n: i32, a: &mut [Self], lda: i32, ipiv: &[i32], info: &mut i32) -> i32;
}
macro_rules! lup_scalar_impl(
($N: ty, $xgetrf: path, $xlaswp: path, $xgetrs: path, $xgetri: path) => (
impl LUScalar for $N {
#[inline]
fn xgetrf(m: i32, n: i32, a: &mut [Self], lda: i32, ipiv: &mut [i32], info: &mut i32) {
$xgetrf(m, n, a, lda, ipiv, info)
}
#[inline]
fn xlaswp(n: i32, a: &mut [Self], lda: i32, k1: i32, k2: i32, ipiv: &[i32], incx: i32) {
$xlaswp(n, a, lda, k1, k2, ipiv, incx)
}
#[inline]
fn xgetrs(trans: u8, n: i32, nrhs: i32, a: &[Self], lda: i32, ipiv: &[i32],
b: &mut [Self], ldb: i32, info: &mut i32) {
$xgetrs(trans, n, nrhs, a, lda, ipiv, b, ldb, info)
}
#[inline]
fn xgetri(n: i32, a: &mut [Self], lda: i32, ipiv: &[i32],
work: &mut [Self], lwork: i32, info: &mut i32) {
$xgetri(n, a, lda, ipiv, work, lwork, info)
}
#[inline]
fn xgetri_work_size(n: i32, a: &mut [Self], lda: i32, ipiv: &[i32], info: &mut i32) -> i32 {
let mut work = [ Zero::zero() ];
let lwork = -1 as i32;
$xgetri(n, a, lda, ipiv, &mut work, lwork, info);
ComplexHelper::real_part(work[0]) as i32
}
}
)
);
lup_scalar_impl!(f32, interface::sgetrf, interface::slaswp, interface::sgetrs, interface::sgetri);
lup_scalar_impl!(f64, interface::dgetrf, interface::dlaswp, interface::dgetrs, interface::dgetri);
lup_scalar_impl!(Complex<f32>, interface::cgetrf, interface::claswp, interface::cgetrs, interface::cgetri);
lup_scalar_impl!(Complex<f64>, interface::zgetrf, interface::zlaswp, interface::zgetrs, interface::zgetri);
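A minimal sketch of the `LU` API above; the matrix and right-hand side are arbitrary example values chosen to be invertible, and the `na`/`nl` aliases follow the tests in this commit.

```rust
extern crate nalgebra as na;
extern crate nalgebra_lapack as nl;

use na::{Matrix3, Vector3};
use nl::LU;

fn main() {
    let m = Matrix3::new(0.0, 2.0, 1.0,
                         1.0, 0.0, 3.0,
                         2.0, 1.0, 0.0);
    let b = Vector3::new(1.0, 2.0, 3.0);

    let lu = LU::new(m);

    // Reconstruction: m == P * L * U.
    assert!((lu.p() * lu.l() * lu.u() - m).norm() < 1.0e-10);

    // Solve m * x = b and mᵀ * x = b through the same factorization.
    let x = lu.solve(b).expect("m is invertible");
    let xt = lu.solve_transpose(b).expect("m is invertible");
    assert!((m * x - b).norm() < 1.0e-10);
    assert!((m.transpose() * xt - b).norm() < 1.0e-10);
}
```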

nalgebra-lapack/src/qr.rs

@ -0,0 +1,171 @@
use num_complex::Complex;
use num::Zero;
use ::ComplexHelper;
use na::{Scalar, DefaultAllocator, Matrix, VectorN, MatrixMN};
use na::dimension::{Dim, DimMin, DimMinimum, U1};
use na::storage::Storage;
use na::allocator::Allocator;
use lapack::fortran as interface;
/// The QR decomposition of a general matrix.
pub struct QR<N: Scalar, R: DimMin<C>, C: Dim>
where DefaultAllocator: Allocator<N, R, C> +
Allocator<N, DimMinimum<R, C>> {
qr: MatrixMN<N, R, C>,
tau: VectorN<N, DimMinimum<R, C>>
}
impl<N: QRScalar + Zero, R: DimMin<C>, C: Dim> QR<N, R, C>
where DefaultAllocator: Allocator<N, R, C> +
Allocator<N, R, DimMinimum<R, C>> +
Allocator<N, DimMinimum<R, C>, C> +
Allocator<N, DimMinimum<R, C>> {
/// Computes the QR decomposition of the matrix `m`.
pub fn new(mut m: MatrixMN<N, R, C>) -> QR<N, R, C> {
let (nrows, ncols) = m.data.shape();
let mut info = 0;
let mut tau = unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), U1) };
if nrows.value() == 0 || ncols.value() == 0 {
return QR { qr: m, tau: tau };
}
let lwork = N::xgeqrf_work_size(nrows.value() as i32, ncols.value() as i32,
m.as_mut_slice(), nrows.value() as i32,
tau.as_mut_slice(), &mut info);
let mut work = unsafe { ::uninitialized_vec(lwork as usize) };
N::xgeqrf(nrows.value() as i32, ncols.value() as i32, m.as_mut_slice(),
nrows.value() as i32, tau.as_mut_slice(), &mut work, lwork, &mut info);
QR { qr: m, tau: tau }
}
/// Retrieves the upper trapezoidal submatrix `R` of this decomposition.
#[inline]
pub fn r(&self) -> MatrixMN<N, DimMinimum<R, C>, C> {
let (nrows, ncols) = self.qr.data.shape();
self.qr.rows_generic(0, nrows.min(ncols)).upper_triangle()
}
}
impl<N: QRReal + Zero, R: DimMin<C>, C: Dim> QR<N, R, C>
where DefaultAllocator: Allocator<N, R, C> +
Allocator<N, R, DimMinimum<R, C>> +
Allocator<N, DimMinimum<R, C>, C> +
Allocator<N, DimMinimum<R, C>> {
/// Retrieves the matrices `(Q, R)` of this decomposition.
pub fn unpack(self) -> (MatrixMN<N, R, DimMinimum<R, C>>, MatrixMN<N, DimMinimum<R, C>, C>) {
(self.q(), self.r())
}
/// Computes the orthogonal matrix `Q` of this decomposition.
#[inline]
pub fn q(&self) -> MatrixMN<N, R, DimMinimum<R, C>> {
let (nrows, ncols) = self.qr.data.shape();
let min_nrows_ncols = nrows.min(ncols);
if min_nrows_ncols.value() == 0 {
return MatrixMN::from_element_generic(nrows, min_nrows_ncols, N::zero());
}
let mut q = self.qr.generic_slice((0, 0), (nrows, min_nrows_ncols)).into_owned();
let mut info = 0;
let nrows = nrows.value() as i32;
let lwork = N::xorgqr_work_size(nrows, min_nrows_ncols.value() as i32,
self.tau.len() as i32, q.as_mut_slice(), nrows,
self.tau.as_slice(), &mut info);
let mut work = vec![ N::zero(); lwork as usize ];
N::xorgqr(nrows, min_nrows_ncols.value() as i32, self.tau.len() as i32, q.as_mut_slice(),
nrows, self.tau.as_slice(), &mut work, lwork, &mut info);
q
}
}
/*
*
* Lapack functions dispatch.
*
*/
pub trait QRScalar: Scalar {
fn xgeqrf(m: i32, n: i32, a: &mut [Self], lda: i32, tau: &mut [Self],
work: &mut [Self], lwork: i32, info: &mut i32);
fn xgeqrf_work_size(m: i32, n: i32, a: &mut [Self], lda: i32,
tau: &mut [Self], info: &mut i32) -> i32;
}
pub trait QRReal: QRScalar {
fn xorgqr(m: i32, n: i32, k: i32, a: &mut [Self], lda: i32, tau: &[Self], work: &mut [Self],
lwork: i32, info: &mut i32);
fn xorgqr_work_size(m: i32, n: i32, k: i32, a: &mut [Self], lda: i32,
tau: &[Self], info: &mut i32) -> i32;
}
macro_rules! qr_scalar_impl(
($N: ty, $xgeqrf: path) => (
impl QRScalar for $N {
#[inline]
fn xgeqrf(m: i32, n: i32, a: &mut [Self], lda: i32, tau: &mut [Self],
work: &mut [Self], lwork: i32, info: &mut i32) {
$xgeqrf(m, n, a, lda, tau, work, lwork, info)
}
#[inline]
fn xgeqrf_work_size(m: i32, n: i32, a: &mut [Self], lda: i32, tau: &mut [Self],
info: &mut i32) -> i32 {
let mut work = [ Zero::zero() ];
let lwork = -1 as i32;
$xgeqrf(m, n, a, lda, tau, &mut work, lwork, info);
ComplexHelper::real_part(work[0]) as i32
}
}
)
);
macro_rules! qr_real_impl(
($N: ty, $xorgqr: path) => (
impl QRReal for $N {
#[inline]
fn xorgqr(m: i32, n: i32, k: i32, a: &mut [Self], lda: i32, tau: &[Self],
work: &mut [Self], lwork: i32, info: &mut i32) {
$xorgqr(m, n, k, a, lda, tau, work, lwork, info)
}
#[inline]
fn xorgqr_work_size(m: i32, n: i32, k: i32, a: &mut [Self], lda: i32, tau: &[Self],
info: &mut i32) -> i32 {
let mut work = [ Zero::zero() ];
let lwork = -1 as i32;
$xorgqr(m, n, k, a, lda, tau, &mut work, lwork, info);
ComplexHelper::real_part(work[0]) as i32
}
}
)
);
qr_scalar_impl!(f32, interface::sgeqrf);
qr_scalar_impl!(f64, interface::dgeqrf);
qr_scalar_impl!(Complex<f32>, interface::cgeqrf);
qr_scalar_impl!(Complex<f64>, interface::zgeqrf);
qr_real_impl!(f32, interface::sorgqr);
qr_real_impl!(f64, interface::dorgqr);
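A short sketch of the `QR` API above; the random 4×3 input and the tolerance are arbitrary, and the `na`/`nl` aliases follow the tests in this commit.

```rust
extern crate nalgebra as na;
extern crate nalgebra_lapack as nl;

use na::Matrix4x3;
use nl::QR;

fn main() {
    let m = Matrix4x3::<f64>::new_random();

    // `unpack` yields `Q` (4×3, orthonormal columns) and `R` (3×3, upper-triangular)
    // such that m = Q * R.
    let (q, r) = QR::new(m).unpack();
    assert!((q * r - m).norm() < 1.0e-7);
}
```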

nalgebra-lapack/src/svd.rs

@ -0,0 +1,247 @@
use std::cmp;
use num::Signed;
use na::{Scalar, Matrix, VectorN, MatrixN, MatrixMN,
DefaultAllocator};
use na::dimension::{Dim, DimMin, DimMinimum, U1};
use na::storage::Storage;
use na::allocator::Allocator;
use lapack::fortran as interface;
/// The singular value decomposition (SVD) of a general matrix.
pub struct SVD<N: Scalar, R: DimMin<C>, C: Dim>
where DefaultAllocator: Allocator<N, R, R> +
Allocator<N, DimMinimum<R, C>> +
Allocator<N, C, C> {
/// The left singular vectors `U` of this decomposition.
pub u: MatrixN<N, R>,
/// The singular values of this decomposition.
pub s: VectorN<N, DimMinimum<R, C>>,
/// The transposed right singular vectors `Vᵀ` of this decomposition.
pub vt: MatrixN<N, C>
}
/// Trait implemented by floats (`f32`, `f64`) and complex floats (`Complex<f32>`, `Complex<f64>`)
/// supported by the singular value decomposition.
pub trait SVDScalar<R: DimMin<C>, C: Dim>: Scalar
where DefaultAllocator: Allocator<Self, R, R> +
Allocator<Self, R, C> +
Allocator<Self, DimMinimum<R, C>> +
Allocator<Self, C, C> {
/// Computes the SVD decomposition of `m`.
fn compute(m: MatrixMN<Self, R, C>) -> Option<SVD<Self, R, C>>;
}
impl<N: SVDScalar<R, C>, R: DimMin<C>, C: Dim> SVD<N, R, C>
where DefaultAllocator: Allocator<N, R, R> +
Allocator<N, R, C> +
Allocator<N, DimMinimum<R, C>> +
Allocator<N, C, C> {
/// Computes the SVD of the matrix `m`. Returns `None` if `m` is empty or if LAPACK fails.
pub fn new(m: MatrixMN<N, R, C>) -> Option<Self> {
N::compute(m)
}
}
macro_rules! svd_impl(
($t: ty, $lapack_func: path) => (
impl<R: Dim, C: Dim> SVDScalar<R, C> for $t
where R: DimMin<C>,
DefaultAllocator: Allocator<$t, R, C> +
Allocator<$t, R, R> +
Allocator<$t, C, C> +
Allocator<$t, DimMinimum<R, C>> {
fn compute(mut m: MatrixMN<$t, R, C>) -> Option<SVD<$t, R, C>> {
let (nrows, ncols) = m.data.shape();
if nrows.value() == 0 || ncols.value() == 0 {
return None;
}
let job = b'A';
let lda = nrows.value() as i32;
let mut u = unsafe { Matrix::new_uninitialized_generic(nrows, nrows) };
let mut s = unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), U1) };
let mut vt = unsafe { Matrix::new_uninitialized_generic(ncols, ncols) };
let ldu = nrows.value();
let ldvt = ncols.value();
let mut work = [ 0.0 ];
let mut lwork = -1 as i32;
let mut info = 0;
let mut iwork = unsafe { ::uninitialized_vec(8 * cmp::min(nrows.value(), ncols.value())) };
$lapack_func(job, nrows.value() as i32, ncols.value() as i32, m.as_mut_slice(),
lda, &mut s.as_mut_slice(), u.as_mut_slice(), ldu as i32, vt.as_mut_slice(),
ldvt as i32, &mut work, lwork, &mut iwork, &mut info);
lapack_check!(info);
lwork = work[0] as i32;
let mut work = unsafe { ::uninitialized_vec(lwork as usize) };
$lapack_func(job, nrows.value() as i32, ncols.value() as i32, m.as_mut_slice(),
lda, &mut s.as_mut_slice(), u.as_mut_slice(), ldu as i32, vt.as_mut_slice(),
ldvt as i32, &mut work, lwork, &mut iwork, &mut info);
lapack_check!(info);
Some(SVD { u: u, s: s, vt: vt })
}
}
impl<R: DimMin<C>, C: Dim> SVD<$t, R, C>
// FIXME: All those bounds…
where DefaultAllocator: Allocator<$t, R, C> +
Allocator<$t, C, R> +
Allocator<$t, U1, R> +
Allocator<$t, U1, C> +
Allocator<$t, R, R> +
Allocator<$t, DimMinimum<R, C>> +
Allocator<$t, DimMinimum<R, C>, R> +
Allocator<$t, DimMinimum<R, C>, C> +
Allocator<$t, R, DimMinimum<R, C>> +
Allocator<$t, C, C> {
/// Reconstructs the matrix from its decomposition.
///
/// Useful if some components (e.g. some singular values) of this decomposition have
/// been manually changed by the user.
#[inline]
pub fn matrix(&self) -> MatrixMN<$t, R, C> {
let nrows = self.u.data.shape().0;
let ncols = self.vt.data.shape().1;
let min_nrows_ncols = nrows.min(ncols);
let mut res: MatrixMN<_, R, C> = Matrix::zeros_generic(nrows, ncols);
{
let mut sres = res.generic_slice_mut((0, 0), (min_nrows_ncols, ncols));
sres.copy_from(&self.vt.rows_generic(0, min_nrows_ncols));
for i in 0 .. min_nrows_ncols.value() {
let eigval = self.s[i];
let mut row = sres.row_mut(i);
row *= eigval;
}
}
&self.u * res
}
/// Computes the pseudo-inverse of the decomposed matrix.
///
/// All singular values below `epsilon` are set to zero instead of being inverted.
#[inline]
pub fn pseudo_inverse(&self, epsilon: $t) -> MatrixMN<$t, C, R> {
let nrows = self.u.data.shape().0;
let ncols = self.vt.data.shape().1;
let min_nrows_ncols = nrows.min(ncols);
let mut res: MatrixMN<_, C, R> = Matrix::zeros_generic(ncols, nrows);
{
let mut sres = res.generic_slice_mut((0, 0), (min_nrows_ncols, nrows));
self.u.columns_generic(0, min_nrows_ncols).transpose_to(&mut sres);
for i in 0 .. min_nrows_ncols.value() {
let eigval = self.s[i];
let mut row = sres.row_mut(i);
if eigval.abs() > epsilon {
row /= eigval
}
else {
row.fill(0.0);
}
}
}
self.vt.tr_mul(&res)
}
/// The rank of the decomposed matrix.
///
/// This is the number of singular values that are not too small (i.e. greater than
/// the given `epsilon`).
#[inline]
pub fn rank(&self, epsilon: $t) -> usize {
let mut i = 0;
for e in self.s.as_slice().iter() {
if e.abs() > epsilon {
i += 1;
}
}
i
}
// FIXME: add methods to retrieve the null-space and column-space? (Respectively
// corresponding to the zero and non-zero singular values).
}
);
);
/*
macro_rules! svd_complex_impl(
($name: ident, $t: ty, $lapack_func: path) => (
impl SVDScalar for Complex<$t> {
fn compute<R: Dim, C: Dim, S>(mut m: Matrix<$t, R, C, S>) -> Option<SVD<$t, R, C, S::Alloc>>
Option<(MatrixN<Complex<$t>, R, S::Alloc>,
VectorN<$t, DimMinimum<R, C>, S::Alloc>,
MatrixN<Complex<$t>, C, S::Alloc>)>
where R: DimMin<C>,
S: ContiguousStorage<Complex<$t>, R, C>,
S::Alloc: OwnedAllocator<Complex<$t>, R, C, S> +
Allocator<Complex<$t>, R, R> +
Allocator<Complex<$t>, C, C> +
Allocator<$t, DimMinimum<R, C>> {
let (nrows, ncols) = m.data.shape();
if nrows.value() == 0 || ncols.value() == 0 {
return None;
}
let jobu = b'A';
let jobvt = b'A';
let lda = nrows.value() as i32;
let min_nrows_ncols = nrows.min(ncols);
let mut u = unsafe { Matrix::new_uninitialized_generic(nrows, nrows) };
let mut s = unsafe { Matrix::new_uninitialized_generic(min_nrows_ncols, U1) };
let mut vt = unsafe { Matrix::new_uninitialized_generic(ncols, ncols) };
let ldu = nrows.value();
let ldvt = ncols.value();
let mut work = [ Complex::new(0.0, 0.0) ];
let mut lwork = -1 as i32;
let mut rwork = vec![ 0.0; (5 * min_nrows_ncols.value()) ];
let mut info = 0;
$lapack_func(jobu, jobvt, nrows.value() as i32, ncols.value() as i32, m.as_mut_slice(),
lda, s.as_mut_slice(), u.as_mut_slice(), ldu as i32, vt.as_mut_slice(),
ldvt as i32, &mut work, lwork, &mut rwork, &mut info);
lapack_check!(info);
lwork = work[0].re as i32;
let mut work = vec![Complex::new(0.0, 0.0); lwork as usize];
$lapack_func(jobu, jobvt, nrows.value() as i32, ncols.value() as i32, m.as_mut_slice(),
lda, s.as_mut_slice(), u.as_mut_slice(), ldu as i32, vt.as_mut_slice(),
ldvt as i32, &mut work, lwork, &mut rwork, &mut info);
lapack_check!(info);
Some((u, s, vt))
}
);
);
*/
svd_impl!(f32, interface::sgesdd);
svd_impl!(f64, interface::dgesdd);
// svd_complex_impl!(lapack_svd_complex_f32, f32, interface::cgesvd);
// svd_complex_impl!(lapack_svd_complex_f64, f64, interface::zgesvd);
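A minimal sketch of the `SVD` API above; the random 3×4 input and all tolerances are arbitrary, and the `na`/`nl` aliases follow the tests in this commit.

```rust
extern crate nalgebra as na;
extern crate nalgebra_lapack as nl;

use na::Matrix3x4;
use nl::SVD;

fn main() {
    let m = Matrix3x4::<f64>::new_random();

    let svd = SVD::new(m).expect("the input is non-empty, so the SVD should succeed");

    // `matrix()` rebuilds m from U, S and Vᵀ.
    assert!((svd.matrix() - m).norm() < 1.0e-7);

    // A random 3×4 matrix has full row rank (with probability one), so the
    // pseudo-inverse acts as a right inverse and all three singular values count.
    let pinv = svd.pseudo_inverse(1.0e-10);
    assert!((m * pinv).is_identity(1.0e-7));
    assert_eq!(svd.rank(1.0e-10), 3);
}
```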


@ -0,0 +1,9 @@
#[macro_use]
extern crate quickcheck;
#[macro_use]
extern crate approx;
extern crate nalgebra as na;
extern crate nalgebra_lapack as nl;
mod linalg;


@ -0,0 +1,101 @@
use std::cmp;
use nl::Cholesky;
use na::{DMatrix, DVector, Vector4, Matrix3, Matrix4x3, Matrix4};
quickcheck!{
fn cholesky(m: DMatrix<f64>) -> bool {
if m.len() != 0 {
let m = &m * m.transpose();
if let Some(chol) = Cholesky::new(m.clone()) {
let l = chol.unpack();
let reconstructed_m = &l * l.transpose();
return relative_eq!(reconstructed_m, m, epsilon = 1.0e-7)
}
}
return true
}
fn cholesky_static(m: Matrix3<f64>) -> bool {
let m = &m * m.transpose();
if let Some(chol) = Cholesky::new(m) {
let l = chol.unpack();
let reconstructed_m = &l * l.transpose();
relative_eq!(reconstructed_m, m, epsilon = 1.0e-7)
}
else {
false
}
}
fn cholesky_solve(n: usize, nb: usize) -> bool {
if n != 0 {
let n = cmp::min(n, 15); // To avoid slowing down the test too much.
let nb = cmp::min(nb, 15); // To avoid slowing down the test too much.
let m = DMatrix::<f64>::new_random(n, n);
let m = &m * m.transpose();
if let Some(chol) = Cholesky::new(m.clone()) {
let b1 = DVector::new_random(n);
let b2 = DMatrix::new_random(n, nb);
let sol1 = chol.solve(b1.clone()).unwrap();
let sol2 = chol.solve(b2.clone()).unwrap();
return relative_eq!(&m * sol1, b1, epsilon = 1.0e-6) &&
relative_eq!(&m * sol2, b2, epsilon = 1.0e-6)
}
}
return true;
}
fn cholesky_solve_static(m: Matrix4<f64>) -> bool {
let m = &m * m.transpose();
match Cholesky::new(m) {
Some(chol) => {
let b1 = Vector4::new_random();
let b2 = Matrix4x3::new_random();
let sol1 = chol.solve(b1).unwrap();
let sol2 = chol.solve(b2).unwrap();
relative_eq!(m * sol1, b1, epsilon = 1.0e-7) &&
relative_eq!(m * sol2, b2, epsilon = 1.0e-7)
},
None => true
}
}
fn cholesky_inverse(n: usize) -> bool {
if n != 0 {
let n = cmp::min(n, 15); // To avoid slowing down the test too much.
let m = DMatrix::<f64>::new_random(n, n);
let m = &m * m.transpose();
if let Some(m1) = Cholesky::new(m.clone()).unwrap().inverse() {
let id1 = &m * &m1;
let id2 = &m1 * &m;
return id1.is_identity(1.0e-6) && id2.is_identity(1.0e-6);
}
}
return true;
}
fn cholesky_inverse_static(m: Matrix4<f64>) -> bool {
let m = m * m.transpose();
match Cholesky::new(m.clone()).unwrap().inverse() {
Some(m1) => {
let id1 = &m * &m1;
let id2 = &m1 * &m;
id1.is_identity(1.0e-5) && id2.is_identity(1.0e-5)
},
None => true
}
}
}


@ -0,0 +1,38 @@
use std::cmp;
use nl::Hessenberg;
use na::{DMatrix, Matrix4};
quickcheck!{
fn hessenberg(n: usize) -> bool {
if n != 0 {
let n = cmp::min(n, 25);
let m = DMatrix::<f64>::new_random(n, n);
// m must be recovered as Q * H * Qᵀ.
let hess = Hessenberg::new(m.clone());
let h = hess.h();
let q = hess.q();
relative_eq!(m, &q * h * q.transpose(), epsilon = 1.0e-7)
}
else {
true
}
}
fn hessenberg_static(m: Matrix4<f64>) -> bool {
let hess = Hessenberg::new(m);
let h = hess.h();
let q = hess.q();
relative_eq!(m, q * h * q.transpose(), epsilon = 1.0e-7)
}
}


@ -0,0 +1,107 @@
use std::cmp;
use nl::LU;
use na::{DMatrix, DVector, Matrix4, Matrix4x3, Matrix3x4, Vector4};
quickcheck!{
fn lup(m: DMatrix<f64>) -> bool {
if m.len() != 0 {
let lup = LU::new(m.clone());
let l = lup.l();
let u = lup.u();
let mut computed1 = &l * &u;
lup.permute(&mut computed1);
let computed2 = lup.p() * l * u;
relative_eq!(computed1, m, epsilon = 1.0e-7) &&
relative_eq!(computed2, m, epsilon = 1.0e-7)
}
else {
true
}
}
fn lu_static(m: Matrix3x4<f64>) -> bool {
let lup = LU::new(m);
let l = lup.l();
let u = lup.u();
let mut computed1 = l * u;
lup.permute(&mut computed1);
let computed2 = lup.p() * l * u;
relative_eq!(computed1, m, epsilon = 1.0e-7) &&
relative_eq!(computed2, m, epsilon = 1.0e-7)
}
fn lu_solve(n: usize, nb: usize) -> bool {
if n != 0 {
let n = cmp::min(n, 25); // To avoid slowing down the test too much.
let nb = cmp::min(nb, 25); // To avoid slowing down the test too much.
let m = DMatrix::<f64>::new_random(n, n);
let lup = LU::new(m.clone());
let b1 = DVector::new_random(n);
let b2 = DMatrix::new_random(n, nb);
let sol1 = lup.solve(b1.clone()).unwrap();
let sol2 = lup.solve(b2.clone()).unwrap();
let tr_sol1 = lup.solve_transpose(b1.clone()).unwrap();
let tr_sol2 = lup.solve_transpose(b2.clone()).unwrap();
relative_eq!(&m * sol1, b1, epsilon = 1.0e-7) &&
relative_eq!(&m * sol2, b2, epsilon = 1.0e-7) &&
relative_eq!(m.transpose() * tr_sol1, b1, epsilon = 1.0e-7) &&
relative_eq!(m.transpose() * tr_sol2, b2, epsilon = 1.0e-7)
}
else {
true
}
}
fn lu_solve_static(m: Matrix4<f64>) -> bool {
let lup = LU::new(m);
let b1 = Vector4::new_random();
let b2 = Matrix4x3::new_random();
let sol1 = lup.solve(b1).unwrap();
let sol2 = lup.solve(b2).unwrap();
let tr_sol1 = lup.solve_transpose(b1).unwrap();
let tr_sol2 = lup.solve_transpose(b2).unwrap();
relative_eq!(m * sol1, b1, epsilon = 1.0e-7) &&
relative_eq!(m * sol2, b2, epsilon = 1.0e-7) &&
relative_eq!(m.transpose() * tr_sol1, b1, epsilon = 1.0e-7) &&
relative_eq!(m.transpose() * tr_sol2, b2, epsilon = 1.0e-7)
}
fn lu_inverse(n: usize) -> bool {
if n != 0 {
let n = cmp::min(n, 25); // To avoid slowing down the test too much.
let m = DMatrix::<f64>::new_random(n, n);
if let Some(m1) = LU::new(m.clone()).inverse() {
let id1 = &m * &m1;
let id2 = &m1 * &m;
return id1.is_identity(1.0e-7) && id2.is_identity(1.0e-7);
}
}
return true;
}
fn lu_inverse_static(m: Matrix4<f64>) -> bool {
match LU::new(m.clone()).inverse() {
Some(m1) => {
let id1 = &m * &m1;
let id2 = &m1 * &m;
id1.is_identity(1.0e-5) && id2.is_identity(1.0e-5)
},
None => true
}
}
}


@ -0,0 +1,5 @@
mod real_eigensystem;
mod cholesky;
mod lu;
mod qr;
mod svd;


@ -0,0 +1,20 @@
use nl::QR;
use na::{DMatrix, Matrix4x3};
quickcheck!{
fn qr(m: DMatrix<f64>) -> bool {
let qr = QR::new(m.clone());
let q = qr.q();
let r = qr.r();
relative_eq!(m, q * r, epsilon = 1.0e-7)
}
fn qr_static(m: Matrix4x3<f64>) -> bool {
let qr = QR::new(m);
let q = qr.q();
let r = qr.r();
relative_eq!(m, q * r, epsilon = 1.0e-7)
}
}


@ -0,0 +1,48 @@
use std::cmp;
use nl::RealEigensystem;
use na::{DMatrix, Matrix4};
quickcheck!{
fn eigensystem(n: usize) -> bool {
if n != 0 {
let n = cmp::min(n, 25);
let m = DMatrix::<f64>::new_random(n, n);
match RealEigensystem::new(m.clone(), true, true) {
Some(eig) => {
let eigvals = DMatrix::from_diagonal(&eig.eigenvalues);
let transformed_eigvectors = &m * eig.eigenvectors.as_ref().unwrap();
let scaled_eigvectors = eig.eigenvectors.as_ref().unwrap() * &eigvals;
let transformed_left_eigvectors = m.transpose() * eig.left_eigenvectors.as_ref().unwrap();
let scaled_left_eigvectors = eig.left_eigenvectors.as_ref().unwrap() * &eigvals;
relative_eq!(transformed_eigvectors, scaled_eigvectors, epsilon = 1.0e-7) &&
relative_eq!(transformed_left_eigvectors, scaled_left_eigvectors, epsilon = 1.0e-7)
},
None => true
}
}
else {
true
}
}
fn eigensystem_static(m: Matrix4<f64>) -> bool {
match RealEigensystem::new(m, true, true) {
Some(eig) => {
let eigvals = Matrix4::from_diagonal(&eig.eigenvalues);
let transformed_eigvectors = m * eig.eigenvectors.unwrap();
let scaled_eigvectors = eig.eigenvectors.unwrap() * eigvals;
let transformed_left_eigvectors = m.transpose() * eig.left_eigenvectors.unwrap();
let scaled_left_eigvectors = eig.left_eigenvectors.unwrap() * eigvals;
relative_eq!(transformed_eigvectors, scaled_eigvectors, epsilon = 1.0e-7) &&
relative_eq!(transformed_left_eigvectors, scaled_left_eigvectors, epsilon = 1.0e-7)
},
None => true
}
}
}


@ -0,0 +1,57 @@
use nl::SVD;
use na::{DMatrix, Matrix3x4};
quickcheck!{
fn svd(m: DMatrix<f64>) -> bool {
if m.nrows() != 0 && m.ncols() != 0 {
let svd = SVD::new(m.clone()).unwrap();
let sm = DMatrix::from_partial_diagonal(m.nrows(), m.ncols(), svd.s.as_slice());
let reconstructed_m = &svd.u * sm * &svd.vt;
let reconstructed_m2 = svd.matrix();
relative_eq!(reconstructed_m, m, epsilon = 1.0e-7) &&
relative_eq!(reconstructed_m2, reconstructed_m, epsilon = 1.0e-7)
}
else {
true
}
}
fn svd_static(m: Matrix3x4<f64>) -> bool {
let svd = SVD::new(m).unwrap();
let sm = Matrix3x4::from_partial_diagonal(svd.s.as_slice());
let reconstructed_m = &svd.u * &sm * &svd.vt;
let reconstructed_m2 = svd.matrix();
relative_eq!(reconstructed_m, m, epsilon = 1.0e-7) &&
relative_eq!(reconstructed_m2, m, epsilon = 1.0e-7)
}
fn pseudo_inverse(m: DMatrix<f64>) -> bool {
if m.nrows() == 0 || m.ncols() == 0 {
return true;
}
let svd = SVD::new(m.clone()).unwrap();
let im = svd.pseudo_inverse(1.0e-7);
if m.nrows() <= m.ncols() {
return (&m * &im).is_identity(1.0e-7)
}
if m.nrows() >= m.ncols() {
return (im * m).is_identity(1.0e-7)
}
return true;
}
fn pseudo_inverse_static(m: Matrix3x4<f64>) -> bool {
let svd = SVD::new(m).unwrap();
let im = svd.pseudo_inverse(1.0e-7);
(m * im).is_identity(1.0e-7)
}
}