Merge pull request #535 from rustsim/sparse

Limited, experimental, sparse matrix support
Sébastien Crozet 2019-02-03 16:54:30 +01:00 committed by GitHub
commit 8117f61c1c
54 changed files with 2874 additions and 344 deletions


@ -23,8 +23,10 @@ stdweb = [ "rand/stdweb" ]
arbitrary = [ "quickcheck" ] arbitrary = [ "quickcheck" ]
serde-serialize = [ "serde", "serde_derive", "num-complex/serde" ] serde-serialize = [ "serde", "serde_derive", "num-complex/serde" ]
abomonation-serialize = [ "abomonation" ] abomonation-serialize = [ "abomonation" ]
sparse = [ ]
debug = [ ] debug = [ ]
alloc = [ ] alloc = [ ]
io = [ "pest", "pest_derive" ]
[dependencies] [dependencies]
typenum = "1.10" typenum = "1.10"
@ -33,13 +35,15 @@ rand = { version = "0.6", default-features = false }
num-traits = { version = "0.2", default-features = false } num-traits = { version = "0.2", default-features = false }
num-complex = { version = "0.2", default-features = false } num-complex = { version = "0.2", default-features = false }
approx = { version = "0.3", default-features = false } approx = { version = "0.3", default-features = false }
alga = { version = "0.7", default-features = false } alga = { version = "0.8", default-features = false }
matrixmultiply = { version = "0.2", optional = true } matrixmultiply = { version = "0.2", optional = true }
serde = { version = "1.0", optional = true } serde = { version = "1.0", optional = true }
serde_derive = { version = "1.0", optional = true } serde_derive = { version = "1.0", optional = true }
abomonation = { version = "0.7", optional = true } abomonation = { version = "0.7", optional = true }
mint = { version = "0.5", optional = true } mint = { version = "0.5", optional = true }
quickcheck = { version = "0.7", optional = true } quickcheck = { version = "0.8", optional = true }
pest = { version = "2.0", optional = true }
pest_derive = { version = "2.0", optional = true }
[dev-dependencies] [dev-dependencies]
serde_json = "1.0" serde_json = "1.0"


@ -11,7 +11,7 @@ if [ -z "$NO_STD" ]; then
cargo build --verbose -p nalgebra --features "serde-serialize"; cargo build --verbose -p nalgebra --features "serde-serialize";
cargo build --verbose -p nalgebra --features "abomonation-serialize"; cargo build --verbose -p nalgebra --features "abomonation-serialize";
cargo build --verbose -p nalgebra --features "debug"; cargo build --verbose -p nalgebra --features "debug";
cargo build --verbose -p nalgebra --features "debug arbitrary mint serde-serialize abomonation-serialize"; cargo build --verbose -p nalgebra --all-features
else else
cargo build -p nalgebra-lapack; cargo build -p nalgebra-lapack;
fi fi


@ -51,8 +51,8 @@ fn main() {
// Components listed column-by-column. // Components listed column-by-column.
1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0,
] ]
.iter() .iter()
.cloned(), .cloned(),
); );
assert_eq!(dm, dm1); assert_eq!(dm, dm1);


@ -23,5 +23,5 @@ abomonation-serialize = [ "nalgebra/abomonation-serialize" ]
[dependencies] [dependencies]
num-traits = { version = "0.2", default-features = false } num-traits = { version = "0.2", default-features = false }
approx = { version = "0.3", default-features = false } approx = { version = "0.3", default-features = false }
alga = { version = "0.7", default-features = false } alga = { version = "0.8", default-features = false }
nalgebra = { path = "..", version = "^0.16.13", default-features = false } nalgebra = { path = "..", version = "^0.16.13", default-features = false }


@ -9,7 +9,7 @@ where DefaultAllocator: Alloc<N, D, D> {
TMat::<N, D, D>::identity() TMat::<N, D, D>::identity()
} }
/// Build a right hand look at view matrix /// Build a look at view matrix based on the right handedness.
/// ///
/// # Parameters: /// # Parameters:
/// ///


@ -110,6 +110,7 @@
and keep in mind it is possible to convert, e.g., an `Isometry3` to a `Mat4` and vice-versa (see the [conversions section](#conversions)). and keep in mind it is possible to convert, e.g., an `Isometry3` to a `Mat4` and vice-versa (see the [conversions section](#conversions)).
*/ */
#![doc(html_favicon_url = "http://nalgebra.org/img/favicon.ico")]
#![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr(not(feature = "std"), no_std)]
extern crate num_traits as num; extern crate num_traits as num;


@ -25,7 +25,7 @@ intel-mkl = ["lapack-src/intel-mkl"]
nalgebra = { version = "0.16", path = ".." } nalgebra = { version = "0.16", path = ".." }
num-traits = "0.2" num-traits = "0.2"
num-complex = { version = "0.2", default-features = false } num-complex = { version = "0.2", default-features = false }
alga = { version = "0.7", default-features = false } alga = { version = "0.8", default-features = false }
serde = { version = "1.0", optional = true } serde = { version = "1.0", optional = true }
serde_derive = { version = "1.0", optional = true } serde_derive = { version = "1.0", optional = true }
lapack = { version = "0.16", default-features = false } lapack = { version = "0.16", default-features = false }
@ -34,6 +34,6 @@ lapack-src = { version = "0.2", default-features = false }
[dev-dependencies] [dev-dependencies]
nalgebra = { version = "0.16", path = "..", features = [ "arbitrary" ] } nalgebra = { version = "0.16", path = "..", features = [ "arbitrary" ] }
quickcheck = "0.7" quickcheck = "0.8"
approx = "0.3" approx = "0.3"
rand = "0.6" rand = "0.6"


@ -68,8 +68,10 @@
#![deny(unused_qualifications)] #![deny(unused_qualifications)]
#![deny(unused_results)] #![deny(unused_results)]
#![deny(missing_docs)] #![deny(missing_docs)]
#![doc(html_favicon_url = "http://nalgebra.org/img/favicon.ico", #![doc(
html_root_url = "http://nalgebra.org/rustdoc")] html_favicon_url = "http://nalgebra.org/img/favicon.ico",
html_root_url = "http://nalgebra.org/rustdoc"
)]
extern crate alga; extern crate alga;
extern crate lapack; extern crate lapack;


@ -1,4 +1,4 @@
// Non-conventional componentwise operators. // Non-conventional component-wise operators.
use num::{Signed, Zero}; use num::{Signed, Zero};
use std::ops::{Add, Mul}; use std::ops::{Add, Mul};


@ -12,8 +12,10 @@ use typenum::Prod;
use base::allocator::{Allocator, SameShapeAllocator}; use base::allocator::{Allocator, SameShapeAllocator};
use base::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint}; use base::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint};
use base::dimension::{ use base::dimension::{
Dim, DimName, Dynamic, U1, U10, U11, U12, U13, U14, U15, U16, U2, U3, U4, U5, U6, U7, U8, U9, Dim, DimName, U1, U10, U11, U12, U13, U14, U15, U16, U2, U3, U4, U5, U6, U7, U8, U9,
}; };
#[cfg(any(feature = "std", feature = "alloc"))]
use base::dimension::Dynamic;
use base::iter::{MatrixIter, MatrixIterMut}; use base::iter::{MatrixIter, MatrixIterMut};
use base::storage::{ContiguousStorage, ContiguousStorageMut, Storage, StorageMut}; use base::storage::{ContiguousStorage, ContiguousStorageMut, Storage, StorageMut};
#[cfg(any(feature = "std", feature = "alloc"))] #[cfg(any(feature = "std", feature = "alloc"))]


@ -364,7 +364,8 @@ impl<
G: Bit + Any + Debug + Copy + PartialEq + Send + Sync, G: Bit + Any + Debug + Copy + PartialEq + Send + Sync,
> IsNotStaticOne > IsNotStaticOne
for UInt<UInt<UInt<UInt<UInt<UInt<UInt<UInt<UTerm, B1>, A>, B>, C>, D>, E>, F>, G> for UInt<UInt<UInt<UInt<UInt<UInt<UInt<UInt<UTerm, B1>, A>, B>, C>, D>, E>, F>, G>
{} {
}
impl<U: Unsigned + DimName, B: Bit + Any + Debug + Copy + PartialEq + Send + Sync> NamedDim impl<U: Unsigned + DimName, B: Bit + Any + Debug + Copy + PartialEq + Send + Sync> NamedDim
for UInt<U, B> for UInt<U, B>
@ -405,4 +406,5 @@ impl<U: Unsigned + DimName, B: Bit + Any + Debug + Copy + PartialEq + Send + Syn
impl<U: Unsigned + DimName, B: Bit + Any + Debug + Copy + PartialEq + Send + Sync> IsNotStaticOne impl<U: Unsigned + DimName, B: Bit + Any + Debug + Copy + PartialEq + Send + Sync> IsNotStaticOne
for UInt<U, B> for UInt<U, B>
{} {
}


@ -1,13 +1,18 @@
use num::{One, Zero}; use num::{One, Zero};
use std::cmp; use std::cmp;
use std::ptr; use std::ptr;
#[cfg(any(feature = "std", feature = "alloc"))]
use std::iter::ExactSizeIterator; use std::iter::ExactSizeIterator;
#[cfg(any(feature = "std", feature = "alloc"))]
use std::mem;
use base::allocator::{Allocator, Reallocator}; use base::allocator::{Allocator, Reallocator};
use base::constraint::{DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint}; use base::constraint::{DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint};
use base::dimension::{ use base::dimension::{
Dim, DimAdd, DimDiff, DimMin, DimMinimum, DimName, DimSub, DimSum, Dynamic, U1, Dim, DimAdd, DimDiff, DimMin, DimMinimum, DimName, DimSub, DimSum, U1,
}; };
#[cfg(any(feature = "std", feature = "alloc"))]
use base::dimension::Dynamic;
use base::storage::{Storage, StorageMut}; use base::storage::{Storage, StorageMut};
#[cfg(any(feature = "std", feature = "alloc"))] #[cfg(any(feature = "std", feature = "alloc"))]
use base::DMatrix; use base::DMatrix;
@ -35,6 +40,7 @@ impl<N: Scalar + Zero, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
} }
/// Creates a new matrix by extracting the given set of rows from `self`. /// Creates a new matrix by extracting the given set of rows from `self`.
#[cfg(any(feature = "std", feature = "alloc"))]
pub fn select_rows<'a, I>(&self, irows: I) -> MatrixMN<N, Dynamic, C> pub fn select_rows<'a, I>(&self, irows: I) -> MatrixMN<N, Dynamic, C>
where I: IntoIterator<Item = &'a usize>, where I: IntoIterator<Item = &'a usize>,
I::IntoIter: ExactSizeIterator + Clone, I::IntoIter: ExactSizeIterator + Clone,
@ -65,6 +71,7 @@ impl<N: Scalar + Zero, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
} }
/// Creates a new matrix by extracting the given set of columns from `self`. /// Creates a new matrix by extracting the given set of columns from `self`.
#[cfg(any(feature = "std", feature = "alloc"))]
pub fn select_columns<'a, I>(&self, icols: I) -> MatrixMN<N, R, Dynamic> pub fn select_columns<'a, I>(&self, icols: I) -> MatrixMN<N, R, Dynamic>
where I: IntoIterator<Item = &'a usize>, where I: IntoIterator<Item = &'a usize>,
I::IntoIter: ExactSizeIterator, I::IntoIter: ExactSizeIterator,
@ -295,6 +302,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Removes `n` consecutive columns from this matrix, starting with the `i`-th (included). /// Removes `n` consecutive columns from this matrix, starting with the `i`-th (included).
#[inline] #[inline]
#[cfg(any(feature = "std", feature = "alloc"))]
pub fn remove_columns(self, i: usize, n: usize) -> MatrixMN<N, R, Dynamic> pub fn remove_columns(self, i: usize, n: usize) -> MatrixMN<N, R, Dynamic>
where where
C: DimSub<Dynamic, Output = Dynamic>, C: DimSub<Dynamic, Output = Dynamic>,
@ -377,6 +385,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Removes `n` consecutive rows from this matrix, starting with the `i`-th (included). /// Removes `n` consecutive rows from this matrix, starting with the `i`-th (included).
#[inline] #[inline]
#[cfg(any(feature = "std", feature = "alloc"))]
pub fn remove_rows(self, i: usize, n: usize) -> MatrixMN<N, Dynamic, C> pub fn remove_rows(self, i: usize, n: usize) -> MatrixMN<N, Dynamic, C>
where where
R: DimSub<Dynamic, Output = Dynamic>, R: DimSub<Dynamic, Output = Dynamic>,
@ -454,6 +463,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Inserts `n` columns filled with `val` starting at the `i-th` position. /// Inserts `n` columns filled with `val` starting at the `i-th` position.
#[inline] #[inline]
#[cfg(any(feature = "std", feature = "alloc"))]
pub fn insert_columns(self, i: usize, n: usize, val: N) -> MatrixMN<N, R, Dynamic> pub fn insert_columns(self, i: usize, n: usize, val: N) -> MatrixMN<N, R, Dynamic>
where where
C: DimAdd<Dynamic, Output = Dynamic>, C: DimAdd<Dynamic, Output = Dynamic>,
@ -531,6 +541,7 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Inserts `n` rows filled with `val` starting at the `i-th` position. /// Inserts `n` rows filled with `val` starting at the `i-th` position.
#[inline] #[inline]
#[cfg(any(feature = "std", feature = "alloc"))]
pub fn insert_rows(self, i: usize, n: usize, val: N) -> MatrixMN<N, Dynamic, C> pub fn insert_rows(self, i: usize, n: usize, val: N) -> MatrixMN<N, Dynamic, C>
where where
R: DimAdd<Dynamic, Output = Dynamic>, R: DimAdd<Dynamic, Output = Dynamic>,
@ -596,6 +607,29 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
self.resize_generic(Dynamic::new(new_nrows), Dynamic::new(new_ncols), val) self.resize_generic(Dynamic::new(new_nrows), Dynamic::new(new_ncols), val)
} }
/// Resizes this matrix vertically, i.e., so that it contains `new_nrows` rows while keeping the same number of columns.
///
/// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more
/// rows than `self`, then the extra rows are filled with `val`.
#[cfg(any(feature = "std", feature = "alloc"))]
pub fn resize_vertically(self, new_nrows: usize, val: N) -> MatrixMN<N, Dynamic, C>
where DefaultAllocator: Reallocator<N, R, C, Dynamic, C> {
let ncols = self.data.shape().1;
self.resize_generic(Dynamic::new(new_nrows), ncols, val)
}
/// Resizes this matrix horizontally, i.e., so that it contains `new_ncols` columns while keeping the same number of rows.
///
/// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more
/// columns than `self`, then the extra columns are filled with `val`.
#[cfg(any(feature = "std", feature = "alloc"))]
pub fn resize_horizontally(self, new_ncols: usize, val: N) -> MatrixMN<N, R, Dynamic>
where DefaultAllocator: Reallocator<N, R, C, R, Dynamic> {
let nrows = self.data.shape().0;
self.resize_generic(nrows, Dynamic::new(new_ncols), val)
}
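A minimal usage sketch for the two new resizing helpers added above (assuming a fully dynamic `DMatrix`; not part of the diff):

```rust
use nalgebra::DMatrix;

// 2x3 matrix of zeros.
let m = DMatrix::<f64>::zeros(2, 3);

// Grow to 4 rows; the two extra rows are filled with 1.0.
let taller = m.clone().resize_vertically(4, 1.0);
assert_eq!(taller.shape(), (4, 3));

// Shrink to 2 columns; the retained values are copied over unchanged.
let narrower = m.resize_horizontally(2, 0.0);
assert_eq!(narrower.shape(), (2, 2));
```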
/// Resizes this matrix so that it contains `R2::value()` rows and `C2::value()` columns. /// Resizes this matrix so that it contains `R2::value()` rows and `C2::value()` columns.
/// ///
/// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more /// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more
@ -673,6 +707,61 @@ impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
} }
} }
#[cfg(any(feature = "std", feature = "alloc"))]
impl<N: Scalar> DMatrix<N> {
/// Resizes this matrix in-place.
///
/// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more
/// rows and/or columns than `self`, then the extra rows or columns are filled with `val`.
///
/// Defined only for owned fully-dynamic matrices, i.e., `DMatrix`.
pub fn resize_mut(&mut self, new_nrows: usize, new_ncols: usize, val: N)
where DefaultAllocator: Reallocator<N, Dynamic, Dynamic, Dynamic, Dynamic> {
let placeholder = unsafe { Self::new_uninitialized(0, 0) };
let old = mem::replace(self, placeholder);
let new = old.resize(new_nrows, new_ncols, val);
let _ = mem::replace(self, new);
}
}
#[cfg(any(feature = "std", feature = "alloc"))]
impl<N: Scalar, C: Dim> MatrixMN<N, Dynamic, C>
where DefaultAllocator: Allocator<N, Dynamic, C> {
/// Changes the number of rows of this matrix in-place.
///
/// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more
/// rows than `self`, then the extra rows are filled with `val`.
///
/// Defined only for owned matrices with a dynamic number of rows (for example, `DVector`).
#[cfg(any(feature = "std", feature = "alloc"))]
pub fn resize_vertically_mut(&mut self, new_nrows: usize, val: N)
where DefaultAllocator: Reallocator<N, Dynamic, C, Dynamic, C> {
let placeholder = unsafe { Self::new_uninitialized_generic(Dynamic::new(0), self.data.shape().1) };
let old = mem::replace(self, placeholder);
let new = old.resize_vertically(new_nrows, val);
let _ = mem::replace(self, new);
}
}
#[cfg(any(feature = "std", feature = "alloc"))]
impl<N: Scalar, R: Dim> MatrixMN<N, R, Dynamic>
where DefaultAllocator: Allocator<N, R, Dynamic> {
/// Changes the number of columns of this matrix in-place.
///
/// The values are copied such that `self[(i, j)] == result[(i, j)]`. If the result has more
/// columns than `self`, then the extra columns are filled with `val`.
///
/// Defined only for owned matrices with a dynamic number of columns (for example, `DVector`).
#[cfg(any(feature = "std", feature = "alloc"))]
pub fn resize_horizontally_mut(&mut self, new_ncols: usize, val: N)
where DefaultAllocator: Reallocator<N, R, Dynamic, R, Dynamic> {
let placeholder = unsafe { Self::new_uninitialized_generic(self.data.shape().0, Dynamic::new(0)) };
let old = mem::replace(self, placeholder);
let new = old.resize_horizontally(new_ncols, val);
let _ = mem::replace(self, new);
}
}
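A similar sketch for the in-place variants defined just above (again assuming a fully dynamic `DMatrix`/`DVector`; not part of the diff):

```rust
use nalgebra::{DMatrix, DVector};

let mut m = DMatrix::<f64>::identity(2, 2);
m.resize_mut(3, 3, 0.0); // now 3x3, the new entries are 0.0
assert_eq!(m.shape(), (3, 3));

let mut v = DVector::<f64>::zeros(3);
v.resize_vertically_mut(5, 1.0); // now length 5, the last two entries are 1.0
assert_eq!(v.len(), 5);
```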
unsafe fn compress_rows<N: Scalar>( unsafe fn compress_rows<N: Scalar>(
data: &mut [N], data: &mut [N],
nrows: usize, nrows: usize,
@ -753,6 +842,7 @@ unsafe fn extend_rows<N: Scalar>(
/// Extend the number of columns of the `Matrix` with elements from /// Extend the number of columns of the `Matrix` with elements from
/// a given iterator. /// a given iterator.
#[cfg(any(feature = "std", feature = "alloc"))]
impl<N, R, S> Extend<N> for Matrix<N, R, Dynamic, S> impl<N, R, S> Extend<N> for Matrix<N, R, Dynamic, S>
where where
N: Scalar, N: Scalar,
@ -800,6 +890,7 @@ where
/// Extend the number of rows of the `Vector` with elements from /// Extend the number of rows of the `Vector` with elements from
/// a given iterator. /// a given iterator.
#[cfg(any(feature = "std", feature = "alloc"))]
impl<N, S> Extend<N> for Matrix<N, Dynamic, U1, S> impl<N, S> Extend<N> for Matrix<N, Dynamic, U1, S>
where where
N: Scalar, N: Scalar,
@ -820,6 +911,7 @@ where
} }
} }
#[cfg(any(feature = "std", feature = "alloc"))]
impl<N, R, S, RV, SV> Extend<Vector<N, RV, SV>> for Matrix<N, R, Dynamic, S> impl<N, R, S, RV, SV> Extend<Vector<N, RV, SV>> for Matrix<N, R, Dynamic, S>
where where
N: Scalar, N: Scalar,


@ -6,7 +6,7 @@ use num::{One, Zero};
use alga::general::{ use alga::general::{
AbstractGroup, AbstractGroupAbelian, AbstractLoop, AbstractMagma, AbstractModule, AbstractGroup, AbstractGroupAbelian, AbstractLoop, AbstractMagma, AbstractModule,
AbstractMonoid, AbstractQuasigroup, AbstractSemigroup, Additive, ClosedAdd, ClosedMul, AbstractMonoid, AbstractQuasigroup, AbstractSemigroup, Additive, ClosedAdd, ClosedMul,
ClosedNeg, Field, Identity, Inverse, JoinSemilattice, Lattice, MeetSemilattice, Module, ClosedNeg, Field, Identity, TwoSidedInverse, JoinSemilattice, Lattice, MeetSemilattice, Module,
Multiplicative, Real, RingCommutative, Multiplicative, Real, RingCommutative,
}; };
use alga::linear::{ use alga::linear::{
@ -45,18 +45,18 @@ where
} }
} }
impl<N, R: DimName, C: DimName> Inverse<Additive> for MatrixMN<N, R, C> impl<N, R: DimName, C: DimName> TwoSidedInverse<Additive> for MatrixMN<N, R, C>
where where
N: Scalar + ClosedNeg, N: Scalar + ClosedNeg,
DefaultAllocator: Allocator<N, R, C>, DefaultAllocator: Allocator<N, R, C>,
{ {
#[inline] #[inline]
fn inverse(&self) -> MatrixMN<N, R, C> { fn two_sided_inverse(&self) -> MatrixMN<N, R, C> {
-self -self
} }
#[inline] #[inline]
fn inverse_mut(&mut self) { fn two_sided_inverse_mut(&mut self) {
*self = -self.clone() *self = -self.clone()
} }
} }
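The `Inverse` → `TwoSidedInverse` renaming throughout this PR comes from the alga 0.7 → 0.8 upgrade; a sketch of how downstream code calling the trait directly would adapt (assuming alga 0.8; not part of the diff):

```rust
use alga::general::{Additive, TwoSidedInverse};
use nalgebra::Matrix2;

let m = Matrix2::new(1.0, 2.0,
                     3.0, 4.0);
// Formerly `Inverse::<Additive>::inverse(&m)` with alga 0.7.
let neg = TwoSidedInverse::<Additive>::two_sided_inverse(&m);
assert_eq!(neg, -m);
```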


@ -4,9 +4,9 @@ use std::slice;
use base::allocator::Allocator; use base::allocator::Allocator;
use base::default_allocator::DefaultAllocator; use base::default_allocator::DefaultAllocator;
use base::dimension::{Dim, DimName, Dynamic, U1}; use base::dimension::{Dim, DimName, Dynamic, U1, IsNotStaticOne};
use base::iter::MatrixIter; use base::iter::MatrixIter;
use base::storage::{Owned, Storage, StorageMut}; use base::storage::{Owned, Storage, StorageMut, ContiguousStorage, ContiguousStorageMut};
use base::{Matrix, Scalar}; use base::{Matrix, Scalar};
macro_rules! slice_storage_impl( macro_rules! slice_storage_impl(
@ -91,7 +91,8 @@ slice_storage_impl!("A mutable matrix data storage for mutable matrix slice. Onl
impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Copy
for SliceStorage<'a, N, R, C, RStride, CStride> for SliceStorage<'a, N, R, C, RStride, CStride>
{} {
}
impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> Clone
for SliceStorage<'a, N, R, C, RStride, CStride> for SliceStorage<'a, N, R, C, RStride, CStride>
@ -146,8 +147,6 @@ macro_rules! storage_impl(
} }
} }
#[inline] #[inline]
fn into_owned(self) -> Owned<N, R, C> fn into_owned(self) -> Owned<N, R, C>
where DefaultAllocator: Allocator<N, R, C> { where DefaultAllocator: Allocator<N, R, C> {
@ -199,6 +198,14 @@ unsafe impl<'a, N: Scalar, R: Dim, C: Dim, RStride: Dim, CStride: Dim> StorageMu
} }
} }
unsafe impl<'a, N: Scalar, R: Dim, CStride: Dim> ContiguousStorage<N, R, U1> for SliceStorage<'a, N, R, U1, U1, CStride> { }
unsafe impl<'a, N: Scalar, R: Dim, CStride: Dim> ContiguousStorage<N, R, U1> for SliceStorageMut<'a, N, R, U1, U1, CStride> { }
unsafe impl<'a, N: Scalar, R: Dim, CStride: Dim> ContiguousStorageMut<N, R, U1> for SliceStorageMut<'a, N, R, U1, U1, CStride> { }
unsafe impl<'a, N: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage<N, R, C> for SliceStorage<'a, N, R, C, U1, R> { }
unsafe impl<'a, N: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorage<N, R, C> for SliceStorageMut<'a, N, R, C, U1, R> { }
unsafe impl<'a, N: Scalar, R: DimName, C: Dim + IsNotStaticOne> ContiguousStorageMut<N, R, C> for SliceStorageMut<'a, N, R, C, U1, R> { }
impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> { impl<N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
#[inline] #[inline]
fn assert_slice_index( fn assert_slice_index(
@ -859,3 +866,25 @@ impl<N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> Matrix<N, R, C, S> {
self.slice_range_mut(.., cols) self.slice_range_mut(.., cols)
} }
} }
impl<'a, N, R, C, RStride, CStride> From<MatrixSliceMut<'a, N, R, C, RStride, CStride>>
for MatrixSlice<'a, N, R, C, RStride, CStride>
where
N: Scalar,
R: Dim,
C: Dim,
RStride: Dim,
CStride: Dim,
{
fn from(slice_mut: MatrixSliceMut<'a, N, R, C, RStride, CStride>) -> Self {
let data = SliceStorage {
ptr: slice_mut.data.ptr,
shape: slice_mut.data.shape,
strides: slice_mut.data.strides,
_phantoms: PhantomData,
};
unsafe { Matrix::from_data_statically_unchecked(data) }
}
}
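The new `From` impl lets a mutable slice be demoted to a read-only view with the same shape and strides; a generic usage sketch (the helper name is made up for illustration):

```rust
use nalgebra::{Dim, MatrixSlice, MatrixSliceMut, Scalar};

// Turn any mutable matrix slice into an immutable view of the same region.
fn read_only<'a, N: Scalar, R: Dim, C: Dim, RS: Dim, CS: Dim>(
    slice: MatrixSliceMut<'a, N, R, C, RS, CS>,
) -> MatrixSlice<'a, N, R, C, RS, CS> {
    slice.into()
}
```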


@ -761,7 +761,7 @@ where
} }
impl<N: Scalar + PartialOrd + Signed, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> { impl<N: Scalar + PartialOrd + Signed, R: Dim, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S> {
/// Returns the absolute value of the coefficient with the largest absolute value. /// Returns the absolute value of the component with the largest absolute value.
#[inline] #[inline]
pub fn amax(&self) -> N { pub fn amax(&self) -> N {
let mut max = N::zero(); let mut max = N::zero();
@ -777,7 +777,7 @@ impl<N: Scalar + PartialOrd + Signed, R: Dim, C: Dim, S: Storage<N, R, C>> Matri
max max
} }
/// Returns the absolute value of the coefficient with the smallest absolute value. /// Returns the absolute value of the component with the smallest absolute value.
#[inline] #[inline]
pub fn amin(&self) -> N { pub fn amin(&self) -> N {
let mut it = self.iter(); let mut it = self.iter();
@ -796,4 +796,42 @@ impl<N: Scalar + PartialOrd + Signed, R: Dim, C: Dim, S: Storage<N, R, C>> Matri
min min
} }
/// Returns the component with the largest value.
#[inline]
pub fn max(&self) -> N {
let mut it = self.iter();
let mut max = it
.next()
.expect("max: empty matrices not supported.");
for e in it {
let ae = e;
if ae > max {
max = ae;
}
}
*max
}
/// Returns the component with the smallest value.
#[inline]
pub fn min(&self) -> N {
let mut it = self.iter();
let mut min = it
.next()
.expect("min: empty matrices not supported.");
for e in it {
let ae = e;
if ae < min {
min = ae;
}
}
*min
}
} }
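A quick sketch of the new extrema helpers next to the existing `amax`/`amin` (not part of the diff):

```rust
use nalgebra::Matrix2;

let m = Matrix2::new(1.0, -9.0,
                     4.0,  3.0);
assert_eq!(m.max(), 4.0);  // largest component
assert_eq!(m.min(), -9.0); // smallest component
assert_eq!(m.amax(), 9.0); // largest absolute value
assert_eq!(m.amin(), 1.0); // smallest absolute value
```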


@ -166,7 +166,7 @@ where DefaultAllocator: Allocator<N, D>
/// ``` /// ```
#[inline] #[inline]
pub fn inverse_mut(&mut self) { pub fn inverse_mut(&mut self) {
self.rotation.inverse_mut(); self.rotation.two_sided_inverse_mut();
self.translation.inverse_mut(); self.translation.inverse_mut();
self.translation.vector = self.rotation.transform_vector(&self.translation.vector); self.translation.vector = self.rotation.transform_vector(&self.translation.vector);
} }


@ -1,6 +1,6 @@
use alga::general::{ use alga::general::{
AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, AbstractQuasigroup, AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, AbstractQuasigroup,
AbstractSemigroup, Id, Identity, Inverse, Multiplicative, Real, AbstractSemigroup, Id, Identity, TwoSidedInverse, Multiplicative, Real,
}; };
use alga::linear::Isometry as AlgaIsometry; use alga::linear::Isometry as AlgaIsometry;
use alga::linear::{ use alga::linear::{
@ -30,18 +30,18 @@ where
} }
} }
impl<N: Real, D: DimName, R> Inverse<Multiplicative> for Isometry<N, D, R> impl<N: Real, D: DimName, R> TwoSidedInverse<Multiplicative> for Isometry<N, D, R>
where where
R: Rotation<Point<N, D>>, R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D>, DefaultAllocator: Allocator<N, D>,
{ {
#[inline] #[inline]
fn inverse(&self) -> Self { fn two_sided_inverse(&self) -> Self {
self.inverse() self.inverse()
} }
#[inline] #[inline]
fn inverse_mut(&mut self) { fn two_sided_inverse_mut(&mut self) {
self.inverse_mut() self.inverse_mut()
} }
} }


@ -16,7 +16,7 @@ use base::{DefaultAllocator, Vector2, Vector3};
use geometry::{ use geometry::{
Isometry, Point, Point3, Rotation, Rotation2, Rotation3, Translation, UnitComplex, Isometry, Point, Point3, Rotation, Rotation2, Rotation3, Translation, UnitComplex,
UnitQuaternion, UnitQuaternion, Translation2, Translation3
}; };
impl<N: Real, D: DimName, R: AlgaRotation<Point<N, D>>> Isometry<N, D, R> impl<N: Real, D: DimName, R: AlgaRotation<Point<N, D>>> Isometry<N, D, R>
@ -129,6 +129,18 @@ impl<N: Real> Isometry<N, U2, Rotation2<N>> {
Rotation::<N, U2>::new(angle), Rotation::<N, U2>::new(angle),
) )
} }
/// Creates a new isometry from the given translation coordinates.
#[inline]
pub fn translation(x: N, y: N) -> Self {
Self::new(Vector2::new(x, y), N::zero())
}
/// Creates a new isometry from the given rotation angle.
#[inline]
pub fn rotation(angle: N) -> Self {
Self::new(Vector2::zeros(), angle)
}
} }
impl<N: Real> Isometry<N, U2, UnitComplex<N>> { impl<N: Real> Isometry<N, U2, UnitComplex<N>> {
@ -152,6 +164,18 @@ impl<N: Real> Isometry<N, U2, UnitComplex<N>> {
UnitComplex::from_angle(angle), UnitComplex::from_angle(angle),
) )
} }
/// Creates a new isometry from the given translation coordinates.
#[inline]
pub fn translation(x: N, y: N) -> Self {
Self::from_parts(Translation2::new(x, y), UnitComplex::identity())
}
/// Creates a new isometry from the given rotation angle.
#[inline]
pub fn rotation(angle: N) -> Self {
Self::new(Vector2::zeros(), angle)
}
} }
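A sketch of the new shorthand constructors, using the `UnitComplex`-backed `Isometry2` (the 3D macro below adds the same pair for the 3D isometries; not part of the diff):

```rust
use nalgebra::{Isometry2, Point2};

let t = Isometry2::translation(1.0, 2.0); // pure translation
let r = Isometry2::rotation(std::f64::consts::FRAC_PI_2); // pure rotation
let p = (t * r) * Point2::new(1.0, 0.0);
assert!((p - Point2::new(1.0, 3.0)).norm() < 1.0e-7);
```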
// 3D rotation. // 3D rotation.
@ -189,6 +213,18 @@ macro_rules! isometry_construction_impl(
$RotId::<$($RotParams),*>::from_scaled_axis(axisangle)) $RotId::<$($RotParams),*>::from_scaled_axis(axisangle))
} }
/// Creates a new isometry from the given translation coordinates.
#[inline]
pub fn translation(x: N, y: N, z: N) -> Self {
Self::from_parts(Translation3::new(x, y, z), $RotId::identity())
}
/// Creates a new isometry from the given rotation angle.
#[inline]
pub fn rotation(axisangle: Vector3<N>) -> Self {
Self::new(Vector3::zeros(), axisangle)
}
/// Creates an isometry that corresponds to the local frame of an observer standing at the /// Creates an isometry that corresponds to the local frame of an observer standing at the
/// point `eye` and looking toward `target`. /// point `eye` and looking toward `target`.
/// ///


@ -200,8 +200,8 @@ isometry_binop_assign_impl_all!(
DivAssign, div_assign; DivAssign, div_assign;
self: Isometry<N, D, R>, rhs: R; self: Isometry<N, D, R>, rhs: R;
// FIXME: don't invert explicitly? // FIXME: don't invert explicitly?
[val] => *self *= rhs.inverse(); [val] => *self *= rhs.two_sided_inverse();
[ref] => *self *= rhs.inverse(); [ref] => *self *= rhs.two_sided_inverse();
); );
// Isometry × R // Isometry × R


@ -37,6 +37,7 @@ mod translation_alga;
mod translation_alias; mod translation_alias;
mod translation_construction; mod translation_construction;
mod translation_conversion; mod translation_conversion;
mod translation_coordinates;
mod translation_ops; mod translation_ops;
mod isometry; mod isometry;


@ -2,7 +2,7 @@ use num::Zero;
use alga::general::{ use alga::general::{
AbstractGroup, AbstractGroupAbelian, AbstractLoop, AbstractMagma, AbstractModule, AbstractGroup, AbstractGroupAbelian, AbstractLoop, AbstractMagma, AbstractModule,
AbstractMonoid, AbstractQuasigroup, AbstractSemigroup, Additive, Id, Identity, Inverse, Module, AbstractMonoid, AbstractQuasigroup, AbstractSemigroup, Additive, Id, Identity, TwoSidedInverse, Module,
Multiplicative, Real, Multiplicative, Real,
}; };
use alga::linear::{ use alga::linear::{
@ -42,9 +42,9 @@ impl<N: Real> AbstractMagma<Additive> for Quaternion<N> {
} }
} }
impl<N: Real> Inverse<Additive> for Quaternion<N> { impl<N: Real> TwoSidedInverse<Additive> for Quaternion<N> {
#[inline] #[inline]
fn inverse(&self) -> Self { fn two_sided_inverse(&self) -> Self {
-self -self
} }
} }
@ -173,14 +173,14 @@ impl<N: Real> AbstractMagma<Multiplicative> for UnitQuaternion<N> {
} }
} }
impl<N: Real> Inverse<Multiplicative> for UnitQuaternion<N> { impl<N: Real> TwoSidedInverse<Multiplicative> for UnitQuaternion<N> {
#[inline] #[inline]
fn inverse(&self) -> Self { fn two_sided_inverse(&self) -> Self {
self.inverse() self.inverse()
} }
#[inline] #[inline]
fn inverse_mut(&mut self) { fn two_sided_inverse_mut(&mut self) {
self.inverse_mut() self.inverse_mut()
} }
} }


@ -1,6 +1,6 @@
use alga::general::{ use alga::general::{
AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, AbstractQuasigroup, AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, AbstractQuasigroup,
AbstractSemigroup, Id, Identity, Inverse, Multiplicative, Real, AbstractSemigroup, Id, Identity, TwoSidedInverse, Multiplicative, Real,
}; };
use alga::linear::{ use alga::linear::{
self, AffineTransformation, DirectIsometry, Isometry, OrthogonalTransformation, self, AffineTransformation, DirectIsometry, Isometry, OrthogonalTransformation,
@ -27,16 +27,16 @@ where DefaultAllocator: Allocator<N, D, D>
} }
} }
impl<N: Real, D: DimName> Inverse<Multiplicative> for Rotation<N, D> impl<N: Real, D: DimName> TwoSidedInverse<Multiplicative> for Rotation<N, D>
where DefaultAllocator: Allocator<N, D, D> where DefaultAllocator: Allocator<N, D, D>
{ {
#[inline] #[inline]
fn inverse(&self) -> Self { fn two_sided_inverse(&self) -> Self {
self.transpose() self.transpose()
} }
#[inline] #[inline]
fn inverse_mut(&mut self) { fn two_sided_inverse_mut(&mut self) {
self.transpose_mut() self.transpose_mut()
} }
} }


@ -1,6 +1,6 @@
use alga::general::{ use alga::general::{
AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, AbstractQuasigroup, AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, AbstractQuasigroup,
AbstractSemigroup, Identity, Inverse, Multiplicative, Real, AbstractSemigroup, Identity, TwoSidedInverse, Multiplicative, Real,
}; };
use alga::linear::Similarity as AlgaSimilarity; use alga::linear::Similarity as AlgaSimilarity;
use alga::linear::{AffineTransformation, ProjectiveTransformation, Rotation, Transformation}; use alga::linear::{AffineTransformation, ProjectiveTransformation, Rotation, Transformation};
@ -27,18 +27,18 @@ where
} }
} }
impl<N: Real, D: DimName, R> Inverse<Multiplicative> for Similarity<N, D, R> impl<N: Real, D: DimName, R> TwoSidedInverse<Multiplicative> for Similarity<N, D, R>
where where
R: Rotation<Point<N, D>>, R: Rotation<Point<N, D>>,
DefaultAllocator: Allocator<N, D>, DefaultAllocator: Allocator<N, D>,
{ {
#[inline] #[inline]
fn inverse(&self) -> Self { fn two_sided_inverse(&self) -> Self {
self.inverse() self.inverse()
} }
#[inline] #[inline]
fn inverse_mut(&mut self) { fn two_sided_inverse_mut(&mut self) {
self.inverse_mut() self.inverse_mut()
} }
} }


@ -222,8 +222,8 @@ similarity_binop_assign_impl_all!(
DivAssign, div_assign; DivAssign, div_assign;
self: Similarity<N, D, R>, rhs: R; self: Similarity<N, D, R>, rhs: R;
// FIXME: don't invert explicitly? // FIXME: don't invert explicitly?
[val] => *self *= rhs.inverse(); [val] => *self *= rhs.two_sided_inverse();
[ref] => *self *= rhs.inverse(); [ref] => *self *= rhs.two_sided_inverse();
); );
// Similarity × R // Similarity × R


@ -1,6 +1,6 @@
use alga::general::{ use alga::general::{
AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, AbstractQuasigroup, AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, AbstractQuasigroup,
AbstractSemigroup, Identity, Inverse, Multiplicative, Real, AbstractSemigroup, Identity, TwoSidedInverse, Multiplicative, Real,
}; };
use alga::linear::{ProjectiveTransformation, Transformation}; use alga::linear::{ProjectiveTransformation, Transformation};
@ -26,18 +26,18 @@ where
} }
} }
impl<N: Real, D: DimNameAdd<U1>, C> Inverse<Multiplicative> for Transform<N, D, C> impl<N: Real, D: DimNameAdd<U1>, C> TwoSidedInverse<Multiplicative> for Transform<N, D, C>
where where
C: SubTCategoryOf<TProjective>, C: SubTCategoryOf<TProjective>,
DefaultAllocator: Allocator<N, DimNameSum<D, U1>, DimNameSum<D, U1>>, DefaultAllocator: Allocator<N, DimNameSum<D, U1>, DimNameSum<D, U1>>,
{ {
#[inline] #[inline]
fn inverse(&self) -> Self { fn two_sided_inverse(&self) -> Self {
self.clone().inverse() self.clone().inverse()
} }
#[inline] #[inline]
fn inverse_mut(&mut self) { fn two_sided_inverse_mut(&mut self) {
self.inverse_mut() self.inverse_mut()
} }
} }
@ -116,12 +116,12 @@ where
{ {
#[inline] #[inline]
fn inverse_transform_point(&self, pt: &Point<N, D>) -> Point<N, D> { fn inverse_transform_point(&self, pt: &Point<N, D>) -> Point<N, D> {
self.inverse() * pt self.two_sided_inverse() * pt
} }
#[inline] #[inline]
fn inverse_transform_vector(&self, v: &VectorN<N, D>) -> VectorN<N, D> { fn inverse_transform_vector(&self, v: &VectorN<N, D>) -> VectorN<N, D> {
self.inverse() * v self.two_sided_inverse() * v
} }
} }


@ -1,6 +1,6 @@
use alga::general::{ use alga::general::{
AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, AbstractQuasigroup, AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, AbstractQuasigroup,
AbstractSemigroup, Id, Identity, Inverse, Multiplicative, Real, AbstractSemigroup, Id, Identity, TwoSidedInverse, Multiplicative, Real,
}; };
use alga::linear::Translation as AlgaTranslation; use alga::linear::Translation as AlgaTranslation;
use alga::linear::{ use alga::linear::{
@ -28,16 +28,16 @@ where DefaultAllocator: Allocator<N, D>
} }
} }
impl<N: Real, D: DimName> Inverse<Multiplicative> for Translation<N, D> impl<N: Real, D: DimName> TwoSidedInverse<Multiplicative> for Translation<N, D>
where DefaultAllocator: Allocator<N, D> where DefaultAllocator: Allocator<N, D>
{ {
#[inline] #[inline]
fn inverse(&self) -> Self { fn two_sided_inverse(&self) -> Self {
self.inverse() self.inverse()
} }
#[inline] #[inline]
fn inverse_mut(&mut self) { fn two_sided_inverse_mut(&mut self) {
self.inverse_mut() self.inverse_mut()
} }
} }


@ -0,0 +1,44 @@
use std::mem;
use std::ops::{Deref, DerefMut};
use base::allocator::Allocator;
use base::coordinates::{X, XY, XYZ, XYZW, XYZWA, XYZWAB};
use base::dimension::{U1, U2, U3, U4, U5, U6};
use base::{DefaultAllocator, Scalar};
use geometry::Translation;
/*
*
* Give coordinates to Translation{1 .. 6}
*
*/
macro_rules! deref_impl(
($D: ty, $Target: ident $(, $comps: ident)*) => {
impl<N: Scalar> Deref for Translation<N, $D>
where DefaultAllocator: Allocator<N, $D> {
type Target = $Target<N>;
#[inline]
fn deref(&self) -> &Self::Target {
unsafe { mem::transmute(self) }
}
}
impl<N: Scalar> DerefMut for Translation<N, $D>
where DefaultAllocator: Allocator<N, $D> {
#[inline]
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { mem::transmute(self) }
}
}
}
);
deref_impl!(U1, X, x);
deref_impl!(U2, XY, x, y);
deref_impl!(U3, XYZ, x, y, z);
deref_impl!(U4, XYZW, x, y, z, w);
deref_impl!(U5, XYZWA, x, y, z, w, a);
deref_impl!(U6, XYZWAB, x, y, z, w, a, b);
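A sketch of what these `Deref` impls enable, i.e., named coordinate access on translations (not part of the diff):

```rust
use nalgebra::Translation3;

let t = Translation3::new(1.0, 2.0, 3.0);
// The `x`, `y`, `z` fields come from the Deref target `XYZ<N>` above.
assert_eq!((t.x, t.y, t.z), (1.0, 2.0, 3.0));
```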


@ -1,6 +1,6 @@
use alga::general::{ use alga::general::{
AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, AbstractQuasigroup, AbstractGroup, AbstractLoop, AbstractMagma, AbstractMonoid, AbstractQuasigroup,
AbstractSemigroup, Id, Identity, Inverse, Multiplicative, Real, AbstractSemigroup, Id, Identity, TwoSidedInverse, Multiplicative, Real,
}; };
use alga::linear::{ use alga::linear::{
AffineTransformation, DirectIsometry, Isometry, OrthogonalTransformation, AffineTransformation, DirectIsometry, Isometry, OrthogonalTransformation,
@ -31,14 +31,14 @@ impl<N: Real> AbstractMagma<Multiplicative> for UnitComplex<N> {
} }
} }
impl<N: Real> Inverse<Multiplicative> for UnitComplex<N> { impl<N: Real> TwoSidedInverse<Multiplicative> for UnitComplex<N> {
#[inline] #[inline]
fn inverse(&self) -> Self { fn two_sided_inverse(&self) -> Self {
self.inverse() self.inverse()
} }
#[inline] #[inline]
fn inverse_mut(&mut self) { fn two_sided_inverse_mut(&mut self) {
self.inverse_mut() self.inverse_mut()
} }
} }

src/io/matrix_market.pest (new file, 16 lines)

@ -0,0 +1,16 @@
WHITESPACE = _{ " " }
Comments = _{ "%" ~ (!NEWLINE ~ ANY)* }
Header = { "%%" ~ (!NEWLINE ~ ANY)* }
Shape = { Dimension ~ Dimension ~ Dimension }
Document = {
SOI ~
NEWLINE* ~
Header ~
(NEWLINE ~ Comments)* ~
(NEWLINE ~ Shape) ~
(NEWLINE ~ Entry?)*
}
Dimension = @{ ASCII_DIGIT+ }
Value = @{ ("+" | "-")? ~ NUMBER+ ~ ("." ~ NUMBER+)? ~ ("e" ~ ("+" | "-")? ~ NUMBER+)? }
Entry = { Dimension ~ Dimension ~ Value }

src/io/matrix_market.rs (new file, 53 lines)

@ -0,0 +1,53 @@
use std::fs;
use std::path::Path;
use pest::Parser;
use sparse::CsMatrix;
use Real;
#[derive(Parser)]
#[grammar = "io/matrix_market.pest"]
struct MatrixMarketParser;
// FIXME: return an Error instead of an Option.
/// Parses a Matrix Market file at the given path, and returns the corresponding sparse matrix.
pub fn cs_matrix_from_matrix_market<N: Real, P: AsRef<Path>>(path: P) -> Option<CsMatrix<N>> {
let file = fs::read_to_string(path).ok()?;
cs_matrix_from_matrix_market_str(&file)
}
// FIXME: return an Error instead of an Option.
/// Parses a Matrix Market file described by the given string, and returns the corresponding sparse matrix.
pub fn cs_matrix_from_matrix_market_str<N: Real>(data: &str) -> Option<CsMatrix<N>> {
let file = MatrixMarketParser::parse(Rule::Document, data)
.unwrap()
.next()?;
let mut shape = (0, 0, 0);
let mut rows: Vec<usize> = Vec::new();
let mut cols: Vec<usize> = Vec::new();
let mut data: Vec<N> = Vec::new();
for line in file.into_inner() {
match line.as_rule() {
Rule::Header => {}
Rule::Shape => {
let mut inner = line.into_inner();
shape.0 = inner.next()?.as_str().parse::<usize>().ok()?;
shape.1 = inner.next()?.as_str().parse::<usize>().ok()?;
shape.2 = inner.next()?.as_str().parse::<usize>().ok()?;
}
Rule::Entry => {
let mut inner = line.into_inner();
// NOTE: indices are 1-based.
rows.push(inner.next()?.as_str().parse::<usize>().ok()? - 1);
cols.push(inner.next()?.as_str().parse::<usize>().ok()? - 1);
data.push(::convert(inner.next()?.as_str().parse::<f64>().ok()?));
}
_ => return None, // FIXME: return an Err instead.
}
}
Some(CsMatrix::from_triplet(
shape.0, shape.1, &rows, &cols, &data,
))
}
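A small usage sketch for the parser (assuming nalgebra is built with the new `io` and `sparse` features; the Matrix Market content is made up for illustration):

```rust
use nalgebra::io::cs_matrix_from_matrix_market_str;

let mm = "%%MatrixMarket matrix coordinate real general\n2 2 2\n1 1 1.0\n2 2 2.5";
let m = cs_matrix_from_matrix_market_str::<f64>(mm).expect("parse failed");
assert_eq!(m.shape(), (2, 2));
```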

src/io/mod.rs (new file, 5 lines)

@ -0,0 +1,5 @@
//! Parsers for various matrix formats.
pub use self::matrix_market::{cs_matrix_from_matrix_market, cs_matrix_from_matrix_market_str};
mod matrix_market;


@ -81,10 +81,12 @@ an optimized set of tools for computer graphics and physics. Those features incl
#![deny(non_upper_case_globals)] #![deny(non_upper_case_globals)]
#![deny(unused_qualifications)] #![deny(unused_qualifications)]
#![deny(unused_results)] #![deny(unused_results)]
#![deny(missing_docs)] #![warn(missing_docs)] // FIXME: deny this
#![warn(incoherent_fundamental_impls)] #![warn(incoherent_fundamental_impls)]
#![doc(html_favicon_url = "http://nalgebra.org/img/favicon.ico", #![doc(
html_root_url = "http://nalgebra.org/rustdoc")] html_favicon_url = "http://nalgebra.org/img/favicon.ico",
html_root_url = "http://nalgebra.org/rustdoc"
)]
#![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr(not(feature = "std"), no_std)]
#![cfg_attr(all(feature = "alloc", not(feature = "std")), feature(alloc))] #![cfg_attr(all(feature = "alloc", not(feature = "std")), feature(alloc))]
@ -121,11 +123,21 @@ extern crate alloc;
#[cfg(not(feature = "std"))] #[cfg(not(feature = "std"))]
extern crate core as std; extern crate core as std;
#[cfg(feature = "io")]
extern crate pest;
#[macro_use]
#[cfg(feature = "io")]
extern crate pest_derive;
pub mod base; pub mod base;
#[cfg(feature = "debug")] #[cfg(feature = "debug")]
pub mod debug; pub mod debug;
pub mod geometry; pub mod geometry;
#[cfg(feature = "io")]
pub mod io;
pub mod linalg; pub mod linalg;
#[cfg(feature = "sparse")]
pub mod sparse;
#[cfg(feature = "std")] #[cfg(feature = "std")]
#[deprecated( #[deprecated(
@ -135,11 +147,13 @@ pub use base as core;
pub use base::*; pub use base::*;
pub use geometry::*; pub use geometry::*;
pub use linalg::*; pub use linalg::*;
#[cfg(feature = "sparse")]
pub use sparse::*;
use std::cmp::{self, Ordering, PartialOrd}; use std::cmp::{self, Ordering, PartialOrd};
use alga::general::{ use alga::general::{
Additive, AdditiveGroup, Identity, Inverse, JoinSemilattice, Lattice, MeetSemilattice, Additive, AdditiveGroup, Identity, TwoSidedInverse, JoinSemilattice, Lattice, MeetSemilattice,
Multiplicative, SupersetOf, Multiplicative, SupersetOf,
}; };
use alga::linear::SquareMatrix as AlgaSquareMatrix; use alga::linear::SquareMatrix as AlgaSquareMatrix;
@ -413,8 +427,8 @@ pub fn try_inverse<M: AlgaSquareMatrix>(m: &M) -> Option<M> {
/// ///
/// * [`try_inverse`](fn.try_inverse.html) /// * [`try_inverse`](fn.try_inverse.html)
#[inline] #[inline]
pub fn inverse<M: Inverse<Multiplicative>>(m: &M) -> M { pub fn inverse<M: TwoSidedInverse<Multiplicative>>(m: &M) -> M {
m.inverse() m.two_sided_inverse()
} }
/* /*

src/sparse/cs_matrix.rs (new file, 517 lines)

@ -0,0 +1,517 @@
use alga::general::ClosedAdd;
use num::Zero;
use std::iter;
use std::marker::PhantomData;
use std::ops::Range;
use std::slice;
use allocator::Allocator;
use sparse::cs_utils;
use {
DefaultAllocator, Dim, Dynamic, Scalar, Vector, VectorN, U1
};
pub struct ColumnEntries<'a, N> {
curr: usize,
i: &'a [usize],
v: &'a [N],
}
impl<'a, N> ColumnEntries<'a, N> {
#[inline]
pub fn new(i: &'a [usize], v: &'a [N]) -> Self {
assert_eq!(i.len(), v.len());
ColumnEntries { curr: 0, i, v }
}
}
impl<'a, N: Copy> Iterator for ColumnEntries<'a, N> {
type Item = (usize, N);
#[inline]
fn next(&mut self) -> Option<(usize, N)> {
if self.curr >= self.i.len() {
None
} else {
let res = Some((unsafe { *self.i.get_unchecked(self.curr) }, unsafe {
*self.v.get_unchecked(self.curr)
}));
self.curr += 1;
res
}
}
}
// FIXME: this structure exists for now only because impl trait
// cannot be used for trait method return types.
/// Trait for iterable compressed-column matrix storage.
pub trait CsStorageIter<'a, N, R, C = U1> {
/// Iterator through all the rows of a specific column.
///
/// The elements are given as a tuple (row_index, value).
type ColumnEntries: Iterator<Item = (usize, N)>;
/// Iterator through the row indices of a specific column.
type ColumnRowIndices: Iterator<Item = usize>;
/// Iterates through all the row indices of the j-th column.
fn column_row_indices(&'a self, j: usize) -> Self::ColumnRowIndices;
#[inline(always)]
/// Iterates through all the entries of the j-th column.
fn column_entries(&'a self, j: usize) -> Self::ColumnEntries;
}
/// Trait for mutably iterable compressed-column sparse matrix storage.
pub trait CsStorageIterMut<'a, N: 'a, R, C = U1> {
/// Mutable iterator through all the values of the sparse matrix.
type ValuesMut: Iterator<Item = &'a mut N>;
/// Mutable iterator through all the rows of a specific column.
///
/// The elements are given as a tuple (row_index, value).
type ColumnEntriesMut: Iterator<Item = (usize, &'a mut N)>;
/// A mutable iterator through the values buffer of the sparse matrix.
fn values_mut(&'a mut self) -> Self::ValuesMut;
/// Iterates mutably through all the entries of the j-th column.
fn column_entries_mut(&'a mut self, j: usize) -> Self::ColumnEntriesMut;
}
/// Trait for compressed column sparse matrix storage.
pub trait CsStorage<N, R, C = U1>: for<'a> CsStorageIter<'a, N, R, C> {
/// The shape of the stored matrix.
fn shape(&self) -> (R, C);
/// Retrieve the i-th row index of the underlying row index buffer.
///
/// No bound-checking is performed.
unsafe fn row_index_unchecked(&self, i: usize) -> usize;
/// The i-th value on the contiguous value buffer of this storage.
///
/// No bound-checking is performed.
unsafe fn get_value_unchecked(&self, i: usize) -> &N;
/// The i-th value on the contiguous value buffer of this storage.
fn get_value(&self, i: usize) -> &N;
/// Retrieve the i-th row index of the underlying row index buffer.
fn row_index(&self, i: usize) -> usize;
/// The value indices for the `i`-th column.
fn column_range(&self, i: usize) -> Range<usize>;
/// The size of the value buffer (i.e., the number of entries that may be non-zero).
fn len(&self) -> usize;
}
/// Trait for compressed column sparse matrix mutable storage.
pub trait CsStorageMut<N, R, C = U1>:
CsStorage<N, R, C> + for<'a> CsStorageIterMut<'a, N, R, C>
{
}
/// A storage of column-compressed sparse matrix based on a Vec.
#[derive(Clone, Debug, PartialEq)]
pub struct CsVecStorage<N: Scalar, R: Dim, C: Dim>
where DefaultAllocator: Allocator<usize, C>
{
pub(crate) shape: (R, C),
pub(crate) p: VectorN<usize, C>,
pub(crate) i: Vec<usize>,
pub(crate) vals: Vec<N>,
}
impl<N: Scalar, R: Dim, C: Dim> CsVecStorage<N, R, C>
where DefaultAllocator: Allocator<usize, C>
{
/// The value buffer of this storage.
pub fn values(&self) -> &[N] {
&self.vals
}
/// The column shifts buffer.
pub fn p(&self) -> &[usize] {
self.p.as_slice()
}
/// The row index buffer.
pub fn i(&self) -> &[usize] {
&self.i
}
}
impl<N: Scalar, R: Dim, C: Dim> CsVecStorage<N, R, C> where DefaultAllocator: Allocator<usize, C> {}
impl<'a, N: Scalar, R: Dim, C: Dim> CsStorageIter<'a, N, R, C> for CsVecStorage<N, R, C>
where DefaultAllocator: Allocator<usize, C>
{
type ColumnEntries = ColumnEntries<'a, N>;
type ColumnRowIndices = iter::Cloned<slice::Iter<'a, usize>>;
#[inline]
fn column_entries(&'a self, j: usize) -> Self::ColumnEntries {
let rng = self.column_range(j);
ColumnEntries::new(&self.i[rng.clone()], &self.vals[rng])
}
#[inline]
fn column_row_indices(&'a self, j: usize) -> Self::ColumnRowIndices {
let rng = self.column_range(j);
self.i[rng.clone()].iter().cloned()
}
}
impl<N: Scalar, R: Dim, C: Dim> CsStorage<N, R, C> for CsVecStorage<N, R, C>
where DefaultAllocator: Allocator<usize, C>
{
#[inline]
fn shape(&self) -> (R, C) {
self.shape
}
#[inline]
fn len(&self) -> usize {
self.vals.len()
}
#[inline]
fn row_index(&self, i: usize) -> usize {
self.i[i]
}
#[inline]
unsafe fn row_index_unchecked(&self, i: usize) -> usize {
*self.i.get_unchecked(i)
}
#[inline]
unsafe fn get_value_unchecked(&self, i: usize) -> &N {
self.vals.get_unchecked(i)
}
#[inline]
fn get_value(&self, i: usize) -> &N {
&self.vals[i]
}
#[inline]
fn column_range(&self, j: usize) -> Range<usize> {
let end = if j + 1 == self.p.len() {
self.len()
} else {
self.p[j + 1]
};
self.p[j]..end
}
}
impl<'a, N: Scalar, R: Dim, C: Dim> CsStorageIterMut<'a, N, R, C> for CsVecStorage<N, R, C>
where DefaultAllocator: Allocator<usize, C>
{
type ValuesMut = slice::IterMut<'a, N>;
type ColumnEntriesMut = iter::Zip<iter::Cloned<slice::Iter<'a, usize>>, slice::IterMut<'a, N>>;
#[inline]
fn values_mut(&'a mut self) -> Self::ValuesMut {
self.vals.iter_mut()
}
#[inline]
fn column_entries_mut(&'a mut self, j: usize) -> Self::ColumnEntriesMut {
let rng = self.column_range(j);
self.i[rng.clone()]
.iter()
.cloned()
.zip(self.vals[rng].iter_mut())
}
}
impl<N: Scalar, R: Dim, C: Dim> CsStorageMut<N, R, C> for CsVecStorage<N, R, C> where DefaultAllocator: Allocator<usize, C>
{}
/*
pub struct CsSliceStorage<'a, N: Scalar, R: Dim, C: DimAdd<U1>> {
shape: (R, C),
p: VectorSlice<usize, DimSum<C, U1>>,
i: VectorSlice<usize, Dynamic>,
vals: VectorSlice<N, Dynamic>,
}*/
/// A compressed sparse column matrix.
#[derive(Clone, Debug, PartialEq)]
pub struct CsMatrix<
N: Scalar,
R: Dim = Dynamic,
C: Dim = Dynamic,
S: CsStorage<N, R, C> = CsVecStorage<N, R, C>,
> {
pub(crate) data: S,
_phantoms: PhantomData<(N, R, C)>,
}
/// A column compressed sparse vector.
pub type CsVector<N, R = Dynamic, S = CsVecStorage<N, R, U1>> = CsMatrix<N, R, U1, S>;
impl<N: Scalar, R: Dim, C: Dim> CsMatrix<N, R, C>
where DefaultAllocator: Allocator<usize, C>
{
/// Creates a new compressed sparse column matrix with the specified dimension and
/// `nvals` possible non-zero values.
pub fn new_uninitialized_generic(nrows: R, ncols: C, nvals: usize) -> Self {
let mut i = Vec::with_capacity(nvals);
unsafe {
i.set_len(nvals);
}
i.shrink_to_fit();
let mut vals = Vec::with_capacity(nvals);
unsafe {
vals.set_len(nvals);
}
vals.shrink_to_fit();
CsMatrix {
data: CsVecStorage {
shape: (nrows, ncols),
p: VectorN::zeros_generic(ncols, U1),
i,
vals,
},
_phantoms: PhantomData,
}
}
/*
pub(crate) fn from_parts_generic(
nrows: R,
ncols: C,
p: VectorN<usize, C>,
i: Vec<usize>,
vals: Vec<N>,
) -> Self
where
N: Zero + ClosedAdd,
DefaultAllocator: Allocator<N, R>,
{
assert_eq!(ncols.value(), p.len(), "Invalid inptr size.");
assert_eq!(i.len(), vals.len(), "Invalid value size.");
// Check p.
for ptr in &p {
assert!(*ptr < i.len(), "Invalid inptr value.");
}
for ptr in p.as_slice().windows(2) {
assert!(ptr[0] <= ptr[1], "Invalid inptr ordering.");
}
// Check i.
for i in &i {
assert!(*i < nrows.value(), "Invalid row ptr value.")
}
let mut res = CsMatrix {
data: CsVecStorage {
shape: (nrows, ncols),
p,
i,
vals,
},
_phantoms: PhantomData,
};
// Sort and remove duplicates.
res.sort();
res.dedup();
res
}*/
}
/*
impl<N: Scalar + Zero + ClosedAdd> CsMatrix<N> {
pub(crate) fn from_parts(
nrows: usize,
ncols: usize,
p: Vec<usize>,
i: Vec<usize>,
vals: Vec<N>,
) -> Self
{
let nrows = Dynamic::new(nrows);
let ncols = Dynamic::new(ncols);
let p = DVector::from_data(VecStorage::new(ncols, U1, p));
Self::from_parts_generic(nrows, ncols, p, i, vals)
}
}
*/
impl<N: Scalar, R: Dim, C: Dim, S: CsStorage<N, R, C>> CsMatrix<N, R, C, S> {
pub(crate) fn from_data(data: S) -> Self {
CsMatrix {
data,
_phantoms: PhantomData,
}
}
/// The size of the data buffer.
pub fn len(&self) -> usize {
self.data.len()
}
/// The number of rows of this matrix.
pub fn nrows(&self) -> usize {
self.data.shape().0.value()
}
/// The number of columns of this matrix.
pub fn ncols(&self) -> usize {
self.data.shape().1.value()
}
/// The shape of this matrix.
pub fn shape(&self) -> (usize, usize) {
let (nrows, ncols) = self.data.shape();
(nrows.value(), ncols.value())
}
/// Whether this matrix is square or not.
pub fn is_square(&self) -> bool {
let (nrows, ncols) = self.data.shape();
nrows.value() == ncols.value()
}
/// Should always return `true`.
///
/// This method is generally used for debugging and should typically not be called in user code.
/// This checks that the inner row indices of this matrix are sorted. It takes `O(n)` time,
/// where `n` is `self.len()`.
/// All operations on CSC matrices in nalgebra assume, and will return, sorted indices.
/// If this `is_sorted` method ever returns `false`, then something went wrong
/// and an issue should be opened on the nalgebra repository with details on how to reproduce
/// it.
pub fn is_sorted(&self) -> bool {
for j in 0..self.ncols() {
let mut curr = None;
for idx in self.data.column_row_indices(j) {
if let Some(curr) = curr {
if idx <= curr {
return false;
}
}
curr = Some(idx);
}
}
true
}
/// Computes the transpose of this sparse matrix.
pub fn transpose(&self) -> CsMatrix<N, C, R>
where DefaultAllocator: Allocator<usize, R> {
let (nrows, ncols) = self.data.shape();
let nvals = self.len();
let mut res = CsMatrix::new_uninitialized_generic(ncols, nrows, nvals);
let mut workspace = Vector::zeros_generic(nrows, U1);
// Compute p.
for i in 0..nvals {
let row_id = self.data.row_index(i);
workspace[row_id] += 1;
}
let _ = cs_utils::cumsum(&mut workspace, &mut res.data.p);
// Fill the result.
for j in 0..ncols.value() {
for (row_id, value) in self.data.column_entries(j) {
let shift = workspace[row_id];
res.data.vals[shift] = value;
res.data.i[shift] = j;
workspace[row_id] += 1;
}
}
res
}
}
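A sketch of basic usage, relying on the `from_triplet` constructor that the Matrix Market parser above already calls (it is defined elsewhere in this PR; assuming the `sparse` feature is enabled):

```rust
use nalgebra::CsMatrix;

// Triplet form: (row, column, value).
let rows = [0usize, 1, 2];
let cols = [0usize, 1, 2];
let vals = [1.0f64, 2.0, 3.0];
let m = CsMatrix::from_triplet(3, 3, &rows, &cols, &vals);

assert_eq!(m.shape(), (3, 3));
assert!(m.is_square());
assert_eq!(m.transpose().shape(), (3, 3));
```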
impl<N: Scalar, R: Dim, C: Dim, S: CsStorageMut<N, R, C>> CsMatrix<N, R, C, S> {
/// Iterator through all the mutable values of this sparse matrix.
#[inline]
pub fn values_mut(&mut self) -> impl Iterator<Item = &mut N> {
self.data.values_mut()
}
}
impl<N: Scalar, R: Dim, C: Dim> CsMatrix<N, R, C>
where DefaultAllocator: Allocator<usize, C>
{
pub(crate) fn sort(&mut self)
where DefaultAllocator: Allocator<N, R> {
// Size = R
let nrows = self.data.shape().0;
let mut workspace = unsafe { VectorN::new_uninitialized_generic(nrows, U1) };
self.sort_with_workspace(workspace.as_mut_slice());
}
pub(crate) fn sort_with_workspace(&mut self, workspace: &mut [N]) {
assert!(
workspace.len() >= self.nrows(),
"Workspace must be able to hold at least self.nrows() elements."
);
for j in 0..self.ncols() {
// Scatter the row in the workspace.
for (irow, val) in self.data.column_entries(j) {
workspace[irow] = val;
}
// Sort the index vector.
let range = self.data.column_range(j);
self.data.i[range.clone()].sort();
// Permute the values too.
for (i, irow) in range.clone().zip(self.data.i[range].iter().cloned()) {
self.data.vals[i] = workspace[irow];
}
}
}
// Remove duplicate entries from a sorted CsMatrix.
pub(crate) fn dedup(&mut self)
where N: Zero + ClosedAdd {
let mut curr_i = 0;
for j in 0..self.ncols() {
let range = self.data.column_range(j);
self.data.p[j] = curr_i;
if range.start != range.end {
let mut value = N::zero();
let mut irow = self.data.i[range.start];
for idx in range {
let curr_irow = self.data.i[idx];
if curr_irow == irow {
value += self.data.vals[idx];
} else {
self.data.i[curr_i] = irow;
self.data.vals[curr_i] = value;
value = self.data.vals[idx];
irow = curr_irow;
curr_i += 1;
}
}
// Handle the last entry.
self.data.i[curr_i] = irow;
self.data.vals[curr_i] = value;
curr_i += 1;
}
}
self.data.i.truncate(curr_i);
self.data.i.shrink_to_fit();
self.data.vals.truncate(curr_i);
self.data.vals.shrink_to_fit();
}
}


@ -0,0 +1,408 @@
use std::iter;
use std::mem;
use allocator::Allocator;
use sparse::{CsMatrix, CsStorage, CsStorageIter, CsStorageIterMut, CsVecStorage};
use {DefaultAllocator, Dim, Real, VectorN, U1};
/// The cholesky decomposition of a column compressed sparse matrix.
pub struct CsCholesky<N: Real, D: Dim>
where DefaultAllocator: Allocator<usize, D> + Allocator<N, D>
{
// Non-zero pattern of the original matrix upper-triangular part.
// Unlike the original matrix, the `original_p` array does contain the last sentinel value
// equal to `original_i.len()` at the end.
original_p: Vec<usize>,
original_i: Vec<usize>,
// Decomposition result.
l: CsMatrix<N, D, D>,
// Used only for the pattern.
// FIXME: store only the nonzero pattern instead.
u: CsMatrix<N, D, D>,
ok: bool,
// Workspaces.
work_x: VectorN<N, D>,
work_c: VectorN<usize, D>,
}
impl<N: Real, D: Dim> CsCholesky<N, D>
where DefaultAllocator: Allocator<usize, D> + Allocator<N, D>
{
/// Computes the cholesky decomposition of the sparse matrix `m`.
pub fn new(m: &CsMatrix<N, D, D>) -> Self {
let mut me = Self::new_symbolic(m);
let _ = me.decompose_left_looking(&m.data.vals);
me
}
/// Perform symbolic analysis for the given matrix.
///
/// This does not access the numerical values of `m`.
pub fn new_symbolic(m: &CsMatrix<N, D, D>) -> Self {
assert!(
m.is_square(),
"The matrix `m` must be square to compute its elimination tree."
);
let (l, u) = Self::nonzero_pattern(m);
// Workspaces.
let work_x = unsafe { VectorN::new_uninitialized_generic(m.data.shape().0, U1) };
let work_c = unsafe { VectorN::new_uninitialized_generic(m.data.shape().1, U1) };
let mut original_p = m.data.p.as_slice().to_vec();
original_p.push(m.data.i.len());
CsCholesky {
original_p,
original_i: m.data.i.clone(),
l,
u,
ok: false,
work_x,
work_c,
}
}
/// The lower-triangular matrix of the Cholesky decomposition.
pub fn l(&self) -> Option<&CsMatrix<N, D, D>> {
if self.ok {
Some(&self.l)
} else {
None
}
}
/// Extracts the lower-triangular matrix of the Cholesky decomposition.
pub fn unwrap_l(self) -> Option<CsMatrix<N, D, D>> {
if self.ok {
Some(self.l)
} else {
None
}
}
/// Performs a numerical left-looking Cholesky decomposition of a matrix with the same structure as the
/// one used to initialize `self`, but with different non-zero values provided by `values`.
pub fn decompose_left_looking(&mut self, values: &[N]) -> bool {
assert!(
values.len() >= self.original_i.len(),
"The set of values is too small."
);
let n = self.l.nrows();
// Reset `work_c` to the column pointers of `l`.
self.work_c.copy_from(&self.l.data.p);
unsafe {
for k in 0..n {
// Scatter the k-th column of the original matrix with the values provided.
let range_k =
*self.original_p.get_unchecked(k)..*self.original_p.get_unchecked(k + 1);
*self.work_x.vget_unchecked_mut(k) = N::zero();
for p in range_k.clone() {
let irow = *self.original_i.get_unchecked(p);
if irow >= k {
*self.work_x.vget_unchecked_mut(irow) = *values.get_unchecked(p);
}
}
for j in self.u.data.column_row_indices(k) {
let factor = -*self
.l
.data
.vals
.get_unchecked(*self.work_c.vget_unchecked(j));
*self.work_c.vget_unchecked_mut(j) += 1;
if j < k {
for (z, val) in self.l.data.column_entries(j) {
if z >= k {
*self.work_x.vget_unchecked_mut(z) += val * factor;
}
}
}
}
let diag = *self.work_x.vget_unchecked(k);
if diag > N::zero() {
let denom = diag.sqrt();
*self
.l
.data
.vals
.get_unchecked_mut(*self.l.data.p.vget_unchecked(k)) = denom;
for (p, val) in self.l.data.column_entries_mut(k) {
*val = *self.work_x.vget_unchecked(p) / denom;
*self.work_x.vget_unchecked_mut(p) = N::zero();
}
} else {
self.ok = false;
return false;
}
}
}
self.ok = true;
true
}
/// Performs a numerical up-looking Cholesky decomposition of a matrix with the same structure as the
/// one used to initialize `self`, but with different non-zero values provided by `values`.
pub fn decompose_up_looking(&mut self, values: &[N]) -> bool {
assert!(
values.len() >= self.original_i.len(),
"The set of values is too small."
);
// Reset `work_c` to the column pointers of `l`.
self.work_c.copy_from(&self.l.data.p);
// Perform the decomposition.
for k in 0..self.l.nrows() {
unsafe {
// Scatter the k-th column of the original matrix with the values provided.
let column_range =
*self.original_p.get_unchecked(k)..*self.original_p.get_unchecked(k + 1);
*self.work_x.vget_unchecked_mut(k) = N::zero();
for p in column_range.clone() {
let irow = *self.original_i.get_unchecked(p);
if irow <= k {
*self.work_x.vget_unchecked_mut(irow) = *values.get_unchecked(p);
}
}
let mut diag = *self.work_x.vget_unchecked(k);
*self.work_x.vget_unchecked_mut(k) = N::zero();
// Triangular solve.
for irow in self.u.data.column_row_indices(k) {
if irow >= k {
continue;
}
let lki = *self.work_x.vget_unchecked(irow)
/ *self
.l
.data
.vals
.get_unchecked(*self.l.data.p.vget_unchecked(irow));
*self.work_x.vget_unchecked_mut(irow) = N::zero();
for p in
*self.l.data.p.vget_unchecked(irow) + 1..*self.work_c.vget_unchecked(irow)
{
*self
.work_x
.vget_unchecked_mut(*self.l.data.i.get_unchecked(p)) -=
*self.l.data.vals.get_unchecked(p) * lki;
}
diag -= lki * lki;
let p = *self.work_c.vget_unchecked(irow);
*self.work_c.vget_unchecked_mut(irow) += 1;
*self.l.data.i.get_unchecked_mut(p) = k;
*self.l.data.vals.get_unchecked_mut(p) = lki;
}
if diag <= N::zero() {
self.ok = false;
return false;
}
// Deal with the diagonal element.
let p = *self.work_c.vget_unchecked(k);
*self.work_c.vget_unchecked_mut(k) += 1;
*self.l.data.i.get_unchecked_mut(p) = k;
*self.l.data.vals.get_unchecked_mut(p) = diag.sqrt();
}
}
self.ok = true;
true
}
fn elimination_tree<S: CsStorage<N, D, D>>(m: &CsMatrix<N, D, D, S>) -> Vec<usize> {
let nrows = m.nrows();
let mut forest: Vec<_> = iter::repeat(usize::max_value()).take(nrows).collect();
let mut ancestor: Vec<_> = iter::repeat(usize::max_value()).take(nrows).collect();
for k in 0..nrows {
for irow in m.data.column_row_indices(k) {
let mut i = irow;
while i < k {
let i_ancestor = ancestor[i];
ancestor[i] = k;
if i_ancestor == usize::max_value() {
forest[i] = k;
break;
}
i = i_ancestor;
}
}
}
forest
}
fn reach<S: CsStorage<N, D, D>>(
m: &CsMatrix<N, D, D, S>,
j: usize,
max_j: usize,
tree: &[usize],
marks: &mut Vec<bool>,
out: &mut Vec<usize>,
)
{
marks.clear();
marks.resize(tree.len(), false);
// FIXME: avoid all those allocations.
let mut tmp = Vec::new();
let mut res = Vec::new();
for irow in m.data.column_row_indices(j) {
let mut curr = irow;
while curr != usize::max_value() && curr <= max_j && !marks[curr] {
marks[curr] = true;
tmp.push(curr);
curr = tree[curr];
}
tmp.append(&mut res);
mem::swap(&mut tmp, &mut res);
}
out.append(&mut res);
}
fn nonzero_pattern<S: CsStorage<N, D, D>>(
m: &CsMatrix<N, D, D, S>,
) -> (CsMatrix<N, D, D>, CsMatrix<N, D, D>) {
let etree = Self::elimination_tree(m);
let (nrows, ncols) = m.data.shape();
let mut rows = Vec::with_capacity(m.len());
let mut cols = unsafe { VectorN::new_uninitialized_generic(m.data.shape().0, U1) };
let mut marks = Vec::new();
// NOTE: the following will actually compute the non-zero pattern of
// the transpose of l.
for i in 0..nrows.value() {
cols[i] = rows.len();
Self::reach(m, i, i, &etree, &mut marks, &mut rows);
}
let mut vals = Vec::with_capacity(rows.len());
unsafe {
vals.set_len(rows.len());
}
vals.shrink_to_fit();
let data = CsVecStorage {
shape: (nrows, ncols),
p: cols,
i: rows,
vals,
};
let u = CsMatrix::from_data(data);
// XXX: avoid this transpose.
let l = u.transpose();
(l, u)
}
/*
*
* NOTE: All the following methods are untested and currently unused.
*
*
fn column_counts<S: CsStorage<N, D, D>>(
m: &CsMatrix<N, D, D, S>,
tree: &[usize],
) -> Vec<usize> {
let len = m.data.shape().0.value();
let mut counts: Vec<_> = iter::repeat(0).take(len).collect();
let mut reach = Vec::new();
let mut marks = Vec::new();
for i in 0..len {
Self::reach(m, i, i, tree, &mut marks, &mut reach);
for j in reach.drain(..) {
counts[j] += 1;
}
}
counts
}
fn tree_postorder(tree: &[usize]) -> Vec<usize> {
// FIXME: avoid all those allocations?
let mut first_child: Vec<_> = iter::repeat(usize::max_value()).take(tree.len()).collect();
let mut other_children: Vec<_> =
iter::repeat(usize::max_value()).take(tree.len()).collect();
// Build the children list from the parent list.
// The set of children of the node `i` is given by the linked list
// starting at `first_child[i]`. The nodes of this list are then:
// { first_child[i], other_children[first_child[i]], other_children[other_children[first_child[i]], ... }
for (i, parent) in tree.iter().enumerate() {
if *parent != usize::max_value() {
let brother = first_child[*parent];
first_child[*parent] = i;
other_children[i] = brother;
}
}
let mut stack = Vec::with_capacity(tree.len());
let mut postorder = Vec::with_capacity(tree.len());
for (i, node) in tree.iter().enumerate() {
if *node == usize::max_value() {
Self::dfs(
i,
&mut first_child,
&other_children,
&mut stack,
&mut postorder,
)
}
}
postorder
}
fn dfs(
i: usize,
first_child: &mut [usize],
other_children: &[usize],
stack: &mut Vec<usize>,
result: &mut Vec<usize>,
) {
stack.clear();
stack.push(i);
while let Some(n) = stack.pop() {
let child = first_child[n];
if child == usize::max_value() {
// No children left.
result.push(n);
} else {
stack.push(n);
stack.push(child);
first_child[n] = other_children[child];
}
}
}
*/
}
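A minimal usage sketch of the decomposition above, assuming `nalgebra` is imported as `na` (as in the test files later in this diff): the symbolic analysis is done once, and the numerical factorization can then be re-run on new values sharing the same sparsity pattern. The matrix and variable names are illustrative.

use na::{CsCholesky, CsMatrix, Matrix3};

fn main() {
    // A small symmetric positive-definite matrix.
    let m = Matrix3::new(
        4.0, 1.0, 0.0,
        1.0, 5.0, 2.0,
        0.0, 2.0, 6.0,
    );
    let cs_m: CsMatrix<_, _, _> = m.into();

    // Symbolic analysis once, then one numerical factorization per set of values.
    let mut chol = CsCholesky::new_symbolic(&cs_m);
    assert!(chol.decompose_left_looking(cs_m.data.values()));

    // Check that l * lᵀ reconstructs the original matrix.
    let l: Matrix3<f64> = chol.unwrap_l().expect("positive-definite input").into();
    assert!((l * l.transpose() - m).norm() < 1.0e-7);
}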

View File

@ -0,0 +1,114 @@
use alga::general::ClosedAdd;
use num::Zero;
use allocator::Allocator;
use sparse::cs_utils;
use sparse::{CsMatrix, CsStorage};
use storage::Storage;
use {DefaultAllocator, Dim, Dynamic, Matrix, MatrixMN, Scalar};
impl<'a, N: Scalar + Zero + ClosedAdd> CsMatrix<N> {
/// Creates a column-compressed sparse matrix from a sparse matrix in triplet form.
pub fn from_triplet(
nrows: usize,
ncols: usize,
irows: &[usize],
icols: &[usize],
vals: &[N],
) -> Self
{
Self::from_triplet_generic(Dynamic::new(nrows), Dynamic::new(ncols), irows, icols, vals)
}
}
impl<'a, N: Scalar + Zero + ClosedAdd, R: Dim, C: Dim> CsMatrix<N, R, C>
where DefaultAllocator: Allocator<usize, C> + Allocator<N, R>
{
/// Creates a column-compressed sparse matrix from a sparse matrix in triplet form.
pub fn from_triplet_generic(
nrows: R,
ncols: C,
irows: &[usize],
icols: &[usize],
vals: &[N],
) -> Self
{
assert!(vals.len() == irows.len());
assert!(vals.len() == icols.len());
let mut res = CsMatrix::new_uninitialized_generic(nrows, ncols, vals.len());
let mut workspace = res.data.p.clone();
// Column count.
for j in icols.iter().cloned() {
workspace[j] += 1;
}
let _ = cs_utils::cumsum(&mut workspace, &mut res.data.p);
// Fill i and vals.
for ((i, j), val) in irows
.iter()
.cloned()
.zip(icols.iter().cloned())
.zip(vals.iter().cloned())
{
let offset = workspace[j];
res.data.i[offset] = i;
res.data.vals[offset] = val;
workspace[j] = offset + 1;
}
// Sort the result.
res.sort();
res.dedup();
res
}
}
impl<'a, N: Scalar + Zero, R: Dim, C: Dim, S> From<CsMatrix<N, R, C, S>> for MatrixMN<N, R, C>
where
S: CsStorage<N, R, C>,
DefaultAllocator: Allocator<N, R, C>,
{
fn from(m: CsMatrix<N, R, C, S>) -> Self {
let (nrows, ncols) = m.data.shape();
let mut res = MatrixMN::zeros_generic(nrows, ncols);
for j in 0..ncols.value() {
for (i, val) in m.data.column_entries(j) {
res[(i, j)] = val;
}
}
res
}
}
impl<'a, N: Scalar + Zero, R: Dim, C: Dim, S> From<Matrix<N, R, C, S>> for CsMatrix<N, R, C>
where
S: Storage<N, R, C>,
DefaultAllocator: Allocator<N, R, C> + Allocator<usize, C>,
{
fn from(m: Matrix<N, R, C, S>) -> Self {
let (nrows, ncols) = m.data.shape();
let len = m.iter().filter(|e| !e.is_zero()).count();
let mut res = CsMatrix::new_uninitialized_generic(nrows, ncols, len);
let mut nz = 0;
for j in 0..ncols.value() {
let column = m.column(j);
res.data.p[j] = nz;
for i in 0..nrows.value() {
if !column[i].is_zero() {
res.data.i[nz] = i;
res.data.vals[nz] = column[i];
nz += 1;
}
}
}
res
}
}
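A minimal sketch of the triplet constructor above, assuming `nalgebra` is imported as `na`; note that duplicate triplets are summed by the `sort`/`dedup` passes.

use na::{CsMatrix, DMatrix};

fn main() {
    // Two triplets hit (0, 0); `from_triplet` sums them during deduplication.
    let irows = [0, 1, 0, 2];
    let icols = [0, 1, 0, 2];
    let vals = [1.0, 2.0, 3.0, 4.0];
    let cs = CsMatrix::from_triplet(3, 3, &irows, &icols, &vals);

    // Convert back to a dense matrix to inspect the result.
    let dense: DMatrix<f64> = cs.into();
    let expected = DMatrix::from_row_slice(3, 3, &[
        4.0, 0.0, 0.0,
        0.0, 2.0, 0.0,
        0.0, 0.0, 4.0,
    ]);
    assert_eq!(dense, expected);
}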

304
src/sparse/cs_matrix_ops.rs Normal file
View File

@ -0,0 +1,304 @@
use alga::general::{ClosedAdd, ClosedMul};
use num::{One, Zero};
use std::ops::{Add, Mul};
use allocator::Allocator;
use constraint::{AreMultipliable, DimEq, ShapeConstraint};
use sparse::{CsMatrix, CsStorage, CsStorageMut, CsVector};
use storage::StorageMut;
use {DefaultAllocator, Dim, Scalar, Vector, VectorN, U1};
impl<N: Scalar, R: Dim, C: Dim, S: CsStorage<N, R, C>> CsMatrix<N, R, C, S> {
fn scatter<R2: Dim, C2: Dim>(
&self,
j: usize,
beta: N,
timestamps: &mut [usize],
timestamp: usize,
workspace: &mut [N],
mut nz: usize,
res: &mut CsMatrix<N, R2, C2>,
) -> usize
where
N: ClosedAdd + ClosedMul,
DefaultAllocator: Allocator<usize, C2>,
{
for (i, val) in self.data.column_entries(j) {
if timestamps[i] < timestamp {
timestamps[i] = timestamp;
res.data.i[nz] = i;
nz += 1;
workspace[i] = val * beta;
} else {
workspace[i] += val * beta;
}
}
nz
}
}
/*
impl<N: Scalar, R, S> CsVector<N, R, S> {
pub fn axpy(&mut self, alpha: N, x: CsVector<N, R, S>, beta: N) {
// First, compute the number of non-zero entries.
let mut nnzero = 0;
// Allocate a size large enough.
self.data.set_column_len(0, nnzero);
// Fill with the axpy.
let mut i = self.len();
let mut j = x.len();
let mut k = nnzero - 1;
let mut rid1 = self.data.row_index(0, i - 1);
let mut rid2 = x.data.row_index(0, j - 1);
while k > 0 {
if rid1 == rid2 {
self.data.set_row_index(0, k, rid1);
self[k] = alpha * x[j] + beta * self[k];
i -= 1;
j -= 1;
} else if rid1 < rid2 {
self.data.set_row_index(0, k, rid1);
self[k] = beta * self[i];
i -= 1;
} else {
self.data.set_row_index(0, k, rid2);
self[k] = alpha * x[j];
j -= 1;
}
k -= 1;
}
}
}
*/
impl<N: Scalar + Zero + ClosedAdd + ClosedMul, D: Dim, S: StorageMut<N, D>> Vector<N, D, S> {
/// Performs the sparse axpy operation `self = alpha * x + beta * self`.
pub fn axpy_cs<D2: Dim, S2>(&mut self, alpha: N, x: &CsVector<N, D2, S2>, beta: N)
where
S2: CsStorage<N, D2>,
ShapeConstraint: DimEq<D, D2>,
{
if beta.is_zero() {
for i in 0..x.len() {
unsafe {
let k = x.data.row_index_unchecked(i);
let y = self.vget_unchecked_mut(k);
*y = alpha * *x.data.get_value_unchecked(i);
}
}
} else {
// Needed to make sure even the components not present in `x` are multiplied by `beta`.
*self *= beta;
for i in 0..x.len() {
unsafe {
let k = x.data.row_index_unchecked(i);
let y = self.vget_unchecked_mut(k);
*y += alpha * *x.data.get_value_unchecked(i);
}
}
}
}
/*
pub fn gemv_sparse<R2: Dim, C2: Dim, S2>(&mut self, alpha: N, a: &CsMatrix<N, R2, C2, S2>, x: &DVector<N>, beta: N)
where
S2: CsStorage<N, R2, C2> {
let col2 = a.column(0);
let val = unsafe { *x.vget_unchecked(0) };
self.axpy_sparse(alpha * val, &col2, beta);
for j in 1..ncols2 {
let col2 = a.column(j);
let val = unsafe { *x.vget_unchecked(j) };
self.axpy_sparse(alpha * val, &col2, N::one());
}
}
*/
}
impl<'a, 'b, N, R1, R2, C1, C2, S1, S2> Mul<&'b CsMatrix<N, R2, C2, S2>>
for &'a CsMatrix<N, R1, C1, S1>
where
N: Scalar + ClosedAdd + ClosedMul + Zero,
R1: Dim,
C1: Dim,
R2: Dim,
C2: Dim,
S1: CsStorage<N, R1, C1>,
S2: CsStorage<N, R2, C2>,
ShapeConstraint: AreMultipliable<R1, C1, R2, C2>,
DefaultAllocator: Allocator<usize, C2> + Allocator<usize, R1> + Allocator<N, R1>,
{
type Output = CsMatrix<N, R1, C2>;
fn mul(self, rhs: &'b CsMatrix<N, R2, C2, S2>) -> CsMatrix<N, R1, C2> {
let (nrows1, ncols1) = self.data.shape();
let (nrows2, ncols2) = rhs.data.shape();
assert_eq!(
ncols1.value(),
nrows2.value(),
"Mismatched dimensions for matrix multiplication."
);
let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len());
let mut workspace = VectorN::<N, R1>::zeros_generic(nrows1, U1);
let mut nz = 0;
for j in 0..ncols2.value() {
res.data.p[j] = nz;
let new_size_bound = nz + nrows1.value();
res.data.i.resize(new_size_bound, 0);
res.data.vals.resize(new_size_bound, N::zero());
for (i, beta) in rhs.data.column_entries(j) {
for (k, val) in self.data.column_entries(i) {
workspace[k] += val * beta;
}
}
for (i, val) in workspace.as_mut_slice().iter_mut().enumerate() {
if !val.is_zero() {
res.data.i[nz] = i;
res.data.vals[nz] = *val;
*val = N::zero();
nz += 1;
}
}
}
// NOTE: the following has a lower complexity, but is slower in many cases, likely because
// of the branching inside the inner loop.
//
// let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len());
// let mut timestamps = VectorN::zeros_generic(nrows1, U1);
// let mut workspace = unsafe { VectorN::new_uninitialized_generic(nrows1, U1) };
// let mut nz = 0;
//
// for j in 0..ncols2.value() {
// res.data.p[j] = nz;
// let new_size_bound = nz + nrows1.value();
// res.data.i.resize(new_size_bound, 0);
// res.data.vals.resize(new_size_bound, N::zero());
//
// for (i, val) in rhs.data.column_entries(j) {
// nz = self.scatter(
// i,
// val,
// timestamps.as_mut_slice(),
// j + 1,
// workspace.as_mut_slice(),
// nz,
// &mut res,
// );
// }
//
// // Keep the output sorted.
// let range = res.data.p[j]..nz;
// res.data.i[range.clone()].sort();
//
// for p in range {
// res.data.vals[p] = workspace[res.data.i[p]]
// }
// }
res.data.i.truncate(nz);
res.data.i.shrink_to_fit();
res.data.vals.truncate(nz);
res.data.vals.shrink_to_fit();
res
}
}
impl<'a, 'b, N, R1, R2, C1, C2, S1, S2> Add<&'b CsMatrix<N, R2, C2, S2>>
for &'a CsMatrix<N, R1, C1, S1>
where
N: Scalar + ClosedAdd + ClosedMul + One,
R1: Dim,
C1: Dim,
R2: Dim,
C2: Dim,
S1: CsStorage<N, R1, C1>,
S2: CsStorage<N, R2, C2>,
ShapeConstraint: DimEq<R1, R2> + DimEq<C1, C2>,
DefaultAllocator: Allocator<usize, C2> + Allocator<usize, R1> + Allocator<N, R1>,
{
type Output = CsMatrix<N, R1, C2>;
fn add(self, rhs: &'b CsMatrix<N, R2, C2, S2>) -> CsMatrix<N, R1, C2> {
let (nrows1, ncols1) = self.data.shape();
let (nrows2, ncols2) = rhs.data.shape();
assert_eq!(
(nrows1.value(), ncols1.value()),
(nrows2.value(), ncols2.value()),
"Mismatched dimensions for matrix sum."
);
let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len());
let mut timestamps = VectorN::zeros_generic(nrows1, U1);
let mut workspace = unsafe { VectorN::new_uninitialized_generic(nrows1, U1) };
let mut nz = 0;
for j in 0..ncols2.value() {
res.data.p[j] = nz;
nz = self.scatter(
j,
N::one(),
timestamps.as_mut_slice(),
j + 1,
workspace.as_mut_slice(),
nz,
&mut res,
);
nz = rhs.scatter(
j,
N::one(),
timestamps.as_mut_slice(),
j + 1,
workspace.as_mut_slice(),
nz,
&mut res,
);
// Keep the output sorted.
let range = res.data.p[j]..nz;
res.data.i[range.clone()].sort();
for p in range {
res.data.vals[p] = workspace[res.data.i[p]]
}
}
res.data.i.truncate(nz);
res.data.i.shrink_to_fit();
res.data.vals.truncate(nz);
res.data.vals.shrink_to_fit();
res
}
}
impl<'a, 'b, N, R, C, S> Mul<N> for CsMatrix<N, R, C, S>
where
N: Scalar + ClosedAdd + ClosedMul + Zero,
R: Dim,
C: Dim,
S: CsStorageMut<N, R, C>,
{
type Output = Self;
fn mul(mut self, rhs: N) -> Self {
for e in self.values_mut() {
*e *= rhs
}
self
}
}
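A short sketch exercising the sparse axpy and the sparse-sparse product defined above, assuming `nalgebra` is imported as `na`; the concrete matrices and names are illustrative.

use na::{CsMatrix, CsVector, Matrix2x3, Matrix3, Vector3};

fn main() {
    // Sparse axpy: v = 2.0 * x + 3.0 * v, with a sparse `x`.
    let mut v = Vector3::new(1.0, 2.0, 3.0);
    let x: CsVector<_, _> = Vector3::new(10.0, 0.0, 30.0).into();
    v.axpy_cs(2.0, &x, 3.0);
    assert_eq!(v, Vector3::new(23.0, 6.0, 69.0));

    // Sparse-sparse product through the `Mul` impl above.
    let a: CsMatrix<_, _, _> = Matrix2x3::new(
        1.0, 0.0, 2.0,
        0.0, 3.0, 0.0,
    ).into();
    let b: CsMatrix<_, _, _> = Matrix3::identity().into();
    let prod = &a * &b;
    assert_eq!(Matrix2x3::from(prod), Matrix2x3::new(
        1.0, 0.0, 2.0,
        0.0, 3.0, 0.0,
    ));
}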

View File

@ -0,0 +1,283 @@
use allocator::Allocator;
use constraint::{SameNumberOfRows, ShapeConstraint};
use sparse::{CsMatrix, CsStorage, CsVector};
use storage::{Storage, StorageMut};
use {DefaultAllocator, Dim, Matrix, MatrixMN, Real, VectorN, U1};
impl<N: Real, D: Dim, S: CsStorage<N, D, D>> CsMatrix<N, D, D, S> {
/// Solve a lower-triangular system with a dense right-hand-side.
pub fn solve_lower_triangular<R2: Dim, C2: Dim, S2>(
&self,
b: &Matrix<N, R2, C2, S2>,
) -> Option<MatrixMN<N, R2, C2>>
where
S2: Storage<N, R2, C2>,
DefaultAllocator: Allocator<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<D, R2>,
{
let mut b = b.clone_owned();
if self.solve_lower_triangular_mut(&mut b) {
Some(b)
} else {
None
}
}
/// Solve a lower-triangular system with `self` transposed and a dense right-hand-side.
pub fn tr_solve_lower_triangular<R2: Dim, C2: Dim, S2>(
&self,
b: &Matrix<N, R2, C2, S2>,
) -> Option<MatrixMN<N, R2, C2>>
where
S2: Storage<N, R2, C2>,
DefaultAllocator: Allocator<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<D, R2>,
{
let mut b = b.clone_owned();
if self.tr_solve_lower_triangular_mut(&mut b) {
Some(b)
} else {
None
}
}
/// Solve in-place a lower-triangular system with a dense right-hand-side.
pub fn solve_lower_triangular_mut<R2: Dim, C2: Dim, S2>(
&self,
b: &mut Matrix<N, R2, C2, S2>,
) -> bool
where
S2: StorageMut<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<D, R2>,
{
let (nrows, ncols) = self.data.shape();
assert_eq!(nrows.value(), ncols.value(), "The matrix must be square.");
assert_eq!(nrows.value(), b.len(), "Mismatched matrix dimensions.");
for j2 in 0..b.ncols() {
let mut b = b.column_mut(j2);
for j in 0..ncols.value() {
let mut column = self.data.column_entries(j);
let mut diag_found = false;
while let Some((i, val)) = column.next() {
if i == j {
if val.is_zero() {
return false;
}
b[j] /= val;
diag_found = true;
break;
}
}
if !diag_found {
return false;
}
for (i, val) in column {
b[i] -= b[j] * val;
}
}
}
true
}
/// Solve in-place a lower-triangular system with `self` transposed and a dense right-hand-side.
pub fn tr_solve_lower_triangular_mut<R2: Dim, C2: Dim, S2>(
&self,
b: &mut Matrix<N, R2, C2, S2>,
) -> bool
where
S2: StorageMut<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<D, R2>,
{
let (nrows, ncols) = self.data.shape();
assert_eq!(nrows.value(), ncols.value(), "The matrix must be square.");
assert_eq!(nrows.value(), b.len(), "Mismatched matrix dimensions.");
for j2 in 0..b.ncols() {
let mut b = b.column_mut(j2);
for j in (0..ncols.value()).rev() {
let mut column = self.data.column_entries(j);
let mut diag = None;
while let Some((i, val)) = column.next() {
if i == j {
if val.is_zero() {
return false;
}
diag = Some(val);
break;
}
}
if let Some(diag) = diag {
for (i, val) in column {
b[j] -= val * b[i];
}
b[j] /= diag;
} else {
return false;
}
}
}
true
}
/// Solve a lower-triangular system with a sparse right-hand-side.
pub fn solve_lower_triangular_cs<D2: Dim, S2>(
&self,
b: &CsVector<N, D2, S2>,
) -> Option<CsVector<N, D2>>
where
S2: CsStorage<N, D2>,
DefaultAllocator: Allocator<bool, D> + Allocator<N, D2> + Allocator<usize, D2>,
ShapeConstraint: SameNumberOfRows<D, D2>,
{
let mut reach = Vec::new();
// We don't compute a postordered reach here because the result will be sorted afterwards anyway.
self.lower_triangular_reach(b, &mut reach);
// We sort the reach so the result matrix has sorted indices.
reach.sort();
let mut workspace = unsafe { VectorN::new_uninitialized_generic(b.data.shape().0, U1) };
for i in reach.iter().cloned() {
workspace[i] = N::zero();
}
for (i, val) in b.data.column_entries(0) {
workspace[i] = val;
}
for j in reach.iter().cloned() {
let mut column = self.data.column_entries(j);
let mut diag_found = false;
while let Some((i, val)) = column.next() {
if i == j {
if val.is_zero() {
break;
}
workspace[j] /= val;
diag_found = true;
break;
}
}
if !diag_found {
return None;
}
for (i, val) in column {
workspace[i] -= workspace[j] * val;
}
}
// Copy the result into a sparse vector.
let mut result = CsVector::new_uninitialized_generic(b.data.shape().0, U1, reach.len());
for (i, val) in reach.iter().zip(result.data.vals.iter_mut()) {
*val = workspace[*i];
}
result.data.i = reach;
Some(result)
}
/*
// Computes the nodes reachable from `b`, in post-order.
fn lower_triangular_reach_postordered<D2: Dim, S2>(
&self,
b: &CsVector<N, D2, S2>,
xi: &mut Vec<usize>,
) where
S2: CsStorage<N, D2>,
DefaultAllocator: Allocator<bool, D>,
{
let mut visited = VectorN::repeat_generic(self.data.shape().1, U1, false);
let mut stack = Vec::new();
for i in b.data.column_range(0) {
let row_index = b.data.row_index(i);
if !visited[row_index] {
let rng = self.data.column_range(row_index);
stack.push((row_index, rng));
self.lower_triangular_dfs(visited.as_mut_slice(), &mut stack, xi);
}
}
}
fn lower_triangular_dfs(
&self,
visited: &mut [bool],
stack: &mut Vec<(usize, Range<usize>)>,
xi: &mut Vec<usize>,
)
{
'recursion: while let Some((j, rng)) = stack.pop() {
visited[j] = true;
for i in rng.clone() {
let row_id = self.data.row_index(i);
if row_id > j && !visited[row_id] {
stack.push((j, (i + 1)..rng.end));
stack.push((row_id, self.data.column_range(row_id)));
continue 'recursion;
}
}
xi.push(j)
}
}
*/
// Computes the nodes reachable from `b` in an arbitrary order.
fn lower_triangular_reach<D2: Dim, S2>(&self, b: &CsVector<N, D2, S2>, xi: &mut Vec<usize>)
where
S2: CsStorage<N, D2>,
DefaultAllocator: Allocator<bool, D>,
{
let mut visited = VectorN::repeat_generic(self.data.shape().1, U1, false);
let mut stack = Vec::new();
for irow in b.data.column_row_indices(0) {
self.lower_triangular_bfs(irow, visited.as_mut_slice(), &mut stack, xi);
}
}
fn lower_triangular_bfs(
&self,
start: usize,
visited: &mut [bool],
stack: &mut Vec<usize>,
xi: &mut Vec<usize>,
)
{
if !visited[start] {
stack.clear();
stack.push(start);
xi.push(start);
visited[start] = true;
while let Some(j) = stack.pop() {
for irow in self.data.column_row_indices(j) {
if irow > j && !visited[irow] {
stack.push(irow);
xi.push(irow);
visited[irow] = true;
}
}
}
}
}
}
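A minimal sketch of the dense right-hand-side lower-triangular solve above, assuming `nalgebra` is imported as `na`; `None` is returned when a zero diagonal entry makes the triangular system singular.

use na::{CsMatrix, Matrix3, Vector3};

fn main() {
    // Only the diagonal and the entries below it are read by the solve.
    let a = Matrix3::new(
        2.0, 0.0, 0.0,
        1.0, 3.0, 0.0,
        0.0, 4.0, 5.0,
    );
    let b = Vector3::new(2.0, 7.0, 13.0);

    let cs_a: CsMatrix<_, _, _> = a.into();
    let x = cs_a.solve_lower_triangular(&b).expect("non-singular triangular system");
    assert_eq!(x, Vector3::new(1.0, 2.0, 1.0));
}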

16
src/sparse/cs_utils.rs Normal file
View File

@ -0,0 +1,16 @@
use allocator::Allocator;
use {DefaultAllocator, Dim, VectorN};
pub fn cumsum<D: Dim>(a: &mut VectorN<usize, D>, b: &mut VectorN<usize, D>) -> usize
where DefaultAllocator: Allocator<usize, D> {
assert!(a.len() == b.len());
let mut sum = 0;
for i in 0..a.len() {
b[i] = sum;
sum += a[i];
a[i] = b[i];
}
sum
}
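A worked illustration of the `cumsum` helper above: `b` receives the exclusive prefix sums of `a` (turning per-column non-zero counts into the column-pointer array), `a` is overwritten with the same values, and the grand total is returned. The sketch below mirrors the logic on plain slices; the helper name `cumsum_slices` is illustrative only.

// Standalone illustration of `cumsum` above, on plain slices.
fn cumsum_slices(a: &mut [usize], b: &mut [usize]) -> usize {
    assert_eq!(a.len(), b.len());
    let mut sum = 0;
    for i in 0..a.len() {
        b[i] = sum;  // Exclusive prefix sum.
        sum += a[i];
        a[i] = b[i]; // `a` now also holds the column pointers.
    }
    sum
}

fn main() {
    // Per-column non-zero counts become the column pointer array `p` of a CS matrix.
    let mut counts: [usize; 4] = [2, 3, 0, 1];
    let mut ptrs = [0usize; 4];
    let total = cumsum_slices(&mut counts, &mut ptrs);
    assert_eq!(ptrs, [0, 2, 5, 5]);
    assert_eq!(counts, [0, 2, 5, 5]);
    assert_eq!(total, 6);
}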

13
src/sparse/mod.rs Normal file
View File

@ -0,0 +1,13 @@
//! Sparse matrices.
pub use self::cs_matrix::{
CsMatrix, CsStorage, CsStorageIter, CsStorageIterMut, CsStorageMut, CsVecStorage, CsVector,
};
pub use self::cs_matrix_cholesky::CsCholesky;
mod cs_matrix;
mod cs_matrix_cholesky;
mod cs_matrix_conversion;
mod cs_matrix_ops;
mod cs_matrix_solve;
pub(crate) mod cs_utils;

View File

@ -25,27 +25,35 @@ quickcheck!(
let viewmatrix = Isometry3::look_at_rh(&eye, &target, &up); let viewmatrix = Isometry3::look_at_rh(&eye, &target, &up);
let origin = Point3::origin(); let origin = Point3::origin();
relative_eq!(viewmatrix * eye, origin, epsilon = 1.0e-7) && relative_eq!(viewmatrix * eye, origin, epsilon = 1.0e-7)
relative_eq!((viewmatrix * (target - eye)).normalize(), -Vector3::z(), epsilon = 1.0e-7) && relative_eq!(
(viewmatrix * (target - eye)).normalize(),
-Vector3::z(),
epsilon = 1.0e-7
)
} }
fn observer_frame_3(eye: Point3<f64>, target: Point3<f64>, up: Vector3<f64>) -> bool { fn observer_frame_3(eye: Point3<f64>, target: Point3<f64>, up: Vector3<f64>) -> bool {
let observer = Isometry3::face_towards(&eye, &target, &up); let observer = Isometry3::face_towards(&eye, &target, &up);
let origin = Point3::origin(); let origin = Point3::origin();
relative_eq!(observer * origin, eye, epsilon = 1.0e-7) && relative_eq!(observer * origin, eye, epsilon = 1.0e-7)
relative_eq!(observer * Vector3::z(), (target - eye).normalize(), epsilon = 1.0e-7) && relative_eq!(
observer * Vector3::z(),
(target - eye).normalize(),
epsilon = 1.0e-7
)
} }
fn inverse_is_identity(i: Isometry3<f64>, p: Point3<f64>, v: Vector3<f64>) -> bool { fn inverse_is_identity(i: Isometry3<f64>, p: Point3<f64>, v: Vector3<f64>) -> bool {
let ii = i.inverse(); let ii = i.inverse();
relative_eq!(i * ii, Isometry3::identity(), epsilon = 1.0e-7) && relative_eq!(i * ii, Isometry3::identity(), epsilon = 1.0e-7)
relative_eq!(ii * i, Isometry3::identity(), epsilon = 1.0e-7) && && relative_eq!(ii * i, Isometry3::identity(), epsilon = 1.0e-7)
relative_eq!((i * ii) * p, p, epsilon = 1.0e-7) && && relative_eq!((i * ii) * p, p, epsilon = 1.0e-7)
relative_eq!((ii * i) * p, p, epsilon = 1.0e-7) && && relative_eq!((ii * i) * p, p, epsilon = 1.0e-7)
relative_eq!((i * ii) * v, v, epsilon = 1.0e-7) && && relative_eq!((i * ii) * v, v, epsilon = 1.0e-7)
relative_eq!((ii * i) * v, v, epsilon = 1.0e-7) && relative_eq!((ii * i) * v, v, epsilon = 1.0e-7)
} }
fn inverse_is_parts_inversion(t: Translation3<f64>, r: UnitQuaternion<f64>) -> bool { fn inverse_is_parts_inversion(t: Translation3<f64>, r: UnitQuaternion<f64>) -> bool {
@ -54,14 +62,29 @@ quickcheck!(
} }
fn multiply_equals_alga_transform(i: Isometry3<f64>, v: Vector3<f64>, p: Point3<f64>) -> bool { fn multiply_equals_alga_transform(i: Isometry3<f64>, v: Vector3<f64>, p: Point3<f64>) -> bool {
i * v == i.transform_vector(&v) && i * v == i.transform_vector(&v)
i * p == i.transform_point(&p) && && i * p == i.transform_point(&p)
relative_eq!(i.inverse() * v, i.inverse_transform_vector(&v), epsilon = 1.0e-7) && && relative_eq!(
relative_eq!(i.inverse() * p, i.inverse_transform_point(&p), epsilon = 1.0e-7) i.inverse() * v,
i.inverse_transform_vector(&v),
epsilon = 1.0e-7
)
&& relative_eq!(
i.inverse() * p,
i.inverse_transform_point(&p),
epsilon = 1.0e-7
)
} }
fn composition2(i: Isometry2<f64>, uc: UnitComplex<f64>, r: Rotation2<f64>, fn composition2(
t: Translation2<f64>, v: Vector2<f64>, p: Point2<f64>) -> bool { i: Isometry2<f64>,
uc: UnitComplex<f64>,
r: Rotation2<f64>,
t: Translation2<f64>,
v: Vector2<f64>,
p: Point2<f64>,
) -> bool
{
// (rotation × translation) * point = rotation × (translation * point) // (rotation × translation) * point = rotation × (translation * point)
relative_eq!((uc * t) * v, uc * v, epsilon = 1.0e-7) && relative_eq!((uc * t) * v, uc * v, epsilon = 1.0e-7) &&
relative_eq!((r * t) * v, r * v, epsilon = 1.0e-7) && relative_eq!((r * t) * v, r * v, epsilon = 1.0e-7) &&
@ -91,8 +114,15 @@ quickcheck!(
relative_eq!((i * t) * p, i * (t * p), epsilon = 1.0e-7) relative_eq!((i * t) * p, i * (t * p), epsilon = 1.0e-7)
} }
fn composition3(i: Isometry3<f64>, uq: UnitQuaternion<f64>, r: Rotation3<f64>, fn composition3(
t: Translation3<f64>, v: Vector3<f64>, p: Point3<f64>) -> bool { i: Isometry3<f64>,
uq: UnitQuaternion<f64>,
r: Rotation3<f64>,
t: Translation3<f64>,
v: Vector3<f64>,
p: Point3<f64>,
) -> bool
{
// (rotation × translation) * point = rotation × (translation * point) // (rotation × translation) * point = rotation × (translation * point)
relative_eq!((uq * t) * v, uq * v, epsilon = 1.0e-7) && relative_eq!((uq * t) * v, uq * v, epsilon = 1.0e-7) &&
relative_eq!((r * t) * v, r * v, epsilon = 1.0e-7) && relative_eq!((r * t) * v, r * v, epsilon = 1.0e-7) &&
@ -122,11 +152,18 @@ quickcheck!(
relative_eq!((i * t) * p, i * (t * p), epsilon = 1.0e-7) relative_eq!((i * t) * p, i * (t * p), epsilon = 1.0e-7)
} }
fn all_op_exist(i: Isometry3<f64>, uq: UnitQuaternion<f64>, t: Translation3<f64>, fn all_op_exist(
v: Vector3<f64>, p: Point3<f64>, r: Rotation3<f64>) -> bool { i: Isometry3<f64>,
let iMi = i * i; uq: UnitQuaternion<f64>,
t: Translation3<f64>,
v: Vector3<f64>,
p: Point3<f64>,
r: Rotation3<f64>,
) -> bool
{
let iMi = i * i;
let iMuq = i * uq; let iMuq = i * uq;
let iDi = i / i; let iDi = i / i;
let iDuq = i / uq; let iDuq = i / uq;
let iMp = i * p; let iMp = i * p;
@ -135,13 +172,13 @@ quickcheck!(
let iMt = i * t; let iMt = i * t;
let tMi = t * i; let tMi = t * i;
let tMr = t * r; let tMr = t * r;
let tMuq = t * uq; let tMuq = t * uq;
let uqMi = uq * i; let uqMi = uq * i;
let uqDi = uq / i; let uqDi = uq / i;
let rMt = r * t; let rMt = r * t;
let uqMt = uq * t; let uqMt = uq * t;
let mut iMt1 = i; let mut iMt1 = i;
@ -174,75 +211,57 @@ quickcheck!(
iDuq1 /= uq; iDuq1 /= uq;
iDuq2 /= &uq; iDuq2 /= &uq;
iMt == iMt1 && iMt == iMt1
iMt == iMt2 && && iMt == iMt2
&& iMi == iMi1
iMi == iMi1 && && iMi == iMi2
iMi == iMi2 && && iMuq == iMuq1
&& iMuq == iMuq2
iMuq == iMuq1 && && iDi == iDi1
iMuq == iMuq2 && && iDi == iDi2
&& iDuq == iDuq1
iDi == iDi1 && && iDuq == iDuq2
iDi == iDi2 && && iMi == &i * &i
&& iMi == i * &i
iDuq == iDuq1 && && iMi == &i * i
iDuq == iDuq2 && && iMuq == &i * &uq
&& iMuq == i * &uq
iMi == &i * &i && && iMuq == &i * uq
iMi == i * &i && && iDi == &i / &i
iMi == &i * i && && iDi == i / &i
&& iDi == &i / i
iMuq == &i * &uq && && iDuq == &i / &uq
iMuq == i * &uq && && iDuq == i / &uq
iMuq == &i * uq && && iDuq == &i / uq
&& iMp == &i * &p
iDi == &i / &i && && iMp == i * &p
iDi == i / &i && && iMp == &i * p
iDi == &i / i && && iMv == &i * &v
&& iMv == i * &v
iDuq == &i / &uq && && iMv == &i * v
iDuq == i / &uq && && iMt == &i * &t
iDuq == &i / uq && && iMt == i * &t
&& iMt == &i * t
iMp == &i * &p && && tMi == &t * &i
iMp == i * &p && && tMi == t * &i
iMp == &i * p && && tMi == &t * i
&& tMr == &t * &r
iMv == &i * &v && && tMr == t * &r
iMv == i * &v && && tMr == &t * r
iMv == &i * v && && tMuq == &t * &uq
&& tMuq == t * &uq
iMt == &i * &t && && tMuq == &t * uq
iMt == i * &t && && uqMi == &uq * &i
iMt == &i * t && && uqMi == uq * &i
&& uqMi == &uq * i
tMi == &t * &i && && uqDi == &uq / &i
tMi == t * &i && && uqDi == uq / &i
tMi == &t * i && && uqDi == &uq / i
&& rMt == &r * &t
tMr == &t * &r && && rMt == r * &t
tMr == t * &r && && rMt == &r * t
tMr == &t * r && && uqMt == &uq * &t
&& uqMt == uq * &t
tMuq == &t * &uq && && uqMt == &uq * t
tMuq == t * &uq &&
tMuq == &t * uq &&
uqMi == &uq * &i &&
uqMi == uq * &i &&
uqMi == &uq * i &&
uqDi == &uq / &i &&
uqDi == uq / &i &&
uqDi == &uq / i &&
rMt == &r * &t &&
rMt == r * &t &&
rMt == &r * t &&
uqMt == &uq * &t &&
uqMt == uq * &t &&
uqMt == &uq * t
} }
); );

View File

@ -8,33 +8,57 @@ quickcheck!(
fn inverse_is_identity(i: Similarity3<f64>, p: Point3<f64>, v: Vector3<f64>) -> bool { fn inverse_is_identity(i: Similarity3<f64>, p: Point3<f64>, v: Vector3<f64>) -> bool {
let ii = i.inverse(); let ii = i.inverse();
relative_eq!(i * ii, Similarity3::identity(), epsilon = 1.0e-7) && relative_eq!(i * ii, Similarity3::identity(), epsilon = 1.0e-7)
relative_eq!(ii * i, Similarity3::identity(), epsilon = 1.0e-7) && && relative_eq!(ii * i, Similarity3::identity(), epsilon = 1.0e-7)
relative_eq!((i * ii) * p, p, epsilon = 1.0e-7) && && relative_eq!((i * ii) * p, p, epsilon = 1.0e-7)
relative_eq!((ii * i) * p, p, epsilon = 1.0e-7) && && relative_eq!((ii * i) * p, p, epsilon = 1.0e-7)
relative_eq!((i * ii) * v, v, epsilon = 1.0e-7) && && relative_eq!((i * ii) * v, v, epsilon = 1.0e-7)
relative_eq!((ii * i) * v, v, epsilon = 1.0e-7) && relative_eq!((ii * i) * v, v, epsilon = 1.0e-7)
} }
fn inverse_is_parts_inversion(t: Translation3<f64>, r: UnitQuaternion<f64>, scaling: f64) -> bool { fn inverse_is_parts_inversion(
t: Translation3<f64>,
r: UnitQuaternion<f64>,
scaling: f64,
) -> bool
{
if relative_eq!(scaling, 0.0) { if relative_eq!(scaling, 0.0) {
true true
} } else {
else {
let s = Similarity3::from_isometry(t * r, scaling); let s = Similarity3::from_isometry(t * r, scaling);
s.inverse() == Similarity3::from_scaling(1.0 / scaling) * r.inverse() * t.inverse() s.inverse() == Similarity3::from_scaling(1.0 / scaling) * r.inverse() * t.inverse()
} }
} }
fn multiply_equals_alga_transform(s: Similarity3<f64>, v: Vector3<f64>, p: Point3<f64>) -> bool { fn multiply_equals_alga_transform(
s * v == s.transform_vector(&v) && s: Similarity3<f64>,
s * p == s.transform_point(&p) && v: Vector3<f64>,
relative_eq!(s.inverse() * v, s.inverse_transform_vector(&v), epsilon = 1.0e-7) && p: Point3<f64>,
relative_eq!(s.inverse() * p, s.inverse_transform_point(&p), epsilon = 1.0e-7) ) -> bool
{
s * v == s.transform_vector(&v)
&& s * p == s.transform_point(&p)
&& relative_eq!(
s.inverse() * v,
s.inverse_transform_vector(&v),
epsilon = 1.0e-7
)
&& relative_eq!(
s.inverse() * p,
s.inverse_transform_point(&p),
epsilon = 1.0e-7
)
} }
fn composition(i: Isometry3<f64>, uq: UnitQuaternion<f64>, fn composition(
t: Translation3<f64>, v: Vector3<f64>, p: Point3<f64>, scaling: f64) -> bool { i: Isometry3<f64>,
uq: UnitQuaternion<f64>,
t: Translation3<f64>,
v: Vector3<f64>,
p: Point3<f64>,
scaling: f64,
) -> bool
{
if relative_eq!(scaling, 0.0) { if relative_eq!(scaling, 0.0) {
return true; return true;
} }
@ -122,11 +146,18 @@ quickcheck!(
relative_eq!((s * i * t) * p, scaling * (i * (t * p)), epsilon = 1.0e-7) relative_eq!((s * i * t) * p, scaling * (i * (t * p)), epsilon = 1.0e-7)
} }
fn all_op_exist(s: Similarity3<f64>, i: Isometry3<f64>, uq: UnitQuaternion<f64>, fn all_op_exist(
t: Translation3<f64>, v: Vector3<f64>, p: Point3<f64>) -> bool { s: Similarity3<f64>,
let sMs = s * s; i: Isometry3<f64>,
uq: UnitQuaternion<f64>,
t: Translation3<f64>,
v: Vector3<f64>,
p: Point3<f64>,
) -> bool
{
let sMs = s * s;
let sMuq = s * uq; let sMuq = s * uq;
let sDs = s / s; let sDs = s / s;
let sDuq = s / uq; let sDuq = s / uq;
let sMp = s * p; let sMp = s * p;
@ -186,81 +217,61 @@ quickcheck!(
sDi1 /= i; sDi1 /= i;
sDi2 /= &i; sDi2 /= &i;
sMt == sMt1 && sMt == sMt1
sMt == sMt2 && && sMt == sMt2
&& sMs == sMs1
sMs == sMs1 && && sMs == sMs2
sMs == sMs2 && && sMuq == sMuq1
&& sMuq == sMuq2
sMuq == sMuq1 && && sMi == sMi1
sMuq == sMuq2 && && sMi == sMi2
&& sDs == sDs1
sMi == sMi1 && && sDs == sDs2
sMi == sMi2 && && sDuq == sDuq1
&& sDuq == sDuq2
sDs == sDs1 && && sDi == sDi1
sDs == sDs2 && && sDi == sDi2
&& sMs == &s * &s
sDuq == sDuq1 && && sMs == s * &s
sDuq == sDuq2 && && sMs == &s * s
&& sMuq == &s * &uq
sDi == sDi1 && && sMuq == s * &uq
sDi == sDi2 && && sMuq == &s * uq
&& sDs == &s / &s
sMs == &s * &s && && sDs == s / &s
sMs == s * &s && && sDs == &s / s
sMs == &s * s && && sDuq == &s / &uq
&& sDuq == s / &uq
sMuq == &s * &uq && && sDuq == &s / uq
sMuq == s * &uq && && sMp == &s * &p
sMuq == &s * uq && && sMp == s * &p
&& sMp == &s * p
sDs == &s / &s && && sMv == &s * &v
sDs == s / &s && && sMv == s * &v
sDs == &s / s && && sMv == &s * v
&& sMt == &s * &t
sDuq == &s / &uq && && sMt == s * &t
sDuq == s / &uq && && sMt == &s * t
sDuq == &s / uq && && tMs == &t * &s
&& tMs == t * &s
sMp == &s * &p && && tMs == &t * s
sMp == s * &p && && uqMs == &uq * &s
sMp == &s * p && && uqMs == uq * &s
&& uqMs == &uq * s
sMv == &s * &v && && uqDs == &uq / &s
sMv == s * &v && && uqDs == uq / &s
sMv == &s * v && && uqDs == &uq / s
&& sMi == &s * &i
sMt == &s * &t && && sMi == s * &i
sMt == s * &t && && sMi == &s * i
sMt == &s * t && && sDi == &s / &i
&& sDi == s / &i
tMs == &t * &s && && sDi == &s / i
tMs == t * &s && && iMs == &i * &s
tMs == &t * s && && iMs == i * &s
&& iMs == &i * s
uqMs == &uq * &s && && iDs == &i / &s
uqMs == uq * &s && && iDs == i / &s
uqMs == &uq * s && && iDs == &i / s
uqDs == &uq / &s &&
uqDs == uq / &s &&
uqDs == &uq / s &&
sMi == &s * &i &&
sMi == s * &i &&
sMi == &s * i &&
sDi == &s / &i &&
sDi == s / &i &&
sDi == &s / i &&
iMs == &i * &s &&
iMs == i * &s &&
iMs == &i * s &&
iDs == &i / &s &&
iDs == i / &s &&
iDs == &i / s
} }
); );

View File

@ -4,19 +4,17 @@
use na::{Point2, Rotation2, Unit, UnitComplex, Vector2}; use na::{Point2, Rotation2, Unit, UnitComplex, Vector2};
quickcheck!( quickcheck!(
/* /*
* *
* From/to rotation matrix. * From/to rotation matrix.
* *
*/ */
fn unit_complex_rotation_conversion(c: UnitComplex<f64>) -> bool { fn unit_complex_rotation_conversion(c: UnitComplex<f64>) -> bool {
let r = c.to_rotation_matrix(); let r = c.to_rotation_matrix();
let cc = UnitComplex::from_rotation_matrix(&r); let cc = UnitComplex::from_rotation_matrix(&r);
let rr = cc.to_rotation_matrix(); let rr = cc.to_rotation_matrix();
relative_eq!(c, cc, epsilon = 1.0e-7) && relative_eq!(c, cc, epsilon = 1.0e-7) && relative_eq!(r, rr, epsilon = 1.0e-7)
relative_eq!(r, rr, epsilon = 1.0e-7)
} }
/* /*
@ -25,19 +23,18 @@ quickcheck!(
* *
*/ */
fn unit_complex_transformation(c: UnitComplex<f64>, v: Vector2<f64>, p: Point2<f64>) -> bool { fn unit_complex_transformation(c: UnitComplex<f64>, v: Vector2<f64>, p: Point2<f64>) -> bool {
let r = c.to_rotation_matrix(); let r = c.to_rotation_matrix();
let rv = r * v; let rv = r * v;
let rp = r * p; let rp = r * p;
relative_eq!( c * v, rv, epsilon = 1.0e-7) && relative_eq!(c * v, rv, epsilon = 1.0e-7)
relative_eq!( c * &v, rv, epsilon = 1.0e-7) && && relative_eq!(c * &v, rv, epsilon = 1.0e-7)
relative_eq!(&c * v, rv, epsilon = 1.0e-7) && && relative_eq!(&c * v, rv, epsilon = 1.0e-7)
relative_eq!(&c * &v, rv, epsilon = 1.0e-7) && && relative_eq!(&c * &v, rv, epsilon = 1.0e-7)
&& relative_eq!(c * p, rp, epsilon = 1.0e-7)
relative_eq!( c * p, rp, epsilon = 1.0e-7) && && relative_eq!(c * &p, rp, epsilon = 1.0e-7)
relative_eq!( c * &p, rp, epsilon = 1.0e-7) && && relative_eq!(&c * p, rp, epsilon = 1.0e-7)
relative_eq!(&c * p, rp, epsilon = 1.0e-7) && && relative_eq!(&c * &p, rp, epsilon = 1.0e-7)
relative_eq!(&c * &p, rp, epsilon = 1.0e-7)
} }
/* /*
@ -47,15 +44,14 @@ quickcheck!(
*/ */
fn unit_complex_inv(c: UnitComplex<f64>) -> bool { fn unit_complex_inv(c: UnitComplex<f64>) -> bool {
let iq = c.inverse(); let iq = c.inverse();
relative_eq!(&iq * &c, UnitComplex::identity(), epsilon = 1.0e-7) && relative_eq!(&iq * &c, UnitComplex::identity(), epsilon = 1.0e-7)
relative_eq!( iq * &c, UnitComplex::identity(), epsilon = 1.0e-7) && && relative_eq!(iq * &c, UnitComplex::identity(), epsilon = 1.0e-7)
relative_eq!(&iq * c, UnitComplex::identity(), epsilon = 1.0e-7) && && relative_eq!(&iq * c, UnitComplex::identity(), epsilon = 1.0e-7)
relative_eq!( iq * c, UnitComplex::identity(), epsilon = 1.0e-7) && && relative_eq!(iq * c, UnitComplex::identity(), epsilon = 1.0e-7)
&& relative_eq!(&c * &iq, UnitComplex::identity(), epsilon = 1.0e-7)
relative_eq!(&c * &iq, UnitComplex::identity(), epsilon = 1.0e-7) && && relative_eq!(c * &iq, UnitComplex::identity(), epsilon = 1.0e-7)
relative_eq!( c * &iq, UnitComplex::identity(), epsilon = 1.0e-7) && && relative_eq!(&c * iq, UnitComplex::identity(), epsilon = 1.0e-7)
relative_eq!(&c * iq, UnitComplex::identity(), epsilon = 1.0e-7) && && relative_eq!(c * iq, UnitComplex::identity(), epsilon = 1.0e-7)
relative_eq!( c * iq, UnitComplex::identity(), epsilon = 1.0e-7)
} }
/* /*
@ -66,25 +62,30 @@ quickcheck!(
fn unit_complex_mul_vector(c: UnitComplex<f64>, v: Vector2<f64>, p: Point2<f64>) -> bool { fn unit_complex_mul_vector(c: UnitComplex<f64>, v: Vector2<f64>, p: Point2<f64>) -> bool {
let r = c.to_rotation_matrix(); let r = c.to_rotation_matrix();
relative_eq!(c * v, r * v, epsilon = 1.0e-7) && relative_eq!(c * v, r * v, epsilon = 1.0e-7) && relative_eq!(c * p, r * p, epsilon = 1.0e-7)
relative_eq!(c * p, r * p, epsilon = 1.0e-7)
} }
// Test that all operators (incl. all combinations of references) work. // Test that all operators (incl. all combinations of references) work.
// See the top comment on `geometry/quaternion_ops.rs` for details on which operations are // See the top comment on `geometry/quaternion_ops.rs` for details on which operations are
// supported. // supported.
fn all_op_exist(uc: UnitComplex<f64>, v: Vector2<f64>, p: Point2<f64>, r: Rotation2<f64>) -> bool { fn all_op_exist(
uc: UnitComplex<f64>,
v: Vector2<f64>,
p: Point2<f64>,
r: Rotation2<f64>,
) -> bool
{
let uv = Unit::new_normalize(v); let uv = Unit::new_normalize(v);
let ucMuc = uc * uc; let ucMuc = uc * uc;
let ucMr = uc * r; let ucMr = uc * r;
let rMuc = r * uc; let rMuc = r * uc;
let ucDuc = uc / uc; let ucDuc = uc / uc;
let ucDr = uc / r; let ucDr = uc / r;
let rDuc = r / uc; let rDuc = r / uc;
let ucMp = uc * p; let ucMp = uc * p;
let ucMv = uc * v; let ucMv = uc * v;
let ucMuv = uc * uv; let ucMuv = uc * uv;
let mut ucMuc1 = uc; let mut ucMuc1 = uc;
@ -111,52 +112,40 @@ quickcheck!(
ucDr1 /= r; ucDr1 /= r;
ucDr2 /= &r; ucDr2 /= &r;
ucMuc1 == ucMuc && ucMuc1 == ucMuc
ucMuc1 == ucMuc2 && && ucMuc1 == ucMuc2
&& ucMr1 == ucMr
ucMr1 == ucMr && && ucMr1 == ucMr2
ucMr1 == ucMr2 && && ucDuc1 == ucDuc
&& ucDuc1 == ucDuc2
ucDuc1 == ucDuc && && ucDr1 == ucDr
ucDuc1 == ucDuc2 && && ucDr1 == ucDr2
&& ucMuc == &uc * &uc
ucDr1 == ucDr && && ucMuc == uc * &uc
ucDr1 == ucDr2 && && ucMuc == &uc * uc
&& ucMr == &uc * &r
ucMuc == &uc * &uc && && ucMr == uc * &r
ucMuc == uc * &uc && && ucMr == &uc * r
ucMuc == &uc * uc && && rMuc == &r * &uc
&& rMuc == r * &uc
ucMr == &uc * &r && && rMuc == &r * uc
ucMr == uc * &r && && ucDuc == &uc / &uc
ucMr == &uc * r && && ucDuc == uc / &uc
&& ucDuc == &uc / uc
rMuc == &r * &uc && && ucDr == &uc / &r
rMuc == r * &uc && && ucDr == uc / &r
rMuc == &r * uc && && ucDr == &uc / r
&& rDuc == &r / &uc
ucDuc == &uc / &uc && && rDuc == r / &uc
ucDuc == uc / &uc && && rDuc == &r / uc
ucDuc == &uc / uc && && ucMp == &uc * &p
&& ucMp == uc * &p
ucDr == &uc / &r && && ucMp == &uc * p
ucDr == uc / &r && && ucMv == &uc * &v
ucDr == &uc / r && && ucMv == uc * &v
&& ucMv == &uc * v
rDuc == &r / &uc && && ucMuv == &uc * &uv
rDuc == r / &uc && && ucMuv == uc * &uv
rDuc == &r / uc && && ucMuv == &uc * uv
ucMp == &uc * &p &&
ucMp == uc * &p &&
ucMp == &uc * p &&
ucMv == &uc * &v &&
ucMv == uc * &v &&
ucMv == &uc * v &&
ucMuv == &uc * &uv &&
ucMuv == uc * &uv &&
ucMuv == &uc * uv
} }
); );

View File

@ -16,3 +16,5 @@ extern crate serde_json;
mod core; mod core;
mod geometry; mod geometry;
mod linalg; mod linalg;
#[cfg(feature = "sparse")]
mod sparse;

View File

@ -104,3 +104,89 @@ fn symmetric_eigen_singular_24x24() {
epsilon = 1.0e-5 epsilon = 1.0e-5
)); ));
} }
// #[cfg(feature = "arbitrary")]
// quickcheck! {
// FIXME: full eigendecomposition is not implemented yet because of its complexity when some
// eigenvalues have multiplicity > 1.
//
// /*
// * NOTE: for the following tests, we use only upper-triangular matrices.
// * This ensures the Schur decomposition will work, and allows us to test the eigenvector
// * computation.
// */
// fn eigen(n: usize) -> bool {
// let n = cmp::max(1, cmp::min(n, 10));
// let m = DMatrix::<f64>::new_random(n, n).upper_triangle();
//
// let eig = RealEigen::new(m.clone()).unwrap();
// verify_eigenvectors(m, eig)
// }
//
// fn eigen_with_adjascent_duplicate_diagonals(n: usize) -> bool {
// let n = cmp::max(1, cmp::min(n, 10));
// let mut m = DMatrix::<f64>::new_random(n, n).upper_triangle();
//
// // Duplicate some adjacent diagonal elements.
// for i in 0 .. n / 2 {
// m[(i * 2 + 1, i * 2 + 1)] = m[(i * 2, i * 2)];
// }
//
// let eig = RealEigen::new(m.clone()).unwrap();
// verify_eigenvectors(m, eig)
// }
//
// fn eigen_with_nonadjascent_duplicate_diagonals(n: usize) -> bool {
// let n = cmp::max(3, cmp::min(n, 10));
// let mut m = DMatrix::<f64>::new_random(n, n).upper_triangle();
//
// // Duplicate some diagonal elements.
// for i in n / 2 .. n {
// m[(i, i)] = m[(i - n / 2, i - n / 2)];
// }
//
// let eig = RealEigen::new(m.clone()).unwrap();
// verify_eigenvectors(m, eig)
// }
//
// fn eigen_static_square_4x4(m: Matrix4<f64>) -> bool {
// let m = m.upper_triangle();
// let eig = RealEigen::new(m.clone()).unwrap();
// verify_eigenvectors(m, eig)
// }
//
// fn eigen_static_square_3x3(m: Matrix3<f64>) -> bool {
// let m = m.upper_triangle();
// let eig = RealEigen::new(m.clone()).unwrap();
// verify_eigenvectors(m, eig)
// }
//
// fn eigen_static_square_2x2(m: Matrix2<f64>) -> bool {
// let m = m.upper_triangle();
// println!("{}", m);
// let eig = RealEigen::new(m.clone()).unwrap();
// verify_eigenvectors(m, eig)
// }
// }
//
// fn verify_eigenvectors<D: Dim>(m: MatrixN<f64, D>, mut eig: RealEigen<f64, D>) -> bool
// where DefaultAllocator: Allocator<f64, D, D> +
// Allocator<f64, D> +
// Allocator<usize, D, D> +
// Allocator<usize, D>,
// MatrixN<f64, D>: Display,
// VectorN<f64, D>: Display {
// let mv = &m * &eig.eigenvectors;
//
// println!("eigenvalues: {}eigenvectors: {}", eig.eigenvalues, eig.eigenvectors);
//
// let dim = m.nrows();
// for i in 0 .. dim {
// let mut col = eig.eigenvectors.column_mut(i);
// col *= eig.eigenvalues[i];
// }
//
// println!("{}{:.5}{:.5}", m, mv, eig.eigenvectors);
//
// relative_eq!(eig.eigenvectors, mv, epsilon = 1.0e-5)
// }

View File

@ -3,8 +3,10 @@ use na::{DMatrix, Matrix6};
#[cfg(feature = "arbitrary")] #[cfg(feature = "arbitrary")]
mod quickcheck_tests { mod quickcheck_tests {
use na::{
DMatrix, DVector, Matrix2, Matrix2x5, Matrix3, Matrix3x5, Matrix4, Matrix5x2, Matrix5x3,
};
use std::cmp; use std::cmp;
use na::{DMatrix, Matrix2, Matrix3, Matrix4, Matrix5x2, Matrix5x3, Matrix2x5, Matrix3x5, DVector};
quickcheck! { quickcheck! {
fn svd(m: DMatrix<f64>) -> bool { fn svd(m: DMatrix<f64>) -> bool {
@ -258,7 +260,6 @@ fn svd_singular_horizontal() {
assert!(relative_eq!(m, &u * ds * &v_t, epsilon = 1.0e-5)); assert!(relative_eq!(m, &u * ds * &v_t, epsilon = 1.0e-5));
} }
#[test] #[test]
fn svd_zeros() { fn svd_zeros() {
let m = DMatrix::from_element(10, 10, 0.0); let m = DMatrix::from_element(10, 10, 0.0);

View File

@ -0,0 +1,75 @@
#![cfg_attr(rustfmt, rustfmt_skip)]
use na::{CsMatrix, CsVector, CsCholesky, Cholesky, Matrix5, Vector5};
#[test]
fn cs_cholesky() {
let mut a = Matrix5::new(
40.0, 0.0, 0.0, 0.0, 0.0,
2.0, 60.0, 0.0, 0.0, 0.0,
1.0, 0.0, 11.0, 0.0, 0.0,
0.0, 0.0, 0.0, 50.0, 0.0,
1.0, 0.0, 0.0, 4.0, 10.0
);
a.fill_upper_triangle_with_lower_triangle();
test_cholesky(a);
let a = Matrix5::from_diagonal(&Vector5::new(40.0, 60.0, 11.0, 50.0, 10.0));
test_cholesky(a);
let mut a = Matrix5::new(
40.0, 0.0, 0.0, 0.0, 0.0,
2.0, 60.0, 0.0, 0.0, 0.0,
1.0, 0.0, 11.0, 0.0, 0.0,
1.0, 0.0, 0.0, 50.0, 0.0,
0.0, 0.0, 0.0, 4.0, 10.0
);
a.fill_upper_triangle_with_lower_triangle();
test_cholesky(a);
let mut a = Matrix5::new(
2.0, 0.0, 0.0, 0.0, 0.0,
0.0, 2.0, 0.0, 0.0, 0.0,
1.0, 1.0, 2.0, 0.0, 0.0,
0.0, 0.0, 0.0, 2.0, 0.0,
1.0, 1.0, 0.0, 0.0, 2.0
);
a.fill_upper_triangle_with_lower_triangle();
// Test ::new, left_looking, and up_looking implementations.
test_cholesky(a);
}
fn test_cholesky(a: Matrix5<f32>) {
// Test ::new
test_cholesky_variant(a, 0);
// Test up-looking
test_cholesky_variant(a, 1);
// Test left-looking
test_cholesky_variant(a, 2);
}
fn test_cholesky_variant(a: Matrix5<f32>, option: usize) {
let cs_a: CsMatrix<_, _, _> = a.into();
let chol_a = Cholesky::new(a).unwrap();
let mut chol_cs_a;
match option {
0 => chol_cs_a = CsCholesky::new(&cs_a),
1 => {
chol_cs_a = CsCholesky::new_symbolic(&cs_a);
chol_cs_a.decompose_up_looking(cs_a.data.values());
}
_ => {
chol_cs_a = CsCholesky::new_symbolic(&cs_a);
chol_cs_a.decompose_left_looking(cs_a.data.values());
}
};
let l = chol_a.l();
let cs_l = chol_cs_a.unwrap_l().unwrap();
assert!(cs_l.is_sorted());
let cs_l_mat: Matrix5<_> = cs_l.into();
assert_relative_eq!(l, cs_l_mat);
}

View File

@ -0,0 +1 @@

View File

@ -0,0 +1,89 @@
use na::{CsMatrix, DMatrix, Matrix4x5};
#[test]
fn cs_from_to_matrix() {
#[cfg_attr(rustfmt, rustfmt_skip)]
let m = Matrix4x5::new(
5.0, 6.0, 0.0, 8.0, 15.0,
9.0, 10.0, 11.0, 12.0, 0.0,
0.0, 0.0, 13.0, 0.0, 0.0,
0.0, 1.0, 4.0, 0.0, 14.0,
);
let cs: CsMatrix<_, _, _> = m.into();
assert!(cs.is_sorted());
let m2: Matrix4x5<_> = cs.into();
assert_eq!(m2, m);
}
#[test]
fn cs_matrix_from_triplet() {
let mut irows = vec![0, 0, 0, 0, 1, 1, 1, 1, 2, 3, 3, 3];
let mut icols = vec![0, 1, 3, 4, 0, 1, 2, 3, 2, 1, 2, 4];
let mut vals = vec![
5.0, 6.0, 8.0, 15.0, 9.0, 10.0, 11.0, 12.0, 13.0, 1.0, 4.0, 14.0,
];
#[cfg_attr(rustfmt, rustfmt_skip)]
let expected = DMatrix::from_row_slice(4, 5, &[
5.0, 6.0, 0.0, 8.0, 15.0,
9.0, 10.0, 11.0, 12.0, 0.0,
0.0, 0.0, 13.0, 0.0, 0.0,
0.0, 1.0, 4.0, 0.0, 14.0,
]);
let cs_expected = CsMatrix::from_parts(
4,
5,
vec![0, 2, 5, 8, 10],
vec![0, 1, 0, 1, 3, 1, 2, 3, 0, 1, 0, 3],
vec![
5.0, 9.0, 6.0, 10.0, 1.0, 11.0, 13.0, 4.0, 8.0, 12.0, 15.0, 14.0,
],
);
let cs_mat = CsMatrix::from_triplet(4, 5, &irows, &icols, &vals);
println!("Mat from triplet: {:?}", cs_mat);
assert!(cs_mat.is_sorted());
assert_eq!(cs_mat, cs_expected);
let m: DMatrix<_> = cs_mat.into();
assert_eq!(m, expected);
/*
* Try again with some permutations.
*/
let permutations = [(2, 5), (0, 4), (8, 10), (1, 11)];
for (i, j) in &permutations {
irows.swap(*i, *j);
icols.swap(*i, *j);
vals.swap(*i, *j);
}
let cs_mat = CsMatrix::from_triplet(4, 5, &irows, &icols, &vals);
println!("Mat from triplet: {:?}", cs_mat);
assert!(cs_mat.is_sorted());
assert_eq!(cs_mat, cs_expected);
let m: DMatrix<_> = cs_mat.into();
assert_eq!(m, expected);
/*
* Try again, duplicating all entries.
*/
let mut ir = irows.clone();
let mut ic = icols.clone();
let mut va = vals.clone();
irows.append(&mut ir);
icols.append(&mut ic);
vals.append(&mut va);
let cs_mat = CsMatrix::from_triplet(4, 5, &irows, &icols, &vals);
println!("Mat from triplet: {:?}", cs_mat);
assert!(cs_mat.is_sorted());
assert_eq!(cs_mat, cs_expected * 2.0);
let m: DMatrix<_> = cs_mat.into();
assert_eq!(m, expected * 2.0);
}

22
tests/sparse/cs_matrix.rs Normal file
View File

@ -0,0 +1,22 @@
#![cfg_attr(rustfmt, rustfmt_skip)]
use na::{Matrix4x5, Matrix5x4, CsMatrix};
#[test]
fn cs_transpose() {
let m = Matrix4x5::new(
4.0, 1.0, 4.0, 0.0, 9.0,
5.0, 6.0, 0.0, 8.0, 10.0,
9.0, 10.0, 11.0, 12.0, 0.0,
0.0, 0.0, 1.0, 0.0, 10.0
);
let cs: CsMatrix<_, _, _> = m.into();
assert!(cs.is_sorted());
let cs_transposed = cs.transpose();
assert!(cs_transposed.is_sorted());
let cs_transposed_mat: Matrix5x4<_> = cs_transposed.into();
assert_eq!(cs_transposed_mat, m.transpose())
}

View File

@ -0,0 +1,55 @@
#![cfg_attr(rustfmt, rustfmt_skip)]
use na::io;
use na::DMatrix;
#[test]
fn cs_matrix_market() {
let file_str = r#"
%%MatrixMarket matrix coordinate real general
%=================================================================================
%
% This ASCII file represents a sparse MxN matrix with L
% nonzeros in the following Matrix Market format:
%
% +----------------------------------------------+
% |%%MatrixMarket matrix coordinate real general | <--- header line
% |% | <--+
% |% comments | |-- 0 or more comment lines
% |% | <--+
% | M N L | <--- rows, columns, entries
% | I1 J1 A(I1, J1) | <--+
% | I2 J2 A(I2, J2) | |
% | I3 J3 A(I3, J3) | |-- L lines
% | . . . | |
% | IL JL A(IL, JL) | <--+
% +----------------------------------------------+
%
% Indices are 1-based, i.e. A(1,1) is the first element.
%
%=================================================================================
5 5 8
1 1 1.000e+00
2 2 1.050e+01
3 3 1.500e-02
1 4 6.000e+00
4 2 2.505e+02
4 4 -2.800e+02
4 5 3.332e+01
5 5 1.200e+01
"#;
let cs_mat = io::cs_matrix_from_matrix_market_str(file_str).unwrap();
println!("CS mat: {:?}", cs_mat);
let mat: DMatrix<_> = cs_mat.into();
let expected = DMatrix::from_row_slice(5, 5, &[
1.0, 0.0, 0.0, 6.0, 0.0,
0.0, 10.5, 0.0, 0.0, 0.0,
0.0, 0.0, 0.015, 0.0, 0.0,
0.0, 250.5, 0.0, -280.0, 33.32,
0.0, 0.0, 0.0, 0.0, 12.0,
]);
assert_eq!(mat, expected);
}

72
tests/sparse/cs_ops.rs Normal file
View File

@ -0,0 +1,72 @@
#![cfg_attr(rustfmt, rustfmt_skip)]
use na::{Matrix3x4, Matrix4x5, Matrix3x5, CsMatrix, Vector5, CsVector};
#[test]
fn axpy_cs() {
let mut v1 = Vector5::new(1.0, 2.0, 3.0, 4.0, 5.0);
let v2 = Vector5::new(10.0, 0.0, 30.0, 0.0, 50.0);
let expected = 5.0 * v2 + 10.0 * v1;
let cs: CsVector<_, _> = v2.into();
v1.axpy_cs(5.0, &cs, 10.0);
assert!(cs.is_sorted());
assert_eq!(v1, expected)
}
#[test]
fn cs_mat_mul() {
let m1 = Matrix3x4::new(
0.0, 1.0, 4.0, 0.0,
5.0, 6.0, 0.0, 8.0,
9.0, 10.0, 11.0, 12.0,
);
let m2 = Matrix4x5::new(
5.0, 6.0, 0.0, 8.0, 15.0,
9.0, 10.0, 11.0, 12.0, 0.0,
0.0, 0.0, 13.0, 0.0, 0.0,
0.0, 1.0, 4.0, 0.0, 14.0,
);
let sm1: CsMatrix<_, _, _> = m1.into();
let sm2: CsMatrix<_, _, _> = m2.into();
let mul = &sm1 * &sm2;
assert!(sm1.is_sorted());
assert!(sm2.is_sorted());
assert!(mul.is_sorted());
assert_eq!(Matrix3x5::from(mul), m1 * m2);
}
#[test]
fn cs_mat_add() {
let m1 = Matrix4x5::new(
4.0, 1.0, 4.0, 0.0, 0.0,
5.0, 6.0, 0.0, 8.0, 0.0,
9.0, 10.0, 11.0, 12.0, 0.0,
0.0, 0.0, 1.0, 0.0, 10.0
);
let m2 = Matrix4x5::new(
0.0, 1.0, 4.0, 0.0, 14.0,
5.0, 6.0, 0.0, 8.0, 15.0,
9.0, 10.0, 11.0, 12.0, 0.0,
0.0, 0.0, 13.0, 0.0, 0.0,
);
let sm1: CsMatrix<_, _, _> = m1.into();
let sm2: CsMatrix<_, _, _> = m2.into();
let sum = &sm1 + &sm2;
assert!(sm1.is_sorted());
assert!(sm2.is_sorted());
assert!(sum.is_sorted());
assert_eq!(Matrix4x5::from(sum), m1 + m2);
}

106
tests/sparse/cs_solve.rs Normal file
View File

@ -0,0 +1,106 @@
#![cfg_attr(rustfmt, rustfmt_skip)]
use na::{CsMatrix, CsVector, Matrix5, Vector5};
#[test]
fn cs_lower_triangular_solve() {
let a = Matrix5::new(
4.0, 1.0, 4.0, 0.0, 9.0,
5.0, 6.0, 0.0, 8.0, 10.0,
9.0, 10.0, 11.0, 12.0, 0.0,
0.0, -8.0, 3.0, 5.0, 9.0,
0.0, 0.0, 1.0, 0.0, -10.0
);
let b = Vector5::new(1.0, 2.0, 3.0, 4.0, 5.0);
let cs_a: CsMatrix<_, _, _> = a.into();
assert_eq!(cs_a.solve_lower_triangular(&b), a.solve_lower_triangular(&b));
}
#[test]
fn cs_tr_lower_triangular_solve() {
let a = Matrix5::new(
4.0, 1.0, 4.0, 0.0, 9.0,
5.0, 6.0, 0.0, 8.0, 10.0,
9.0, 10.0, 11.0, 12.0, 0.0,
0.0, -8.0, 3.0, 5.0, 9.0,
0.0, 0.0, 1.0, 0.0, -10.0
);
let b = Vector5::new(1.0, 2.0, 3.0, 4.0, 5.0);
let cs_a: CsMatrix<_, _, _> = a.into();
assert!(cs_a.tr_solve_lower_triangular(&b).is_some());
assert_eq!(cs_a.tr_solve_lower_triangular(&b), a.tr_solve_lower_triangular(&b));
// Singular case.
let a = Matrix5::new(
4.0, 1.0, 4.0, 0.0, 9.0,
5.0, 6.0, 0.0, 8.0, 10.0,
9.0, 10.0, 0.0, 12.0, 0.0,
0.0, -8.0, 3.0, 5.0, 9.0,
0.0, 0.0, 1.0, 0.0, -10.0
);
let cs_a: CsMatrix<_, _, _> = a.into();
assert!(cs_a.tr_solve_lower_triangular(&b).is_none());
}
#[test]
fn cs_lower_triangular_solve_cs() {
let a = Matrix5::new(
4.0, 1.0, 4.0, 0.0, 9.0,
5.0, 6.0, 0.0, 8.0, 10.0,
9.0, 10.0, 11.0, 12.0, 0.0,
0.0, -8.0, 3.0, 5.0, 9.0,
0.0, 0.0, 1.0, 0.0, -10.0
);
let b1 = Vector5::zeros();
let b2 = Vector5::new(1.0, 2.0, 3.0, 4.0, 5.0);
let b3 = Vector5::new(1.0, 0.0, 0.0, 4.0, 0.0);
let b4 = Vector5::new(0.0, 1.0, 0.0, 4.0, 5.0);
let b5 = Vector5::x();
let b6 = Vector5::y();
let b7 = Vector5::z();
let b8 = Vector5::w();
let b9 = Vector5::a();
let cs_a: CsMatrix<_, _, _> = a.into();
let cs_b1: CsVector<_, _> = Vector5::zeros().into();
let cs_b2: CsVector<_, _> = Vector5::new(1.0, 2.0, 3.0, 4.0, 5.0).into();
let cs_b3: CsVector<_, _> = Vector5::new(1.0, 0.0, 0.0, 4.0, 0.0).into();
let cs_b4: CsVector<_, _> = Vector5::new(0.0, 1.0, 0.0, 4.0, 5.0).into();
let cs_b5: CsVector<_, _> = Vector5::x().into();
let cs_b6: CsVector<_, _> = Vector5::y().into();
let cs_b7: CsVector<_, _> = Vector5::z().into();
let cs_b8: CsVector<_, _> = Vector5::w().into();
let cs_b9: CsVector<_, _> = Vector5::a().into();
assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b1).map(|v| { assert!(v.is_sorted()); v.into() }), a.solve_lower_triangular(&b1));
assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b5).map(|v| { assert!(v.is_sorted()); v.into() }), a.solve_lower_triangular(&b5));
assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b6).map(|v| { assert!(v.is_sorted()); v.into() }), a.solve_lower_triangular(&b6));
assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b7).map(|v| { assert!(v.is_sorted()); v.into() }), a.solve_lower_triangular(&b7));
assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b8).map(|v| { assert!(v.is_sorted()); v.into() }), a.solve_lower_triangular(&b8));
assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b9).map(|v| { assert!(v.is_sorted()); v.into() }), a.solve_lower_triangular(&b9));
assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b2).map(|v| { assert!(v.is_sorted()); v.into() }), a.solve_lower_triangular(&b2));
assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b3).map(|v| { assert!(v.is_sorted()); v.into() }), a.solve_lower_triangular(&b3));
assert_eq!(cs_a.solve_lower_triangular_cs(&cs_b4).map(|v| { assert!(v.is_sorted()); v.into() }), a.solve_lower_triangular(&b4));
// Singular case.
let a = Matrix5::new(
4.0, 1.0, 4.0, 0.0, 9.0,
5.0, 0.0, 0.0, 8.0, 10.0,
9.0, 10.0, 0.0, 12.0, 0.0,
0.0, -8.0, 3.0, 5.0, 9.0,
0.0, 0.0, 1.0, 0.0, -10.0
);
let cs_a: CsMatrix<_, _, _> = a.into();
assert!(cs_a.solve_lower_triangular_cs(&cs_b2).is_none());
assert!(cs_a.solve_lower_triangular_cs(&cs_b3).is_none());
assert!(cs_a.solve_lower_triangular_cs(&cs_b4).is_none());
}

8
tests/sparse/mod.rs Normal file
View File

@ -0,0 +1,8 @@
mod cs_cholesky;
mod cs_construction;
mod cs_conversion;
mod cs_matrix;
#[cfg(feature = "io")]
mod cs_matrix_market;
mod cs_ops;
mod cs_solve;