Implement SparseLU factorization

Add `solve_upper_triangular` to `CsrMatrix`

This allows a sparse matrix to be used for efficient solving with a dense LU decomposition.

Add CscBuilder

For partial construction of Csc matrices

Start working on actual LU factorization

Complete basic version of sparse LU factorization

Reformat to compile in old version

Add LU tests

Add upper triangular solve

Complete tests of Sparse LU factorization
julianknodt committed 2023-08-23 00:25:05 -07:00
parent 0b89950fca
commit 951ec4b190
15 changed files with 1122 additions and 8 deletions
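Taken together, these changes add a sparse left-looking LU pipeline. A rough usage sketch, using only the API introduced in this commit (the numeric values are illustrative):

use nalgebra_sparse::factorization::LeftLookingLUFactorization;
use nalgebra_sparse::CscBuilder;

fn main() {
    // Build a 3x3 lower triangular matrix in ascending (col, row) insertion order.
    let mut builder = CscBuilder::new(3, 3);
    builder.insert(0, 0, 2.0).unwrap();
    builder.insert(1, 0, 1.0).unwrap();
    builder.insert(1, 1, 1.0).unwrap();
    builder.insert(2, 2, 4.0).unwrap();
    let a = builder.build();

    // Factor A = L * U (L keeps an implicit unit diagonal) and solve A x = b.
    let lu = LeftLookingLUFactorization::new(&a);
    let x = lu.solve(&[2.0, 2.0, 8.0]);
    assert_eq!(x.as_slice(), &[1.0, 1.0, 2.0][..]);
}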


@@ -4,7 +4,7 @@ use num_traits::One;
use nalgebra::Scalar;
use crate::pattern::{BuilderInsertError, SparsityPattern, SparsityPatternBuilder};
use crate::utils::{apply_permutation, compute_sort_permutation};
use crate::{SparseEntry, SparseEntryMut, SparseFormatError, SparseFormatErrorKind};
@@ -700,3 +700,56 @@ where
Ok(())
}
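/// An incremental builder for a `CsMatrix`, used to construct it one entry at a time.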
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct CsBuilder<T> {
sparsity_builder: SparsityPatternBuilder,
values: Vec<T>,
}
impl<T> CsBuilder<T> {
/// Constructs a new CsBuilder of the given size.
pub fn new(major_dim: usize, minor_dim: usize) -> Self {
Self {
sparsity_builder: SparsityPatternBuilder::new(major_dim, minor_dim),
values: vec![],
}
}
/// Given an existing CsMatrix, allows for modification by converting it into a builder.
pub fn from_mat(mat: CsMatrix<T>) -> Self {
let CsMatrix {
sparsity_pattern,
values,
} = mat;
CsBuilder {
sparsity_builder: SparsityPatternBuilder::from(sparsity_pattern),
values,
}
}
/// Backtracks to a given major index
pub fn revert_to_major(&mut self, maj: usize) -> bool {
if !self.sparsity_builder.revert_to_major(maj) {
return false;
}
self.values.truncate(self.sparsity_builder.num_entries());
true
}
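/// Inserts a value at (`maj`, `min`). Entries must be added in ascending major order,
/// and in strictly ascending minor order within a major.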
pub fn insert(&mut self, maj: usize, min: usize, val: T) -> Result<(), BuilderInsertError> {
self.sparsity_builder.insert(maj, min)?;
self.values.push(val);
Ok(())
}
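/// Consumes the builder and returns the constructed `CsMatrix`.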
pub fn build(self) -> CsMatrix<T> {
let CsBuilder {
sparsity_builder,
values,
} = self;
let sparsity_pattern = sparsity_builder.build();
CsMatrix {
sparsity_pattern,
values,
}
}
}


@@ -7,12 +7,14 @@
mod csc_serde;
use crate::cs;
use crate::cs::{CsBuilder, CsLane, CsLaneIter, CsLaneIterMut, CsLaneMut, CsMatrix};
use crate::csr::CsrMatrix;
use crate::pattern::{
BuilderInsertError, SparsityPattern, SparsityPatternFormatError, SparsityPatternIter,
};
use crate::{SparseEntry, SparseEntryMut, SparseFormatError, SparseFormatErrorKind};
use nalgebra::{RealField, Scalar};
use num_traits::One;
use std::slice::{Iter, IterMut};
@@ -553,6 +555,269 @@ impl<T> CscMatrix<T> {
self.filter(|i, j, _| i >= j)
}
/// Solves the lower triangular system `self * out = b`, where `self` is an NxN matrix
/// and `b` is a dense column vector of length N.
/// If `unit_diagonal` is true, the diagonal of `self` is treated as all ones.
pub fn dense_lower_triangular_solve(&self, b: &[T], out: &mut [T], unit_diagonal: bool)
where
T: RealField + Copy,
{
assert_eq!(self.nrows(), self.ncols());
assert_eq!(self.ncols(), b.len());
assert_eq!(out.len(), b.len());
out.copy_from_slice(b);
let n = b.len();
for i in 0..n {
let col = self.col(i);
let mut iter = col.row_indices().iter().zip(col.values().iter()).peekable();
while iter.next_if(|n| *n.0 < i).is_some() {}
if let Some(n) = iter.peek() {
if *n.0 == i && !unit_diagonal {
assert!(*n.0 <= i);
out[i] /= *n.1;
iter.next();
}
}
let mul = out[i];
for (&ri, &v) in col.row_indices().iter().zip(col.values().iter()) {
use std::cmp::Ordering::*;
// ensure that only using the lower part
match ri.cmp(&i) {
Greater => out[ri] -= v * mul,
Equal | Less => {}
}
}
}
}
/// Solves the upper triangular system `self * out = b`, where `self` is an NxN matrix
/// and `b` is a dense column vector of length N.
pub fn dense_upper_triangular_solve(&self, b: &[T], out: &mut [T])
where
T: RealField + Copy,
{
assert_eq!(self.nrows(), self.ncols());
assert_eq!(self.ncols(), b.len());
assert_eq!(out.len(), b.len());
out.copy_from_slice(b);
let n = b.len();
for i in (0..n).rev() {
let col = self.col(i);
let mut iter = col
.row_indices()
.iter()
.zip(col.values().iter())
.rev()
.peekable();
while iter.next_if(|n| *n.0 > i).is_some() {}
if let Some(n) = iter.peek() {
if *n.0 == i {
out[i] /= *n.1;
iter.next();
}
}
// introduce a NaN, intentionally, if the diagonal doesn't have a value.
let mul = out[i];
for (&row, &v) in iter {
use std::cmp::Ordering::*;
match row.cmp(&i) {
Less => out[row] -= v * mul,
Equal | Greater => {}
}
}
}
}
/// Solves a sparse upper triangular system `Ax = b`, with both the matrix and the
/// right-hand side vector sparse.
/// `out_sparsity_pattern` should be precomputed with
/// `SparsityPattern::sparse_upper_triangular_solve`, and must be sorted in ascending order.
pub fn sparse_upper_triangular_solve_sorted(
&self,
b_idxs: &[usize],
b: &[T],
out_sparsity_pattern: &[usize],
out: &mut [T],
) where
T: RealField + Copy,
{
assert_eq!(self.nrows(), self.ncols());
assert_eq!(b_idxs.len(), b.len());
assert!(b_idxs.iter().all(|&bi| bi < self.ncols()));
assert_eq!(out_sparsity_pattern.len(), out.len());
assert!(out_sparsity_pattern.iter().all(|&bi| bi < self.ncols()));
// initialize out with b
out.fill(T::zero());
for (&bv, &bi) in b.iter().zip(b_idxs.iter()) {
let out_pos = out_sparsity_pattern.iter().position(|&p| p == bi).unwrap();
out[out_pos] = bv;
}
for (i, &row) in out_sparsity_pattern.iter().enumerate().rev() {
let col = self.col(row);
let mut iter = col
.row_indices()
.iter()
.zip(col.values().iter())
.rev()
.peekable();
while iter.next_if(|n| *n.0 > row).is_some() {}
match iter.peek() {
Some((&r, &l_val)) if r == row => out[i] /= l_val,
// here it now becomes implicitly 0,
// likely this should introduce NaN or some other behavior.
_ => {}
}
let mul = out[i];
// propagate the solved value into the rows above the diagonal of column `row`
for (ni, &nrow) in out_sparsity_pattern[..i].iter().enumerate().rev() {
assert!(nrow < row);
while iter.next_if(|n| *n.0 > nrow).is_some() {}
let u_val = match iter.peek() {
Some((&r, &u_val)) if r == nrow => u_val,
_ => continue,
};
out[ni] -= u_val * mul;
}
}
}
/// Solves a sparse lower triangular system `Ax = b`, with both the matrix and the
/// right-hand side vector sparse.
/// `out_sparsity_pattern` should be precomputed with
/// `SparsityPattern::sparse_lower_triangular_solve`.
/// Assumes that the diagonal of the sparse matrix is all 1 if `assume_unit` is true.
pub fn sparse_lower_triangular_solve(
&self,
b_idxs: &[usize],
b: &[T],
// idx -> row
// for now, is permitted to be unsorted
// TODO maybe would be better to enforce sorted, but would have to sort internally.
out_sparsity_pattern: &[usize],
out: &mut [T],
assume_unit: bool,
) where
T: RealField + Copy,
{
assert_eq!(self.nrows(), self.ncols());
assert_eq!(b.len(), b_idxs.len());
assert!(b_idxs.iter().all(|&bi| bi < self.ncols()));
assert_eq!(out_sparsity_pattern.len(), out.len());
assert!(out_sparsity_pattern.iter().all(|&i| i < self.ncols()));
// `windows(2)` avoids an underflow when the pattern is empty
let is_sorted = out_sparsity_pattern
.windows(2)
.all(|w| w[0] < w[1]);
if is_sorted {
return self.sparse_lower_triangular_solve_sorted(
b_idxs,
b,
out_sparsity_pattern,
out,
assume_unit,
);
}
// initialize out with b
out.fill(T::zero());
for (&bv, &bi) in b.iter().zip(b_idxs.iter()) {
let out_pos = out_sparsity_pattern.iter().position(|&p| p == bi).unwrap();
out[out_pos] = bv;
}
for (i, &row) in out_sparsity_pattern.iter().enumerate() {
let col = self.col(row);
if !assume_unit {
if let Some(l_val) = col.get_entry(row) {
out[i] /= l_val.into_value();
} else {
// diagonal is 0, non-invertible
out[i] /= T::zero();
}
}
let mul = out[i];
for (ni, &nrow) in out_sparsity_pattern.iter().enumerate() {
if nrow <= row {
continue;
}
// TODO in a sorted version may be able to iterate without
// having the cost of binary search at each iteration
let l_val = if let Some(l_val) = col.get_entry(nrow) {
l_val.into_value()
} else {
continue;
};
out[ni] -= l_val * mul;
}
}
}
/// Solves a sparse lower triangular system `Ax = b`, with both the matrix and the
/// right-hand side vector sparse.
/// `out_sparsity_pattern` should be precomputed with
/// `SparsityPattern::sparse_lower_triangular_solve`.
///
/// `out_sparsity_pattern` must also be pre-sorted in ascending order.
///
/// Assumes that the diagonal of the sparse matrix is all 1 if `assume_unit` is true.
pub fn sparse_lower_triangular_solve_sorted(
&self,
// input vector idxs & values
b_idxs: &[usize],
b: &[T],
// idx -> row
// must be sorted in ascending order for this variant
out_sparsity_pattern: &[usize],
out: &mut [T],
assume_unit: bool,
) where
T: RealField + Copy,
{
assert_eq!(self.nrows(), self.ncols());
assert_eq!(b.len(), b_idxs.len());
assert!(b_idxs.iter().all(|&bi| bi < self.ncols()));
assert_eq!(out_sparsity_pattern.len(), out.len());
assert!(out_sparsity_pattern.iter().all(|&i| i < self.ncols()));
// initialize out with b
// TODO can make this more efficient by keeping two iterators in sorted order
out.fill(T::zero());
for (&bv, &bi) in b.iter().zip(b_idxs.iter()) {
let out_pos = out_sparsity_pattern.iter().position(|&p| p == bi).unwrap();
out[out_pos] = bv;
}
// end init
// assuming that the output sparsity pattern is sorted
// iterate thru
for (i, &row) in out_sparsity_pattern.iter().enumerate() {
let col = self.col(row);
let mut iter = col.row_indices().iter().zip(col.values().iter()).peekable();
if !assume_unit {
while iter.next_if(|n| *n.0 < row).is_some() {}
match iter.peek() {
Some((&r, &l_val)) if r == row => out[i] /= l_val,
// here it now becomes implicitly 0,
// likely this should introduce NaN or some other behavior.
_ => {}
}
}
let mul = out[i];
for (ni, &nrow) in out_sparsity_pattern.iter().enumerate().skip(i + 1) {
assert!(nrow > row);
while iter.next_if(|n| *n.0 < nrow).is_some() {}
let l_val = match iter.peek() {
Some((&r, &l_val)) if r == nrow => l_val,
_ => continue,
};
out[ni] -= l_val * mul;
}
}
}
/// Returns the diagonal of the matrix as a sparse matrix.
#[must_use]
pub fn diagonal_as_csc(&self) -> Self
@@ -784,3 +1049,30 @@ where
self.lane_iter.next().map(|lane| CscColMut { lane })
}
}
/// An incremental builder for a Csc matrix.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct CscBuilder<T>(CsBuilder<T>);
impl<T> CscBuilder<T> {
/// Constructs a new instance of a Csc Builder.
pub fn new(rows: usize, cols: usize) -> Self {
Self(CsBuilder::new(cols, rows))
}
/// Convert back from a matrix to a CscBuilder.
pub fn from_mat(mat: CscMatrix<T>) -> Self {
Self(CsBuilder::from_mat(mat.cs))
}
/// Backtracks back to column `col`, deleting all entries ahead of it.
pub fn revert_to_col(&mut self, col: usize) -> bool {
self.0.revert_to_major(col)
}
/// Inserts a value into the builder. Must be called in ascending col, row order.
pub fn insert(&mut self, row: usize, col: usize, val: T) -> Result<(), BuilderInsertError> {
self.0.insert(col, row, val)
}
/// Converts this builder into a valid CscMatrix.
pub fn build(self) -> CscMatrix<T> {
CscMatrix { cs: self.0.build() }
}
}
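A short sketch of the intended CscBuilder workflow, including reopening a finished matrix with `from_mat` and rewinding with `revert_to_col` (the numeric values are illustrative):

use nalgebra_sparse::CscBuilder;

#[test]
fn csc_builder_example() {
    // Entries must arrive in ascending (col, row) order.
    let mut builder = CscBuilder::new(3, 3);
    assert!(builder.insert(0, 0, 1.0).is_ok());
    assert!(builder.insert(2, 0, 3.0).is_ok());
    assert!(builder.insert(1, 1, 5.0).is_ok());
    // Going backwards (here: (0, 0) again) is rejected.
    assert!(builder.insert(0, 0, 9.0).is_err());
    let mat = builder.build();
    assert_eq!(mat.nnz(), 3);

    // A finished matrix can be reopened, rewound to a column, and extended again.
    let mut builder = CscBuilder::from_mat(mat);
    assert!(builder.revert_to_col(2));
    assert!(builder.insert(2, 2, 7.0).is_ok());
    assert_eq!(builder.build().nnz(), 4);
}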


@@ -12,7 +12,7 @@ use crate::csc::CscMatrix;
use crate::pattern::{SparsityPattern, SparsityPatternFormatError, SparsityPatternIter};
use crate::{SparseEntry, SparseEntryMut, SparseFormatError, SparseFormatErrorKind};
use nalgebra::{DMatrix, DMatrixView, RealField, Scalar};
use num_traits::One;
use std::slice::{Iter, IterMut};
@@ -573,6 +573,55 @@ impl<T> CsrMatrix<T> {
{
CscMatrix::from(self).transpose_as_csr()
}
/// Solves the equation `Ax = b`, treating `self` as an upper triangular matrix.
/// If `self` is not upper triangular, elements in the lower triangle below the diagonal
/// will be ignored.
///
/// If `self` has zeros along the diagonal, returns `None`.
///
/// Panics:
/// Panics if `self` and `b` have incompatible shapes, specifically if `b`
/// has a different number of rows than `self`.
pub fn solve_upper_triangular<'a>(&self, b: impl Into<DMatrixView<'a, T>>) -> Option<DMatrix<T>>
where
T: RealField + Scalar,
{
// https://www.nicolasboumal.net/papers/MAT321_Lecture_notes_Boumal_2019.pdf
// page 48
let b: DMatrixView<'a, T> = b.into();
assert_eq!(b.nrows(), self.nrows());
let out_cols = b.ncols();
let out_rows = self.nrows();
let mut out = DMatrix::zeros(out_rows, out_cols);
for r in (0..out_rows).rev() {
let row = self.row(r);
// only take upper triangle elements
let mut row_iter = row
.col_indices()
.iter()
.copied()
.zip(row.values().iter())
.filter(|&(c, _)| c >= r);
let (c, div) = row_iter.next()?;
// This implies there is a 0 on the diagonal
if c != r || div.is_zero() {
return None;
}
for c in 0..out_cols {
let numer = b.index((r, c)).clone();
// subtract the contributions of the already-solved entries of `x` (stored in `out`)
let numer = numer
- row_iter
.clone()
.map(|(a_col, val)| val.clone() * out.index((a_col, c)).clone())
.fold(T::zero(), |acc, n| acc + n);
*out.index_mut((r, c)) = numer / div.clone();
}
}
Some(out)
}
}
impl<T> Default for CsrMatrix<T> {
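A small sketch of the new `solve_upper_triangular` call on a hand-built matrix; the expected values follow from ordinary back substitution:

use nalgebra::DMatrix;
use nalgebra_sparse::CsrMatrix;

#[test]
fn solve_upper_triangular_example() {
    // U = [2 1]
    //     [0 4]
    let u = CsrMatrix::from(&DMatrix::from_row_slice(2, 2, &[2.0, 1.0, 0.0, 4.0]));
    let b = DMatrix::from_row_slice(2, 1, &[3.0, 8.0]);
    // Back substitution: x1 = 8 / 4 = 2, then x0 = (3 - 1 * 2) / 2 = 0.5.
    let x = u.solve_upper_triangular(&b).unwrap();
    assert_eq!(x, DMatrix::from_row_slice(2, 1, &[0.5, 2.0]));

    // A zero (or missing) diagonal entry yields `None`.
    let singular = CsrMatrix::from(&DMatrix::from_row_slice(2, 2, &[2.0, 1.0, 0.0, 0.0]));
    assert!(singular.solve_upper_triangular(&b).is_none());
}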


@@ -0,0 +1,103 @@
use crate::SparseEntryMut;
use crate::{CscBuilder, CscMatrix};
use nalgebra::{DMatrix, RealField};
/// Constructs an LU Factorization using a left-looking approach.
/// This means it will construct each column, starting from the leftmost one.
pub struct LeftLookingLUFactorization<T> {
/// A single matrix stores both the lower and upper triangular components
l_u: CscMatrix<T>,
}
impl<T: RealField + Copy> LeftLookingLUFactorization<T> {
/// Returns the upper triangular factor `U` of this factorization.
pub fn u(&self) -> CscMatrix<T> {
self.l_u.upper_triangle()
}
/// Returns the joint L\U matrix. Here, `L` implicitly has 1 along the diagonal.
pub fn lu(&self) -> &CscMatrix<T> {
&self.l_u
}
/// Returns the lower triangular factor `L` of this factorization, with an explicit unit diagonal.
pub fn l(&self) -> CscMatrix<T> {
let mut l = self.l_u.lower_triangle();
let n = self.l_u.nrows();
for i in 0..n {
if let SparseEntryMut::NonZero(v) = l.index_entry_mut(i, i) {
*v = T::one();
} else {
unreachable!();
}
}
l
}
/// Computes `x` in `LUx = b`, where `b` is a dense vector.
pub fn solve(&self, b: &[T]) -> DMatrix<T> {
let mut y = vec![T::zero(); b.len()];
// Implementation: Solve two systems: Ly = b, then Ux = y.
self.l_u.dense_lower_triangular_solve(b, &mut y, true);
let mut out = y.clone();
self.l_u.dense_upper_triangular_solve(&y, &mut out);
DMatrix::from_vec(b.len(), 1, out)
}
/// Construct a new sparse LU factorization
/// from a given CSC matrix.
pub fn new(a: &CscMatrix<T>) -> Self {
assert_eq!(a.nrows(), a.ncols());
let n = a.nrows();
// this initially starts as an identity matrix.
// but the ones are all implicit.
let mut csc_builder = CscBuilder::new(n, n);
let mut val_buf = vec![];
let mut pat_buf = vec![];
for (ci, col) in a.col_iter().enumerate() {
let curr_mat = csc_builder.build();
curr_mat
.pattern()
.sparse_lower_triangular_solve(col.row_indices(), &mut pat_buf);
pat_buf.sort_unstable();
val_buf.resize_with(pat_buf.len(), T::zero);
// Solve the current column, assuming that it is lower triangular
curr_mat.sparse_lower_triangular_solve_sorted(
col.row_indices(),
col.values(),
&pat_buf,
&mut val_buf,
true,
);
// convert builder back to matrix
csc_builder = CscBuilder::from_mat(curr_mat);
assert!(csc_builder.revert_to_col(ci));
let mut ukk = T::zero();
for (row, val) in pat_buf.drain(..).zip(val_buf.drain(..)) {
use std::cmp::Ordering;
let val = match row.cmp(&ci) {
Ordering::Less => val,
Ordering::Equal => {
ukk = val;
val
}
Ordering::Greater => {
assert_ne!(ukk, T::zero());
val / ukk
}
};
assert_eq!(csc_builder.insert(row, ci, val), Ok(()));
}
}
let l_u = csc_builder.build();
Self { l_u }
}
}
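For reference, the same left-looking update can be written against a dense matrix. This is only an illustrative sketch of the algorithm `new` implements above (for each column: apply the already-computed columns of `L`, then divide the subdiagonal by the pivot); the helper name is made up for illustration and is not part of the commit:

use nalgebra::DMatrix;

/// Illustrative dense left-looking LU (no pivoting): returns the packed L\U matrix,
/// where `L` has an implicit unit diagonal.
fn dense_left_looking_lu(a: &DMatrix<f64>) -> DMatrix<f64> {
    let n = a.nrows();
    assert_eq!(n, a.ncols());
    let mut lu = DMatrix::zeros(n, n);
    for j in 0..n {
        // x starts as column j of A and is updated against the already-computed columns of L.
        let mut x: Vec<f64> = (0..n).map(|i| a[(i, j)]).collect();
        for k in 0..j {
            // lu[(i, k)] for i > k holds L[i, k]; the unit diagonal of L is implicit.
            let xk = x[k];
            for i in k + 1..n {
                x[i] -= lu[(i, k)] * xk;
            }
        }
        // U[0..=j, j] = x[0..=j]; L[j+1.., j] = x[j+1..] / pivot.
        let pivot = x[j];
        for i in 0..=j {
            lu[(i, j)] = x[i];
        }
        for i in j + 1..n {
            lu[(i, j)] = x[i] / pivot;
        }
    }
    lu
}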


@@ -2,5 +2,7 @@
//!
//! Currently, the only factorization provided here is the [`CscCholesky`] factorization.
mod cholesky;
mod lu;
pub use cholesky::*;
pub use lu::*;


@@ -171,7 +171,7 @@ use std::error::Error;
use std::fmt;
pub use self::coo::CooMatrix;
pub use self::csc::{CscBuilder, CscMatrix};
pub use self::csr::CsrMatrix;
/// Errors produced by functions that expect well-formed sparse format data.
@@ -279,4 +279,11 @@ impl<'a, T: Clone + Zero> SparseEntryMut<'a, T> {
SparseEntryMut::Zero => T::zero(),
}
}
/// If the entry is nonzero, returns `Some(&mut value)`, otherwise returns `None`.
pub fn nonzero(self) -> Option<&'a mut T> {
match self {
SparseEntryMut::NonZero(v) => Some(v),
SparseEntryMut::Zero => None,
}
}
}


@@ -61,6 +61,14 @@ impl SparsityPattern {
minor_dim,
}
}
/// Creates the sparsity pattern of an identity matrix of size `n`.
pub fn identity(n: usize) -> Self {
Self {
major_offsets: (0..=n).collect(),
minor_indices: (0..n).collect(),
minor_dim: n,
}
}
/// The offsets for the major dimension.
#[inline]
@@ -109,6 +117,13 @@
pub fn lane(&self, major_index: usize) -> &[usize] {
self.get_lane(major_index).unwrap()
}
/// Returns an iterator over all lanes in this sparsity pattern, in order.
/// Does not omit empty lanes.
#[inline]
#[must_use]
pub fn lanes(&self) -> impl Iterator<Item = &[usize]> {
(0..self.major_offsets.len() - 1).map(move |r| self.lane(r))
}
/// Get the lane at the given index, or `None` if out of bounds.
#[inline]
@@ -297,6 +312,198 @@ impl SparsityPattern {
)
.expect("Internal error: Transpose should never fail.")
}
/// Computes the output sparsity pattern of `x` in `Ax = b`,
/// where `A`'s nonzero pattern is given by `self` and the non-zero indices
/// of the vector `b` are specified as a slice.
/// The output is not necessarily sorted, but is in topological order.
/// Treats `self` as lower triangular, even if there are elements in the upper triangle.
/// Acts as if `b` is one major lane (i.e. a CSC matrix with one column).
pub fn sparse_lower_triangular_solve(&self, b: &[usize], out: &mut Vec<usize>) {
assert!(b.iter().all(|&i| i < self.major_dim()));
out.clear();
// From a given starting column, traverses and finds all reachable indices.
fn reach(sp: &SparsityPattern, j: usize, out: &mut Vec<usize>) {
// already traversed
if out.contains(&j) {
return;
}
out.push(j);
for &i in sp.lane(j) {
if i < j {
continue;
}
reach(sp, i, out);
}
}
for &i in b {
reach(&self, i, out);
}
}
/// Computes the output sparsity pattern of `x` in `Ax = b`,
/// where `A`'s nonzero pattern is given by `self` and the non-zero indices
/// of the vector `b` are specified as a slice.
/// The output is not necessarily sorted, but is in topological order.
/// Treats `self` as upper triangular, even if there are elements in the lower triangle.
/// Acts as if `b` is one major lane (i.e. a CSC matrix with one column).
pub fn sparse_upper_triangular_solve(&self, b: &[usize], out: &mut Vec<usize>) {
assert!(b.iter().all(|&i| i < self.major_dim()));
out.clear();
// From a given starting column, traverses and finds all reachable indices.
fn reach(sp: &SparsityPattern, j: usize, out: &mut Vec<usize>) {
// already traversed
if out.contains(&j) {
return;
}
out.push(j);
// iteration order here does not matter, but technically it should be rev?
for &i in sp.lane(j).iter().rev() {
if i > j {
continue;
}
reach(sp, i, out);
}
}
for &i in b {
reach(&self, i, out);
}
}
/// Symbolic left-looking sparse LU decomposition, following Gilbert/Peierls.
/// Returns the combined sparsity pattern of the `L` and `U` factors.
pub fn left_looking_lu_decomposition(&self) -> SparsityPattern {
assert_eq!(self.major_dim(), self.minor_dim());
let n = self.minor_dim();
let mut sp = SparsityPatternBuilder::new(n, n);
let mut x = vec![];
for col in 0..n {
sp.valid_partial()
.sparse_lower_triangular_solve(self.lane(col), &mut x);
x.sort_unstable();
for &row in &x {
assert!(sp.insert(col, row).is_ok());
}
}
sp.build()
}
}
/// A builder that allows for incrementally constructing a sparsity pattern.
/// It requires elements to be added in sorted order: for each element, the major
/// must be >= the previous element's major, and if the major is the same, the minor
/// must be in strictly ascending order.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct SparsityPatternBuilder {
buf: SparsityPattern,
major_dim: usize,
}
/// An error when inserting into a [`SparsityPatternBuilder`].
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum BuilderInsertError {
/// The major index was lower than that of a previously inserted entry.
MajorTooLow,
/// The minor index was not strictly greater than the previous minor inserted for the same major.
MinorTooLow,
}
impl SparsityPatternBuilder {
/// Constructs a new empty builder.
pub fn new(major_dim: usize, minor_dim: usize) -> Self {
Self {
buf: SparsityPattern {
major_offsets: vec![0],
minor_indices: vec![],
minor_dim,
},
major_dim,
}
}
/// The number of non-zero entries inserted into `self`.
pub fn num_entries(&self) -> usize {
self.buf.minor_indices.len()
}
/// Inserts an entry at (`maj`, `min`). Entries must be inserted in ascending major
/// order, and in strictly ascending minor order within the same major.
pub fn insert(&mut self, maj: usize, min: usize) -> Result<(), BuilderInsertError> {
assert!(maj < self.major_dim);
assert!(min < self.buf.minor_dim);
let curr_major = self.buf.major_dim();
// cannot go backwards in major
if maj < curr_major {
return Err(BuilderInsertError::MajorTooLow);
}
// cannot go backwards in minor
if maj == curr_major
&& *self.buf.major_offsets.last().unwrap() < self.buf.minor_indices.len()
&& !self.buf.minor_indices.is_empty()
&& min <= *self.buf.minor_indices.last().unwrap()
{
return Err(BuilderInsertError::MinorTooLow);
}
// add any advances in the major dimension.
for _ in curr_major..maj {
self.buf.major_offsets.push(self.buf.minor_indices.len());
}
self.buf.minor_indices.push(min);
Ok(())
}
/// Returns a valid partial sparsity pattern.
/// All the major lanes up to the current insertion will be completed.
pub(crate) fn valid_partial(&mut self) -> &SparsityPattern {
if *self.buf.major_offsets.last().unwrap() != self.buf.minor_indices.len() {
self.buf.major_offsets.push(self.buf.minor_indices.len());
}
&self.buf
}
/// Consumes self and outputs the constructed `SparsityPattern`.
/// Any majors that were never inserted into are completed as empty lanes, so the
/// resulting pattern always has `major_dim` major lanes.
#[inline]
pub fn build(mut self) -> SparsityPattern {
self.buf
.major_offsets
.resize(self.major_dim + 1, self.buf.minor_indices.len());
assert_eq!(self.buf.major_dim(), self.major_dim);
self.buf
}
/// Returns the current major being modified by `self`.
pub fn current_major(&self) -> usize {
assert!(!self.buf.major_offsets.is_empty());
self.buf.major_offsets.len() - 1
}
/// Reverts the major index of `self` back to `maj`, deleting any entries ahead of it.
/// Preserves entries in `maj`.
pub fn revert_to_major(&mut self, maj: usize) -> bool {
// `major_offsets[maj + 1]` must exist, otherwise major `maj` has not been completed yet.
if maj + 1 >= self.buf.major_offsets.len() {
return false;
}
// number of entries in majors 0..=maj
let last = self.buf.major_offsets[maj + 1];
self.buf.major_offsets.truncate(maj + 1);
self.buf.minor_indices.truncate(last);
true
}
/// Converts an existing `SparsityPattern` back into a builder, so that
/// trailing majors can be reverted or extended.
pub fn from(sp: SparsityPattern) -> Self {
SparsityPatternBuilder {
major_dim: sp.major_dim(),
buf: sp,
}
}
}
impl Default for SparsityPattern {
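A sketch of how the symbolic and numeric halves are meant to be combined, mirroring what `LeftLookingLUFactorization::new` does: first compute the reachable output indices from the pattern, then run the sorted numeric solve over exactly those indices (the numeric values are illustrative):

use nalgebra_sparse::{CooMatrix, CscMatrix};

#[test]
fn two_phase_sparse_lower_solve() {
    // L = [1 0 0]
    //     [2 1 0]
    //     [0 3 1]
    let l: CscMatrix<f64> = (&CooMatrix::try_from_triplets(
        3,
        3,
        vec![0, 1, 1, 2, 2],
        vec![0, 0, 1, 1, 2],
        vec![1.0, 2.0, 1.0, 3.0, 1.0],
    )
    .unwrap())
        .into();

    // b has a single nonzero in row 0.
    let (b_idxs, b_vals) = (vec![0], vec![1.0]);

    // Symbolic phase: rows reachable from row 0 through the lower triangle.
    let mut x_pattern = vec![];
    l.pattern().sparse_lower_triangular_solve(&b_idxs, &mut x_pattern);
    x_pattern.sort_unstable();
    assert_eq!(x_pattern, vec![0, 1, 2]);

    // Numeric phase over the precomputed pattern.
    let mut x = vec![0.0; x_pattern.len()];
    l.sparse_lower_triangular_solve_sorted(&b_idxs, &b_vals, &x_pattern, &mut x, false);
    // Forward substitution: x0 = 1, x1 = -2 * 1 = -2, x2 = -3 * (-2) = 6.
    assert_eq!(x, vec![1.0, -2.0, 6.0]);
}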


@@ -0,0 +1,46 @@
use nalgebra_sparse::factorization::LeftLookingLUFactorization;
use nalgebra_sparse::CscBuilder;
#[test]
fn test_basic_lu_factorization() {
let n = 5;
let mut a = CscBuilder::new(n, n);
for i in 0..n {
assert!(a.insert(i, i, 1.).is_ok());
}
// construct an identity matrix as a basic test
let a = a.build();
let lu_fact = LeftLookingLUFactorization::new(&a);
assert_eq!(lu_fact.u(), a);
}
#[test]
fn test_basic_lu_factorization_with_one_more_entry() {
let n = 3;
let mut a = CscBuilder::new(n, n);
for i in 0..n {
assert!(a.insert(i, i, if i == 0 { 0.5 } else { 1. }).is_ok());
if i == 0 {
assert!(a.insert(1, 0, 1.).is_ok());
}
}
// construct a lower triangular matrix with a single off-diagonal entry
let a = a.build();
let lu_fact = LeftLookingLUFactorization::new(&a);
let mut ground_truth = CscBuilder::new(n, n);
for i in 0..n {
assert!(ground_truth
.insert(i, i, if i == 0 { 0.5 } else { 1. })
.is_ok());
if i == 0 {
assert!(ground_truth.insert(1, 0, 2.).is_ok());
}
}
let gt = ground_truth.build();
assert_eq!(lu_fact.lu(), &gt);
}


@@ -0,0 +1,131 @@
use nalgebra_sparse::pattern::{SparsityPattern, SparsityPatternBuilder};
#[test]
fn sparsity_identity() {
let n = 100;
let speye = SparsityPattern::identity(n);
for (i, j) in speye.entries() {
assert_eq!(i, j);
}
assert_eq!(speye.major_dim(), n);
assert_eq!(speye.minor_dim(), n);
}
#[test]
fn lower_sparse_solve() {
// just a smaller number so it's easier to debug
let n = 8;
let speye = SparsityPattern::identity(n);
let mut buf = vec![];
speye.sparse_lower_triangular_solve(&[0, 5], &mut buf);
assert_eq!(buf, vec![0, 5]);
// test case from
// https://www.youtube.com/watch?v=1dGRTOwBkQs&ab_channel=TimDavis
let mut builder = SparsityPatternBuilder::new(14, 14);
// building CscMatrix, so it will be col, row
#[rustfmt::skip]
let indices = vec![
(0, 0), (0, 2),
(1, 1), (1, 3), (1, 6), (1, 8),
(2,2), (2,4), (2,7),
(3,3), (3,8),
(4,4), (4,7),
(5,5), (5,8), (5,9),
(6,6), (6,9), (6,10),
(7,7), (7,9),
(8,8), (8,11), (8,12),
(9,9), (9,10), (9, 12), (9, 13),
(10,10), (10,11), (10,12),
(11,11), (11,12),
(12,12), (12,13),
(13,13),
];
for (maj, min) in indices.iter().copied() {
assert!(builder.insert(maj, min).is_ok());
}
let sp = builder.build();
assert_eq!(sp.major_dim(), 14);
assert_eq!(sp.minor_dim(), 14);
assert_eq!(sp.nnz(), indices.len());
for ij in sp.entries() {
assert!(indices.contains(&ij));
}
sp.sparse_lower_triangular_solve(&[3, 5], &mut buf);
assert_eq!(buf, vec![3, 8, 11, 12, 13, 5, 9, 10]);
}
// this test is a flipped version of lower sparse solve
#[test]
fn upper_sparse_solve() {
// just a smaller number so it's easier to debug
let n = 8;
let speye = SparsityPattern::identity(n);
let mut buf = vec![];
speye.sparse_lower_triangular_solve(&[0, 5], &mut buf);
assert_eq!(buf, vec![0, 5]);
// test case from
// https://www.youtube.com/watch?v=1dGRTOwBkQs&ab_channel=TimDavis
let mut builder = SparsityPatternBuilder::new(14, 14);
// building CscMatrix, so it will be col, row
#[rustfmt::skip]
let mut indices = vec![
(0, 0), (0, 2),
(1, 1), (1, 3), (1, 6), (1, 8),
(2,2), (2,4), (2,7),
(3,3), (3,8),
(4,4), (4,7),
(5,5), (5,8), (5,9),
(6,6), (6,9), (6,10),
(7,7), (7,9),
(8,8), (8,11), (8,12),
(9,9), (9,10), (9, 12), (9, 13),
(10,10), (10,11), (10,12),
(11,11), (11,12),
(12,12), (12,13),
(13,13),
];
indices.sort_by_key(|&(min, maj)| (maj, min));
for (min, maj) in indices.iter().copied() {
assert!(builder.insert(maj, min).is_ok());
}
let sp = builder.build();
assert_eq!(sp.major_dim(), 14);
assert_eq!(sp.minor_dim(), 14);
assert_eq!(sp.nnz(), indices.len());
sp.sparse_upper_triangular_solve(&[9], &mut buf);
assert_eq!(buf, vec![9, 7, 4, 2, 0, 6, 1, 5]);
}
#[test]
fn test_builder() {
let mut builder = SparsityPatternBuilder::new(2, 2);
assert!(builder.insert(0, 0).is_ok());
assert!(builder.insert(0, 0).is_err());
assert!(builder.insert(0, 1).is_ok());
assert!(builder.insert(0, 1).is_err());
assert!(builder.insert(1, 0).is_ok());
assert!(builder.insert(1, 0).is_err());
}
#[test]
fn test_builder_reset() {
let mut builder = SparsityPatternBuilder::new(4, 4);
for i in 0..3 {
assert!(builder.insert(i, i + 1).is_ok());
}
let out = builder.build();
assert_eq!(out.major_dim(), 4);
assert_eq!(out.minor_dim(), 4);
let mut builder = SparsityPatternBuilder::from(out);
for i in (0..=2).rev() {
assert!(builder.revert_to_major(i));
assert_eq!(builder.current_major(), i);
}
let out = builder.build();
let mut builder = SparsityPatternBuilder::from(out);
assert!(builder.revert_to_major(1));
assert_eq!(builder.current_major(), 1);
}


@@ -9,7 +9,7 @@ use nalgebra::proptest::matrix;
use proptest::prelude::*;
use matrixcompare::{assert_matrix_eq, prop_assert_matrix_eq};
pub fn positive_definite() -> impl Strategy<Value=CscMatrix<f64>> {
let csc_f64 = csc(value_strategy::<f64>(),
PROPTEST_MATRIX_DIM,
PROPTEST_MATRIX_DIM,
@@ -114,4 +114,4 @@ fn test_cholesky(a: Matrix5<f64>) {
let l = DMatrix::from_iterator(l.nrows(), l.ncols(), l.iter().cloned());
let cs_l_mat = DMatrix::from(&cs_l);
assert_matrix_eq!(l, cs_l_mat, comp = abs, tol = 1e-12);
}


@@ -733,3 +733,110 @@ proptest! {
prop_assert_eq!(DMatrix::from(&csc), DMatrix::identity(n, n));
}
}
#[test]
fn test_dense_lower_triangular_solve() {
let mut a = CscMatrix::identity(3);
let v = [1., 2., 3.];
let mut out = [0.; 3];
a.dense_lower_triangular_solve(&v, &mut out, true);
assert_eq!(out, v);
a.dense_lower_triangular_solve(&v, &mut out, false);
assert_eq!(out, v);
if let SparseEntryMut::NonZero(e) = a.index_entry_mut(0, 0) {
*e = 2.;
};
a.dense_lower_triangular_solve(&v, &mut out, false);
assert_eq!(out, [0.5, 2., 3.]);
a.dense_lower_triangular_solve(&v, &mut out, true);
assert_eq!(out, v);
}
#[test]
fn test_sparse_lower_triangular_solve() {
let a = CscMatrix::identity(3);
let vi = [0, 1, 2];
let v = [1., 2., 3.];
let mut out = [0.; 3];
a.sparse_lower_triangular_solve(&vi, &v, &vi, &mut out, true);
assert_eq!(out, v);
a.sparse_lower_triangular_solve(&vi, &v, &vi, &mut out, false);
assert_eq!(out, v);
let vi = [0, 999];
let v = [3., 2.];
let mut out = [0.; 2];
// test with large identity matrix
let mut a = CscMatrix::identity(1000);
a.sparse_lower_triangular_solve(&vi, &v, &vi, &mut out, true);
assert_eq!(out, v);
a.sparse_lower_triangular_solve(&vi, &v, &vi, &mut out, false);
assert_eq!(out, v);
if let SparseEntryMut::NonZero(e) = a.index_entry_mut(0, 0) {
*e = 2.;
};
a.sparse_lower_triangular_solve(&vi, &v, &vi, &mut out, false);
assert_eq!(out, [1.5, 2.]);
a.sparse_lower_triangular_solve(&vi, &v, &vi, &mut out, true);
assert_eq!(out, v);
use nalgebra_sparse::coo::CooMatrix;
let a: CscMatrix<f32> = (&CooMatrix::<f32>::try_from_triplets(
1000,
1000,
vec![0, 999, 999],
vec![0, 0, 999],
vec![2., 1., 4.],
)
.unwrap())
.into();
assert_eq!(a.col(0).nnz(), 2);
a.sparse_lower_triangular_solve(&vi, &v, &vi, &mut out, false);
assert_eq!(out, [1.5, 0.125]);
a.sparse_lower_triangular_solve(&vi, &v, &vi, &mut out, true);
assert_eq!(out, [3., -1.]);
}
#[test]
fn test_sparse_upper_triangular_solve() {
let a = CscMatrix::identity(3);
let vi = [0, 1, 2];
let v = [1., 2., 3.];
let mut out = [0.; 3];
a.sparse_upper_triangular_solve_sorted(&vi, &v, &vi, &mut out);
assert_eq!(out, v);
let vi = [0, 999];
let v = [3., 2.];
let mut out = [0.; 2];
// test with large identity matrix
let mut a = CscMatrix::identity(1000);
a.sparse_upper_triangular_solve_sorted(&vi, &v, &vi, &mut out);
assert_eq!(out, v);
if let SparseEntryMut::NonZero(e) = a.index_entry_mut(0, 0) {
*e = 2.;
};
a.sparse_upper_triangular_solve_sorted(&vi, &v, &vi, &mut out);
assert_eq!(out, [1.5, 2.]);
use nalgebra_sparse::coo::CooMatrix;
let a: CscMatrix<f32> = (&CooMatrix::<f32>::try_from_triplets(
5,
5,
vec![0, 0, 4],
vec![0, 4, 4],
vec![2., 1., 4.],
)
.unwrap())
.into();
assert_eq!(a.col(0).nnz(), 1);
assert_eq!(a.col(4).nnz(), 2);
a.sparse_upper_triangular_solve_sorted(&[0, 4], &v, &[0, 4], &mut out);
assert_eq!(out, [1.25, 0.5]);
}


@@ -518,6 +518,38 @@ fn csr_matrix_get_index_entry() {
}
}
#[test]
fn csr_upper_triangle_solve_ok() {
const N: usize = 10;
let eye = CsrMatrix::<f32>::identity(N);
let b = DMatrix::from_row_slice(N, 1, &[0.5; N]);
assert_eq!(eye.solve_upper_triangular(&b), Some(b));
let mut eye = CsrMatrix::<f32>::identity(N);
let v = if let SparseEntryMut::NonZero(v) = eye.index_entry_mut(0, 0) {
v
} else {
unreachable!();
};
*v = 2.0;
let b = DMatrix::from_row_slice(N, 1, &[1.0; N]);
let mut x = b.clone();
x[0] = 0.5;
assert_eq!(eye.solve_upper_triangular(&b), Some(x));
#[rustfmt::skip]
let dense = DMatrix::from_row_slice(3, 3, &[
1., 1., 1.,
0., 1., 1.,
0., 0., 1.,
]);
let csr = CsrMatrix::from(&dense);
let b = DMatrix::from_row_slice(3, 1, &[1.0; 3]);
let x = DMatrix::from_row_slice(3, 1, &[0., 0., 1.]);
assert_eq!(csr.solve_upper_triangular(&b), Some(x));
}
#[test]
fn csr_matrix_row_iter() {
#[rustfmt::skip]


@@ -0,0 +1,7 @@
# Seeds for failure cases proptest has generated in the past. It is
# automatically read and these particular cases re-run before any
# novel cases are generated.
#
# It is recommended to check this file in to source control so that
# everyone who runs the test benefits from these saved cases.
cc b8e49f3fa66a3568e1c29a47de5f3d2f67866d59b39329e2a0da1537a9c6d584 # shrinks to matrix = CscMatrix { cs: CsMatrix { sparsity_pattern: SparsityPattern { major_offsets: [0, 4, 8, 11, 16, 21], minor_indices: [0, 1, 3, 4, 0, 1, 3, 4, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4], minor_dim: 5 }, values: [1.0, 0.0, 0.0, 0.0, 0.0, 8.445226477445164, 0.0, 5.072108001361104, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 5.072108001361104, 0.0, 0.0, 4.455405910808415] } }


@@ -0,0 +1,75 @@
use crate::unit_tests::cholesky::positive_definite;
use nalgebra_sparse::factorization::LeftLookingLUFactorization;
use matrixcompare::{assert_matrix_eq, prop_assert_matrix_eq};
use proptest::prelude::*;
use crate::common::value_strategy;
use nalgebra::proptest::matrix;
use nalgebra_sparse::CscMatrix;
proptest! {
// Note that positive definite matrices are guaranteed to be invertible.
// That's why they're used here, but it's not necessary for a matrix to be pd to be
// invertible.
#[test]
fn lu_for_positive_def_matrices(
matrix in positive_definite()
) {
let lu = LeftLookingLUFactorization::new(&matrix);
let l = lu.l();
prop_assert!(l.triplet_iter().all(|(i, j, _)| j <= i));
let u = lu.u();
prop_assert!(u.triplet_iter().all(|(i, j, _)| j >= i));
prop_assert_matrix_eq!(l * u, matrix, comp = abs, tol = 1e-7);
}
#[test]
fn lu_solve_correct_for_positive_def_matrices(
(matrix, b) in positive_definite()
.prop_flat_map(|csc| {
let rhs = matrix(value_strategy::<f64>(), csc.nrows(), 1);
(Just(csc), rhs)
})
) {
let lu = LeftLookingLUFactorization::new(&matrix);
let l = lu.l();
prop_assert!(l.triplet_iter().all(|(i, j, _)| j <= i));
let mut x_l = b.clone_owned();
l.dense_lower_triangular_solve(b.as_slice(), x_l.as_mut_slice(), true);
prop_assert_matrix_eq!(l * x_l, b, comp=abs, tol=1e-12);
let u = lu.u();
prop_assert!(u.triplet_iter().all(|(i, j, _)| j >= i));
let mut x_u = b.clone_owned();
u.dense_upper_triangular_solve(b.as_slice(), x_u.as_mut_slice());
prop_assert_matrix_eq!(u * x_u, b, comp = abs, tol = 1e-7);
let x = lu.solve(b.as_slice());
prop_assert_matrix_eq!(&matrix * &x, b, comp=abs, tol=1e-12);
}
}
#[test]
fn minimized_lu() {
let major_offsets = vec![0, 1, 3, 4, 5, 8];
let minor_indices = vec![0, 1, 4, 2, 3, 1, 2, 4];
let values = vec![1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 3.0, 1.0];
assert_eq!(minor_indices.len(), values.len());
let mat = CscMatrix::try_from_unsorted_csc_data(5, 5, major_offsets, minor_indices, values);
let mat = mat.unwrap();
let lu = LeftLookingLUFactorization::new(&mat);
let l = lu.l();
assert!(l.triplet_iter().all(|(i, j, _)| j <= i));
let u = lu.u();
assert!(u.triplet_iter().all(|(i, j, _)| j >= i));
assert_matrix_eq!(l * u, mat, comp = abs, tol = 1e-7);
}


@@ -1,4 +1,7 @@
// factorization tests
mod cholesky;
mod left_looking_lu;
mod convert_serial;
mod coo;
mod csc;