Add elimination tree computation.

sebcrozet 2018-10-30 07:46:34 +01:00
parent 34b20dc291
commit 7ecbacacda
7 changed files with 749 additions and 515 deletions


@@ -14,7 +14,9 @@ use {DefaultAllocator, Dim, Matrix, MatrixMN, Real, Scalar, Vector, VectorN, U1}
// cannot be used for trait method return types.
pub trait CsStorageIter<'a, N, R, C = U1> {
type ColumnEntries: Iterator<Item = (usize, N)>;
+ type ColumnRowIndices: Iterator<Item = usize>;
+ fn column_row_indices(&'a self, j: usize) -> Self::ColumnRowIndices;
fn column_entries(&'a self, j: usize) -> Self::ColumnEntries;
}
@@ -44,10 +46,10 @@ pub struct CsVecStorage<N: Scalar, R: Dim, C: Dim>
where
DefaultAllocator: Allocator<usize, C>,
{
- shape: (R, C),
- p: VectorN<usize, C>,
- i: Vec<usize>,
- vals: Vec<N>,
+ pub(crate) shape: (R, C),
+ pub(crate) p: VectorN<usize, C>,
+ pub(crate) i: Vec<usize>,
+ pub(crate) vals: Vec<N>,
}
impl<N: Scalar, R: Dim, C: Dim> CsVecStorage<N, R, C> where DefaultAllocator: Allocator<usize, C> {}
@@ -58,6 +60,7 @@ where
{
type ColumnEntries =
iter::Zip<iter::Cloned<slice::Iter<'a, usize>>, iter::Cloned<slice::Iter<'a, N>>>;
+ type ColumnRowIndices = iter::Cloned<slice::Iter<'a, usize>>;
#[inline]
fn column_entries(&'a self, j: usize) -> Self::ColumnEntries {
@@ -67,6 +70,12 @@ where
.cloned()
.zip(self.vals[rng].iter().cloned())
}
+ #[inline]
+ fn column_row_indices(&'a self, j: usize) -> Self::ColumnRowIndices {
+ let rng = self.column_range(j);
+ self.i[rng.clone()].iter().cloned()
+ }
}
impl<N: Scalar, R: Dim, C: Dim> CsStorage<N, R, C> for CsVecStorage<N, R, C>
@@ -213,514 +222,4 @@ impl<N: Scalar, R: Dim, C: Dim, S: CsStorage<N, R, C>> CsMatrix<N, R, C, S> {
res
}
fn scatter<R2: Dim, C2: Dim>(
&self,
j: usize,
beta: N,
timestamps: &mut [usize],
timestamp: usize,
workspace: &mut [N],
mut nz: usize,
res: &mut CsMatrix<N, R2, C2>,
) -> usize
where
N: ClosedAdd + ClosedMul,
DefaultAllocator: Allocator<usize, C2>,
{
for (i, val) in self.data.column_entries(j) {
if timestamps[i] < timestamp {
timestamps[i] = timestamp;
res.data.i[nz] = i;
nz += 1;
workspace[i] = val * beta;
} else {
workspace[i] += val * beta;
}
}
nz
}
}
impl<N: Real, D: Dim, S: CsStorage<N, D, D>> CsMatrix<N, D, D, S> {
pub fn solve_lower_triangular<R2: Dim, C2: Dim, S2>(
&self,
b: &Matrix<N, R2, C2, S2>,
) -> Option<MatrixMN<N, R2, C2>>
where
S2: Storage<N, R2, C2>,
DefaultAllocator: Allocator<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<D, R2>,
{
let mut b = b.clone_owned();
if self.solve_lower_triangular_mut(&mut b) {
Some(b)
} else {
None
}
}
pub fn tr_solve_lower_triangular<R2: Dim, C2: Dim, S2>(
&self,
b: &Matrix<N, R2, C2, S2>,
) -> Option<MatrixMN<N, R2, C2>>
where
S2: Storage<N, R2, C2>,
DefaultAllocator: Allocator<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<D, R2>,
{
let mut b = b.clone_owned();
if self.tr_solve_lower_triangular_mut(&mut b) {
Some(b)
} else {
None
}
}
pub fn solve_lower_triangular_mut<R2: Dim, C2: Dim, S2>(
&self,
b: &mut Matrix<N, R2, C2, S2>,
) -> bool
where
S2: StorageMut<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<D, R2>,
{
let (nrows, ncols) = self.data.shape();
assert_eq!(nrows.value(), ncols.value(), "The matrix must be square.");
assert_eq!(nrows.value(), b.len(), "Mismatched matrix dimensions.");
for j2 in 0..b.ncols() {
let mut b = b.column_mut(j2);
for j in 0..ncols.value() {
let mut column = self.data.column_entries(j);
let mut diag_found = false;
while let Some((i, val)) = column.next() {
if i == j {
if val.is_zero() {
return false;
}
b[j] /= val;
diag_found = true;
break;
}
}
if !diag_found {
return false;
}
for (i, val) in column {
b[i] -= b[j] * val;
}
}
}
true
}
pub fn tr_solve_lower_triangular_mut<R2: Dim, C2: Dim, S2>(
&self,
b: &mut Matrix<N, R2, C2, S2>,
) -> bool
where
S2: StorageMut<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<D, R2>,
{
let (nrows, ncols) = self.data.shape();
assert_eq!(nrows.value(), ncols.value(), "The matrix must be square.");
assert_eq!(nrows.value(), b.len(), "Mismatched matrix dimensions.");
for j2 in 0..b.ncols() {
let mut b = b.column_mut(j2);
for j in (0..ncols.value()).rev() {
let mut column = self.data.column_entries(j);
let mut diag = None;
while let Some((i, val)) = column.next() {
if i == j {
if val.is_zero() {
return false;
}
diag = Some(val);
break;
}
}
if let Some(diag) = diag {
for (i, val) in column {
b[j] -= val * b[i];
}
b[j] /= diag;
} else {
return false;
}
}
}
true
}
pub fn solve_lower_triangular_cs<D2: Dim, S2>(
&self,
b: &CsVector<N, D2, S2>,
) -> Option<CsVector<N, D2>>
where
S2: CsStorage<N, D2>,
DefaultAllocator: Allocator<bool, D> + Allocator<N, D2> + Allocator<usize, D2>,
ShapeConstraint: SameNumberOfRows<D, D2>,
{
let mut reach = Vec::new();
self.lower_triangular_reach(b, &mut reach);
let mut workspace = unsafe { VectorN::new_uninitialized_generic(b.data.shape().0, U1) };
for i in reach.iter().cloned() {
workspace[i] = N::zero();
}
for (i, val) in b.data.column_entries(0) {
workspace[i] = val;
}
for j in reach.iter().cloned().rev() {
let mut column = self.data.column_entries(j);
let mut diag_found = false;
while let Some((i, val)) = column.next() {
if i == j {
if val.is_zero() {
break;
}
workspace[j] /= val;
diag_found = true;
break;
}
}
if !diag_found {
return None;
}
for (i, val) in column {
workspace[i] -= workspace[j] * val;
}
}
// Copy the result into a sparse vector.
let mut result = CsVector::new_uninitialized_generic(b.data.shape().0, U1, reach.len());
for (i, val) in reach.iter().zip(result.data.vals.iter_mut()) {
*val = workspace[*i];
}
result.data.i = reach;
Some(result)
}
fn lower_triangular_reach<D2: Dim, S2>(&self, b: &CsVector<N, D2, S2>, xi: &mut Vec<usize>)
where
S2: CsStorage<N, D2>,
DefaultAllocator: Allocator<bool, D>,
{
let mut visited = VectorN::repeat_generic(self.data.shape().1, U1, false);
let mut stack = Vec::new();
for i in b.data.column_range(0) {
let row_index = b.data.row_index(i);
if !visited[row_index] {
let rng = self.data.column_range(row_index);
stack.push((row_index, rng));
self.lower_triangular_dfs(visited.as_mut_slice(), &mut stack, xi);
}
}
}
fn lower_triangular_dfs(
&self,
visited: &mut [bool],
stack: &mut Vec<(usize, Range<usize>)>,
xi: &mut Vec<usize>,
) {
'recursion: while let Some((j, rng)) = stack.pop() {
visited[j] = true;
for i in rng.clone() {
let row_id = self.data.row_index(i);
if row_id > j && !visited[row_id] {
stack.push((j, (i + 1)..rng.end));
let row_id = self.data.row_index(i);
stack.push((row_id, self.data.column_range(row_id)));
continue 'recursion;
}
}
xi.push(j)
}
}
}
/*
impl<N: Scalar, R, S> CsVector<N, R, S> {
pub fn axpy(&mut self, alpha: N, x: CsVector<N, R, S>, beta: N) {
// First, compute the number of non-zero entries.
let mut nnzero = 0;
// Allocate a size large enough.
self.data.set_column_len(0, nnzero);
// Fill with the axpy.
let mut i = self.len();
let mut j = x.len();
let mut k = nnzero - 1;
let mut rid1 = self.data.row_index(0, i - 1);
let mut rid2 = x.data.row_index(0, j - 1);
while k > 0 {
if rid1 == rid2 {
self.data.set_row_index(0, k, rid1);
self[k] = alpha * x[j] + beta * self[k];
i -= 1;
j -= 1;
} else if rid1 < rid2 {
self.data.set_row_index(0, k, rid1);
self[k] = beta * self[i];
i -= 1;
} else {
self.data.set_row_index(0, k, rid2);
self[k] = alpha * x[j];
j -= 1;
}
k -= 1;
}
}
}
*/
impl<N: Scalar + Zero + ClosedAdd + ClosedMul, D: Dim, S: StorageMut<N, D>> Vector<N, D, S> {
pub fn axpy_cs<D2: Dim, S2>(&mut self, alpha: N, x: &CsVector<N, D2, S2>, beta: N)
where
S2: CsStorage<N, D2>,
ShapeConstraint: DimEq<D, D2>,
{
if beta.is_zero() {
for i in 0..x.len() {
unsafe {
let k = x.data.row_index_unchecked(i);
let y = self.vget_unchecked_mut(k);
*y = alpha * *x.data.get_value_unchecked(i);
}
}
} else {
// Needed to make sure that even the components not present in `x` are multiplied by `beta`.
*self *= beta;
for i in 0..x.len() {
unsafe {
let k = x.data.row_index_unchecked(i);
let y = self.vget_unchecked_mut(k);
*y += alpha * *x.data.get_value_unchecked(i);
}
}
}
}
/*
pub fn gemv_sparse<R2: Dim, C2: Dim, S2>(&mut self, alpha: N, a: &CsMatrix<N, R2, C2, S2>, x: &DVector<N>, beta: N)
where
S2: CsStorage<N, R2, C2> {
let col2 = a.column(0);
let val = unsafe { *x.vget_unchecked(0) };
self.axpy_sparse(alpha * val, &col2, beta);
for j in 1..ncols2 {
let col2 = a.column(j);
let val = unsafe { *x.vget_unchecked(j) };
self.axpy_sparse(alpha * val, &col2, N::one());
}
}
*/
}
impl<'a, 'b, N, R1, R2, C1, C2, S1, S2> Mul<&'b CsMatrix<N, R2, C2, S2>>
for &'a CsMatrix<N, R1, C1, S1>
where
N: Scalar + ClosedAdd + ClosedMul + Zero,
R1: Dim,
C1: Dim,
R2: Dim,
C2: Dim,
S1: CsStorage<N, R1, C1>,
S2: CsStorage<N, R2, C2>,
ShapeConstraint: AreMultipliable<R1, C1, R2, C2>,
DefaultAllocator: Allocator<usize, C2> + Allocator<usize, R1> + Allocator<N, R1>,
{
type Output = CsMatrix<N, R1, C2>;
fn mul(self, rhs: &'b CsMatrix<N, R2, C2, S2>) -> CsMatrix<N, R1, C2> {
let (nrows1, ncols1) = self.data.shape();
let (nrows2, ncols2) = rhs.data.shape();
assert_eq!(
ncols1.value(),
nrows2.value(),
"Mismatched dimensions for matrix multiplication."
);
let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len());
let mut timestamps = VectorN::zeros_generic(nrows1, U1);
let mut workspace = unsafe { VectorN::new_uninitialized_generic(nrows1, U1) };
let mut nz = 0;
for j in 0..ncols2.value() {
res.data.p[j] = nz;
let new_size_bound = nz + nrows1.value();
res.data.i.resize(new_size_bound, 0);
res.data.vals.resize(new_size_bound, N::zero());
for (i, val) in rhs.data.column_entries(j) {
nz = self.scatter(
i,
val,
timestamps.as_mut_slice(),
j + 1,
workspace.as_mut_slice(),
nz,
&mut res,
);
}
for p in res.data.p[j]..nz {
res.data.vals[p] = workspace[res.data.i[p]]
}
}
res.data.i.truncate(nz);
res.data.i.shrink_to_fit();
res.data.vals.truncate(nz);
res.data.vals.shrink_to_fit();
res
}
}
impl<'a, 'b, N, R1, R2, C1, C2, S1, S2> Add<&'b CsMatrix<N, R2, C2, S2>>
for &'a CsMatrix<N, R1, C1, S1>
where
N: Scalar + ClosedAdd + ClosedMul + One,
R1: Dim,
C1: Dim,
R2: Dim,
C2: Dim,
S1: CsStorage<N, R1, C1>,
S2: CsStorage<N, R2, C2>,
ShapeConstraint: DimEq<R1, R2> + DimEq<C1, C2>,
DefaultAllocator: Allocator<usize, C2> + Allocator<usize, R1> + Allocator<N, R1>,
{
type Output = CsMatrix<N, R1, C2>;
fn add(self, rhs: &'b CsMatrix<N, R2, C2, S2>) -> CsMatrix<N, R1, C2> {
let (nrows1, ncols1) = self.data.shape();
let (nrows2, ncols2) = rhs.data.shape();
assert_eq!(
(nrows1.value(), ncols1.value()),
(nrows2.value(), ncols2.value()),
"Mismatched dimensions for matrix sum."
);
let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len());
let mut timestamps = VectorN::zeros_generic(nrows1, U1);
let mut workspace = unsafe { VectorN::new_uninitialized_generic(nrows1, U1) };
let mut nz = 0;
for j in 0..ncols2.value() {
res.data.p[j] = nz;
nz = self.scatter(
j,
N::one(),
timestamps.as_mut_slice(),
j + 1,
workspace.as_mut_slice(),
nz,
&mut res,
);
nz = rhs.scatter(
j,
N::one(),
timestamps.as_mut_slice(),
j + 1,
workspace.as_mut_slice(),
nz,
&mut res,
);
for p in res.data.p[j]..nz {
res.data.vals[p] = workspace[res.data.i[p]]
}
}
res.data.i.truncate(nz);
res.data.i.shrink_to_fit();
res.data.vals.truncate(nz);
res.data.vals.shrink_to_fit();
res
}
}
use std::fmt::Debug;
impl<'a, N: Scalar + Zero, R: Dim, C: Dim, S> From<CsMatrix<N, R, C, S>> for MatrixMN<N, R, C>
where
S: CsStorage<N, R, C> + Debug,
DefaultAllocator: Allocator<N, R, C>,
{
fn from(m: CsMatrix<N, R, C, S>) -> Self {
let (nrows, ncols) = m.data.shape();
let mut res = MatrixMN::zeros_generic(nrows, ncols);
for j in 0..ncols.value() {
for (i, val) in m.data.column_entries(j) {
res[(i, j)] = val;
}
}
res
}
}
impl<'a, N: Scalar + Zero, R: Dim, C: Dim, S> From<Matrix<N, R, C, S>> for CsMatrix<N, R, C>
where
S: Storage<N, R, C>,
DefaultAllocator: Allocator<N, R, C> + Allocator<usize, C>,
{
fn from(m: Matrix<N, R, C, S>) -> Self {
let (nrows, ncols) = m.data.shape();
let len = m.iter().filter(|e| !e.is_zero()).count();
let mut res = CsMatrix::new_uninitialized_generic(nrows, ncols, len);
let mut nz = 0;
for j in 0..ncols.value() {
let column = m.column(j);
res.data.p[j] = nz;
for i in 0..nrows.value() {
if !column[i].is_zero() {
res.data.i[nz] = i;
res.data.vals[nz] = column[i];
nz += 1;
}
}
}
res
}
}


@@ -0,0 +1,184 @@
use alga::general::{ClosedAdd, ClosedMul};
use num::{One, Zero};
use std::iter;
use std::marker::PhantomData;
use std::ops::{Add, Mul, Range};
use std::slice;
use allocator::Allocator;
use constraint::{AreMultipliable, DimEq, SameNumberOfRows, ShapeConstraint};
use sparse::{CsMatrix, CsStorage, CsVector};
use storage::{Storage, StorageMut};
use {DefaultAllocator, Dim, Matrix, MatrixMN, Real, Scalar, Vector, VectorN, U1};
pub struct SymbolicAnalysis {
pinv: Vec<usize>,
q: Vec<usize>,
elimination_tree: Vec<usize>,
cp: Vec<usize>,
leftmost: Vec<usize>,
m2: usize,
lnz: usize,
unz: usize,
}
#[derive(Copy, Clone, Debug)]
pub struct EliminationTreeNode {
parent: usize,
}
impl EliminationTreeNode {
pub fn root() -> Self {
EliminationTreeNode {
parent: usize::max_value(),
}
}
pub fn with_parent(parent: usize) -> Self {
EliminationTreeNode { parent }
}
pub fn is_root(&self) -> bool {
self.parent == usize::max_value()
}
pub fn parent(&self) -> usize {
self.parent
}
}
impl<N: Real, D: Dim, S: CsStorage<N, D, D>> CsMatrix<N, D, D, S> {
fn elimination_tree(&self) -> Vec<EliminationTreeNode> {
let (nrows, ncols) = self.data.shape();
assert_eq!(
nrows.value(),
ncols.value(),
"The matrix `self` must be square to compute its elimination tree."
);
let mut forest: Vec<_> = iter::repeat(EliminationTreeNode::root())
.take(nrows.value())
.collect();
let mut ancestor: Vec<_> = iter::repeat(usize::max_value())
.take(nrows.value())
.collect();
for k in 0..nrows.value() {
for irow in self.data.column_row_indices(k) {
let mut i = irow;
while i < k {
let i_ancestor = ancestor[i];
ancestor[i] = k;
if i_ancestor == usize::max_value() {
forest[i] = EliminationTreeNode::with_parent(k);
break;
}
i = i_ancestor;
}
}
}
forest
}
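
The loop above is the classical parent-pointer construction of the elimination tree: for a symmetric matrix with Cholesky factor L, the parent of column j is the smallest i > j such that L[i, j] != 0, and `ancestor` shortcuts repeated walks up the partially built tree. A minimal standalone sketch of the same update on a plain column-major pattern (all names here are illustrative, not part of the crate):

// Standalone sketch of the parent-pointer elimination tree construction
// (same update as `elimination_tree` above; names are illustrative).
const ROOT: usize = usize::MAX;

fn etree(columns: &[Vec<usize>]) -> Vec<usize> {
    let n = columns.len();
    let mut parent = vec![ROOT; n];
    let mut ancestor = vec![ROOT; n];

    for k in 0..n {
        for &irow in &columns[k] {
            // Walk from `irow` toward the root of the partially built tree,
            // shortcutting the path through `ancestor` as we go.
            let mut i = irow;
            while i < k {
                let i_ancestor = ancestor[i];
                ancestor[i] = k;
                if i_ancestor == ROOT {
                    parent[i] = k;
                    break;
                }
                i = i_ancestor;
            }
        }
    }
    parent
}

fn main() {
    // Row indices of each column of a symmetric 4x4 pattern with nonzeros at
    // (0,2), (1,3), (2,3) and on the diagonal.
    let columns = vec![vec![0, 2], vec![1, 3], vec![0, 2, 3], vec![1, 2, 3]];
    assert_eq!(etree(&columns), vec![2, 3, 3, ROOT]);
}
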
fn reach(
&self,
j: usize,
max_j: usize,
tree: &[EliminationTreeNode],
marks: &mut Vec<bool>,
out: &mut Vec<usize>,
) {
marks.clear();
marks.resize(tree.len(), false);
for irow in self.data.column_row_indices(j) {
let mut curr = irow;
while curr != usize::max_value() && curr <= max_j && !marks[curr] {
marks[curr] = true;
out.push(curr);
curr = tree[curr].parent;
}
}
}
fn column_counts(&self, tree: &[EliminationTreeNode]) -> Vec<usize> {
let len = self.data.shape().0.value();
let mut counts: Vec<_> = iter::repeat(0).take(len).collect();
let mut reach = Vec::new();
let mut marks = Vec::new();
for i in 0..len {
self.reach(i, i, tree, &mut marks, &mut reach);
for j in reach.drain(..) {
counts[j] += 1;
}
}
counts
}
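
`reach` and `column_counts` rely on the usual pattern characterization: the column indices of row i of the Cholesky factor are exactly the tree nodes reachable by walking from each row index of column i of the input matrix up toward the root, stopping at i, so counting how often each index appears over all rows gives the number of nonzeros in every column of the factor. A plain-array sketch of the same counting (illustrative names, with `usize::MAX` playing the role of the root marker):

// Sketch: nonzero count of each column of the Cholesky factor, obtained by
// walking a parent array upward (the combination of `reach` and
// `column_counts` above, with the counting done on the fly).
fn column_counts(columns: &[Vec<usize>], parent: &[usize]) -> Vec<usize> {
    let n = columns.len();
    let mut counts = vec![0usize; n];
    let mut marks = vec![false; n];

    for i in 0..n {
        for m in marks.iter_mut() {
            *m = false;
        }
        for &irow in &columns[i] {
            // Climb irow -> parent[irow] -> ... while we stay at or below `i`
            // and have not visited the node for this row yet.
            let mut curr = irow;
            while curr != usize::MAX && curr <= i && !marks[curr] {
                marks[curr] = true;
                counts[curr] += 1;
                curr = parent[curr];
            }
        }
    }
    counts
}

// For the 4x4 pattern of the previous sketch, with parent = [2, 3, 3, MAX],
// this returns [2, 2, 2, 1]: two stored entries in each of the first three
// columns of the factor, and only the diagonal in the last one.
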
fn tree_postorder(tree: &[EliminationTreeNode]) -> Vec<usize> {
// FIXME: avoid all those allocations?
let mut first_child: Vec<_> = iter::repeat(usize::max_value()).take(tree.len()).collect();
let mut other_children: Vec<_> =
iter::repeat(usize::max_value()).take(tree.len()).collect();
// Build the children list from the parent list.
// The set of children of the node `i` is given by the linked list
// starting at `first_child[i]`. The nodes of this list are then:
// { first_child[i], other_children[first_child[i]], other_children[other_children[first_child[i]]], ... }
for (i, node) in tree.iter().enumerate() {
if !node.is_root() {
let brother = first_child[node.parent];
first_child[node.parent] = i;
other_children[i] = brother;
}
}
let mut stack = Vec::with_capacity(tree.len());
let mut postorder = Vec::with_capacity(tree.len());
for (i, node) in tree.iter().enumerate() {
if node.is_root() {
Self::dfs(
i,
&mut first_child,
&other_children,
&mut stack,
&mut postorder,
)
}
}
postorder
}
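
As a worked example of this encoding: for parents [3, 3, 4, 4, root], the loop yields first_child = [·, ·, ·, 1, 3] and other_children = [·, 0, ·, 2, ·] (where · stands for usize::max_value()), so node 3 owns the child list {1, 0} and node 4 owns {3, 2}; running the depth-first search below from the root 4 then produces the postorder [1, 0, 3, 2, 4], in which every node appears after all of its children.
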
fn dfs(
i: usize,
first_child: &mut [usize],
other_children: &[usize],
stack: &mut Vec<usize>,
result: &mut Vec<usize>,
) {
stack.clear();
stack.push(i);
while let Some(n) = stack.pop() {
let child = first_child[n];
if child == usize::max_value() {
// No children left.
result.push(n);
} else {
stack.push(n);
stack.push(child);
first_child[n] = other_children[child];
}
}
}
}


@@ -0,0 +1 @@


@@ -0,0 +1,59 @@
use alga::general::{ClosedAdd, ClosedMul};
use num::{One, Zero};
use std::iter;
use std::marker::PhantomData;
use std::ops::{Add, Mul, Range};
use std::slice;
use allocator::Allocator;
use constraint::{AreMultipliable, DimEq, SameNumberOfRows, ShapeConstraint};
use sparse::{CsMatrix, CsStorage, CsVector};
use storage::{Storage, StorageMut};
use {DefaultAllocator, Dim, Matrix, MatrixMN, Real, Scalar, Vector, VectorN, U1};
impl<'a, N: Scalar + Zero, R: Dim, C: Dim, S> From<CsMatrix<N, R, C, S>> for MatrixMN<N, R, C>
where
S: CsStorage<N, R, C>,
DefaultAllocator: Allocator<N, R, C>,
{
fn from(m: CsMatrix<N, R, C, S>) -> Self {
let (nrows, ncols) = m.data.shape();
let mut res = MatrixMN::zeros_generic(nrows, ncols);
for j in 0..ncols.value() {
for (i, val) in m.data.column_entries(j) {
res[(i, j)] = val;
}
}
res
}
}
impl<'a, N: Scalar + Zero, R: Dim, C: Dim, S> From<Matrix<N, R, C, S>> for CsMatrix<N, R, C>
where
S: Storage<N, R, C>,
DefaultAllocator: Allocator<N, R, C> + Allocator<usize, C>,
{
fn from(m: Matrix<N, R, C, S>) -> Self {
let (nrows, ncols) = m.data.shape();
let len = m.iter().filter(|e| !e.is_zero()).count();
let mut res = CsMatrix::new_uninitialized_generic(nrows, ncols, len);
let mut nz = 0;
for j in 0..ncols.value() {
let column = m.column(j);
res.data.p[j] = nz;
for i in 0..nrows.value() {
if !column[i].is_zero() {
res.data.i[nz] = i;
res.data.vals[nz] = column[i];
nz += 1;
}
}
}
res
}
}
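
A usage sketch for this pair of conversions, assuming the crate exposes these `From` impls under `nalgebra::sparse` at this point in the branch (the module path and the surrounding function are illustrative):

use nalgebra::sparse::CsMatrix;
use nalgebra::{DMatrix, Dynamic};

fn roundtrip_example() {
    let dense = DMatrix::from_row_slice(3, 3, &[
        1.0, 0.0, 2.0,
        0.0, 3.0, 0.0,
        4.0, 0.0, 5.0,
    ]);

    // Dense -> compressed-column storage: only the five non-zero entries are kept.
    let sparse: CsMatrix<f64, Dynamic, Dynamic> = CsMatrix::from(dense.clone());

    // Compressed-column -> dense: the zeros are filled back in.
    let roundtrip = DMatrix::from(sparse);
    assert_eq!(roundtrip, dense);
}
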

src/sparse/cs_matrix_ops.rs (new file)

@@ -0,0 +1,251 @@
use alga::general::{ClosedAdd, ClosedMul};
use num::{One, Zero};
use std::iter;
use std::marker::PhantomData;
use std::ops::{Add, Mul, Range};
use std::slice;
use allocator::Allocator;
use constraint::{AreMultipliable, DimEq, SameNumberOfRows, ShapeConstraint};
use sparse::{CsMatrix, CsStorage, CsVector};
use storage::{Storage, StorageMut};
use {DefaultAllocator, Dim, Matrix, MatrixMN, Real, Scalar, Vector, VectorN, U1};
impl<N: Scalar, R: Dim, C: Dim, S: CsStorage<N, R, C>> CsMatrix<N, R, C, S> {
fn scatter<R2: Dim, C2: Dim>(
&self,
j: usize,
beta: N,
timestamps: &mut [usize],
timestamp: usize,
workspace: &mut [N],
mut nz: usize,
res: &mut CsMatrix<N, R2, C2>,
) -> usize
where
N: ClosedAdd + ClosedMul,
DefaultAllocator: Allocator<usize, C2>,
{
for (i, val) in self.data.column_entries(j) {
if timestamps[i] < timestamp {
timestamps[i] = timestamp;
res.data.i[nz] = i;
nz += 1;
workspace[i] = val * beta;
} else {
workspace[i] += val * beta;
}
}
nz
}
}
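
`scatter` is the usual sparse accumulator: `workspace` holds the value accumulated so far for every row touched by the current output column, and `timestamps[i]` reaching the current `timestamp` marks that row `i` has already been appended to the result's row-index list. A plain-vector sketch of the same step (names illustrative, not the crate API):

// Sketch: accumulate `beta` times one input column into a dense workspace,
// recording each row index the first time it is touched for this output column.
fn scatter(
    column: &[(usize, f64)], // (row index, value) pairs of the input column
    beta: f64,
    timestamps: &mut [usize],
    timestamp: usize, // e.g. `output column index + 1`, so it grows per column
    workspace: &mut [f64],
    result_rows: &mut Vec<usize>,
) {
    for &(i, val) in column {
        if timestamps[i] < timestamp {
            // First contribution to row `i` for this output column.
            timestamps[i] = timestamp;
            result_rows.push(i);
            workspace[i] = val * beta;
        } else {
            // Row `i` was already registered: just accumulate.
            workspace[i] += val * beta;
        }
    }
}

After all scatter calls for one output column, the caller copies `workspace[i]` into the values array for every recorded row `i`; that is exactly the `for p in res.data.p[j]..nz` loop in the `Mul` and `Add` implementations below.
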
/*
impl<N: Scalar, R, S> CsVector<N, R, S> {
pub fn axpy(&mut self, alpha: N, x: CsVector<N, R, S>, beta: N) {
// First, compute the number of non-zero entries.
let mut nnzero = 0;
// Allocate a size large enough.
self.data.set_column_len(0, nnzero);
// Fill with the axpy.
let mut i = self.len();
let mut j = x.len();
let mut k = nnzero - 1;
let mut rid1 = self.data.row_index(0, i - 1);
let mut rid2 = x.data.row_index(0, j - 1);
while k > 0 {
if rid1 == rid2 {
self.data.set_row_index(0, k, rid1);
self[k] = alpha * x[j] + beta * self[k];
i -= 1;
j -= 1;
} else if rid1 < rid2 {
self.data.set_row_index(0, k, rid1);
self[k] = beta * self[i];
i -= 1;
} else {
self.data.set_row_index(0, k, rid2);
self[k] = alpha * x[j];
j -= 1;
}
k -= 1;
}
}
}
*/
impl<N: Scalar + Zero + ClosedAdd + ClosedMul, D: Dim, S: StorageMut<N, D>> Vector<N, D, S> {
pub fn axpy_cs<D2: Dim, S2>(&mut self, alpha: N, x: &CsVector<N, D2, S2>, beta: N)
where
S2: CsStorage<N, D2>,
ShapeConstraint: DimEq<D, D2>,
{
if beta.is_zero() {
for i in 0..x.len() {
unsafe {
let k = x.data.row_index_unchecked(i);
let y = self.vget_unchecked_mut(k);
*y = alpha * *x.data.get_value_unchecked(i);
}
}
} else {
// Needed to make sure that even the components not present in `x` are multiplied by `beta`.
*self *= beta;
for i in 0..x.len() {
unsafe {
let k = x.data.row_index_unchecked(i);
let y = self.vget_unchecked_mut(k);
*y += alpha * *x.data.get_value_unchecked(i);
}
}
}
}
/*
pub fn gemv_sparse<R2: Dim, C2: Dim, S2>(&mut self, alpha: N, a: &CsMatrix<N, R2, C2, S2>, x: &DVector<N>, beta: N)
where
S2: CsStorage<N, R2, C2> {
let col2 = a.column(0);
let val = unsafe { *x.vget_unchecked(0) };
self.axpy_sparse(alpha * val, &col2, beta);
for j in 1..ncols2 {
let col2 = a.column(j);
let val = unsafe { *x.vget_unchecked(j) };
self.axpy_sparse(alpha * val, &col2, N::one());
}
}
*/
}
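
`axpy_cs` performs `self = alpha * x + beta * self` with a sparse `x`: when `beta` is zero only the rows stored in `x` are written, and otherwise the whole dense vector is scaled first so that components with no matching entry in `x` are still multiplied by `beta`. A plain-slice sketch of the same update (illustrative, not the crate API):

// Sketch: y <- alpha * x + beta * y, with x given as (row index, value) pairs.
fn axpy_sparse(y: &mut [f64], alpha: f64, x: &[(usize, f64)], beta: f64) {
    if beta == 0.0 {
        // Only the rows stored in `x` are assigned; the remaining components
        // of `y` keep their previous values.
        for &(i, v) in x {
            y[i] = alpha * v;
        }
    } else {
        // Scale the whole vector first so that rows absent from `x` are also
        // multiplied by `beta`.
        for yi in y.iter_mut() {
            *yi *= beta;
        }
        for &(i, v) in x {
            y[i] += alpha * v;
        }
    }
}
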
impl<'a, 'b, N, R1, R2, C1, C2, S1, S2> Mul<&'b CsMatrix<N, R2, C2, S2>>
for &'a CsMatrix<N, R1, C1, S1>
where
N: Scalar + ClosedAdd + ClosedMul + Zero,
R1: Dim,
C1: Dim,
R2: Dim,
C2: Dim,
S1: CsStorage<N, R1, C1>,
S2: CsStorage<N, R2, C2>,
ShapeConstraint: AreMultipliable<R1, C1, R2, C2>,
DefaultAllocator: Allocator<usize, C2> + Allocator<usize, R1> + Allocator<N, R1>,
{
type Output = CsMatrix<N, R1, C2>;
fn mul(self, rhs: &'b CsMatrix<N, R2, C2, S2>) -> CsMatrix<N, R1, C2> {
let (nrows1, ncols1) = self.data.shape();
let (nrows2, ncols2) = rhs.data.shape();
assert_eq!(
ncols1.value(),
nrows2.value(),
"Mismatched dimensions for matrix multiplication."
);
let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len());
let mut timestamps = VectorN::zeros_generic(nrows1, U1);
let mut workspace = unsafe { VectorN::new_uninitialized_generic(nrows1, U1) };
let mut nz = 0;
for j in 0..ncols2.value() {
res.data.p[j] = nz;
let new_size_bound = nz + nrows1.value();
res.data.i.resize(new_size_bound, 0);
res.data.vals.resize(new_size_bound, N::zero());
for (i, val) in rhs.data.column_entries(j) {
nz = self.scatter(
i,
val,
timestamps.as_mut_slice(),
j + 1,
workspace.as_mut_slice(),
nz,
&mut res,
);
}
for p in res.data.p[j]..nz {
res.data.vals[p] = workspace[res.data.i[p]]
}
}
res.data.i.truncate(nz);
res.data.i.shrink_to_fit();
res.data.vals.truncate(nz);
res.data.vals.shrink_to_fit();
res
}
}
impl<'a, 'b, N, R1, R2, C1, C2, S1, S2> Add<&'b CsMatrix<N, R2, C2, S2>>
for &'a CsMatrix<N, R1, C1, S1>
where
N: Scalar + ClosedAdd + ClosedMul + One,
R1: Dim,
C1: Dim,
R2: Dim,
C2: Dim,
S1: CsStorage<N, R1, C1>,
S2: CsStorage<N, R2, C2>,
ShapeConstraint: DimEq<R1, R2> + DimEq<C1, C2>,
DefaultAllocator: Allocator<usize, C2> + Allocator<usize, R1> + Allocator<N, R1>,
{
type Output = CsMatrix<N, R1, C2>;
fn add(self, rhs: &'b CsMatrix<N, R2, C2, S2>) -> CsMatrix<N, R1, C2> {
let (nrows1, ncols1) = self.data.shape();
let (nrows2, ncols2) = rhs.data.shape();
assert_eq!(
(nrows1.value(), ncols1.value()),
(nrows2.value(), ncols2.value()),
"Mismatched dimensions for matrix sum."
);
let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len());
let mut timestamps = VectorN::zeros_generic(nrows1, U1);
let mut workspace = unsafe { VectorN::new_uninitialized_generic(nrows1, U1) };
let mut nz = 0;
for j in 0..ncols2.value() {
res.data.p[j] = nz;
nz = self.scatter(
j,
N::one(),
timestamps.as_mut_slice(),
j + 1,
workspace.as_mut_slice(),
nz,
&mut res,
);
nz = rhs.scatter(
j,
N::one(),
timestamps.as_mut_slice(),
j + 1,
workspace.as_mut_slice(),
nz,
&mut res,
);
for p in res.data.p[j]..nz {
res.data.vals[p] = workspace[res.data.i[p]]
}
}
res.data.i.truncate(nz);
res.data.i.shrink_to_fit();
res.data.vals.truncate(nz);
res.data.vals.shrink_to_fit();
res
}
}


@@ -0,0 +1,235 @@
use alga::general::{ClosedAdd, ClosedMul};
use num::{One, Zero};
use std::iter;
use std::marker::PhantomData;
use std::ops::{Add, Mul, Range};
use std::slice;
use allocator::Allocator;
use constraint::{AreMultipliable, DimEq, SameNumberOfRows, ShapeConstraint};
use sparse::{CsMatrix, CsStorage, CsVector};
use storage::{Storage, StorageMut};
use {DefaultAllocator, Dim, Matrix, MatrixMN, Real, Scalar, Vector, VectorN, U1};
impl<N: Real, D: Dim, S: CsStorage<N, D, D>> CsMatrix<N, D, D, S> {
pub fn solve_lower_triangular<R2: Dim, C2: Dim, S2>(
&self,
b: &Matrix<N, R2, C2, S2>,
) -> Option<MatrixMN<N, R2, C2>>
where
S2: Storage<N, R2, C2>,
DefaultAllocator: Allocator<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<D, R2>,
{
let mut b = b.clone_owned();
if self.solve_lower_triangular_mut(&mut b) {
Some(b)
} else {
None
}
}
pub fn tr_solve_lower_triangular<R2: Dim, C2: Dim, S2>(
&self,
b: &Matrix<N, R2, C2, S2>,
) -> Option<MatrixMN<N, R2, C2>>
where
S2: Storage<N, R2, C2>,
DefaultAllocator: Allocator<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<D, R2>,
{
let mut b = b.clone_owned();
if self.tr_solve_lower_triangular_mut(&mut b) {
Some(b)
} else {
None
}
}
pub fn solve_lower_triangular_mut<R2: Dim, C2: Dim, S2>(
&self,
b: &mut Matrix<N, R2, C2, S2>,
) -> bool
where
S2: StorageMut<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<D, R2>,
{
let (nrows, ncols) = self.data.shape();
assert_eq!(nrows.value(), ncols.value(), "The matrix must be square.");
assert_eq!(nrows.value(), b.len(), "Mismatched matrix dimensions.");
for j2 in 0..b.ncols() {
let mut b = b.column_mut(j2);
for j in 0..ncols.value() {
let mut column = self.data.column_entries(j);
let mut diag_found = false;
while let Some((i, val)) = column.next() {
if i == j {
if val.is_zero() {
return false;
}
b[j] /= val;
diag_found = true;
break;
}
}
if !diag_found {
return false;
}
for (i, val) in column {
b[i] -= b[j] * val;
}
}
}
true
}
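
This is column-oriented forward substitution: for each column j the diagonal entry is located first (the solve fails if it is absent or zero), b[j] is divided by it, and b[j] times the below-diagonal entries of column j is then subtracted from the corresponding rows of b. A self-contained sketch on a raw CSC triple (column pointers, row indices, values); unlike the method above, it does not assume the row indices of a column are sorted:

// Sketch: solve L * x = b in place for a lower-triangular CSC matrix L.
// Returns false when a diagonal entry is missing or zero.
fn solve_lower_triangular(
    col_ptr: &[usize], // length n + 1
    row_idx: &[usize],
    vals: &[f64],
    b: &mut [f64],
) -> bool {
    let n = b.len();
    for j in 0..n {
        let rng = col_ptr[j]..col_ptr[j + 1];
        // Find and apply the diagonal entry of column j.
        let diag = match rng.clone().find(|&p| row_idx[p] == j) {
            Some(p) if vals[p] != 0.0 => vals[p],
            _ => return false,
        };
        b[j] /= diag;
        // Propagate b[j] to the rows below the diagonal.
        for p in rng {
            let i = row_idx[p];
            if i > j {
                b[i] -= b[j] * vals[p];
            }
        }
    }
    true
}
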
pub fn tr_solve_lower_triangular_mut<R2: Dim, C2: Dim, S2>(
&self,
b: &mut Matrix<N, R2, C2, S2>,
) -> bool
where
S2: StorageMut<N, R2, C2>,
ShapeConstraint: SameNumberOfRows<D, R2>,
{
let (nrows, ncols) = self.data.shape();
assert_eq!(nrows.value(), ncols.value(), "The matrix must be square.");
assert_eq!(nrows.value(), b.len(), "Mismatched matrix dimensions.");
for j2 in 0..b.ncols() {
let mut b = b.column_mut(j2);
for j in (0..ncols.value()).rev() {
let mut column = self.data.column_entries(j);
let mut diag = None;
while let Some((i, val)) = column.next() {
if i == j {
if val.is_zero() {
return false;
}
diag = Some(val);
break;
}
}
if let Some(diag) = diag {
for (i, val) in column {
b[j] -= val * b[i];
}
b[j] /= diag;
} else {
return false;
}
}
}
true
}
pub fn solve_lower_triangular_cs<D2: Dim, S2>(
&self,
b: &CsVector<N, D2, S2>,
) -> Option<CsVector<N, D2>>
where
S2: CsStorage<N, D2>,
DefaultAllocator: Allocator<bool, D> + Allocator<N, D2> + Allocator<usize, D2>,
ShapeConstraint: SameNumberOfRows<D, D2>,
{
let mut reach = Vec::new();
self.lower_triangular_reach(b, &mut reach);
let mut workspace = unsafe { VectorN::new_uninitialized_generic(b.data.shape().0, U1) };
for i in reach.iter().cloned() {
workspace[i] = N::zero();
}
for (i, val) in b.data.column_entries(0) {
workspace[i] = val;
}
for j in reach.iter().cloned().rev() {
let mut column = self.data.column_entries(j);
let mut diag_found = false;
while let Some((i, val)) = column.next() {
if i == j {
if val.is_zero() {
break;
}
workspace[j] /= val;
diag_found = true;
break;
}
}
if !diag_found {
return None;
}
for (i, val) in column {
workspace[i] -= workspace[j] * val;
}
}
// Copy the result into a sparse vector.
let mut result = CsVector::new_uninitialized_generic(b.data.shape().0, U1, reach.len());
for (i, val) in reach.iter().zip(result.data.vals.iter_mut()) {
*val = workspace[*i];
}
result.data.i = reach;
Some(result)
}
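
The two helpers below implement the pattern-prediction step of a sparse-right-hand-side triangular solve (the same idea as CSparse's cs_reach): the nonzero pattern of the solution of L x = b is the set of nodes reachable, in the graph that has an edge j -> i for every stored entry L[i, j] with i > j, from the row indices of b. The iterative depth-first search records those nodes in postorder, and iterating that list in reverse in `solve_lower_triangular_cs` above processes every column before any column that depends on it.
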
fn lower_triangular_reach<D2: Dim, S2>(&self, b: &CsVector<N, D2, S2>, xi: &mut Vec<usize>)
where
S2: CsStorage<N, D2>,
DefaultAllocator: Allocator<bool, D>,
{
let mut visited = VectorN::repeat_generic(self.data.shape().1, U1, false);
let mut stack = Vec::new();
for i in b.data.column_range(0) {
let row_index = b.data.row_index(i);
if !visited[row_index] {
let rng = self.data.column_range(row_index);
stack.push((row_index, rng));
self.lower_triangular_dfs(visited.as_mut_slice(), &mut stack, xi);
}
}
}
fn lower_triangular_dfs(
&self,
visited: &mut [bool],
stack: &mut Vec<(usize, Range<usize>)>,
xi: &mut Vec<usize>,
) {
'recursion: while let Some((j, rng)) = stack.pop() {
visited[j] = true;
for i in rng.clone() {
let row_id = self.data.row_index(i);
if row_id > j && !visited[row_id] {
stack.push((j, (i + 1)..rng.end));
stack.push((row_id, self.data.column_range(row_id)));
continue 'recursion;
}
}
xi.push(j)
}
}
}


@@ -1,3 +1,8 @@
- pub use self::cs_matrix::{CsMatrix, CsVector};
+ pub use self::cs_matrix::{CsMatrix, CsStorage, CsStorageMut, CsVector};
mod cs_matrix;
+ mod cs_matrix_analysis;
+ mod cs_matrix_cholesky;
+ mod cs_matrix_conversion;
+ mod cs_matrix_ops;
+ mod cs_matrix_solve;