Remove Zero bound for transpose and impl SparsityPattern::transpose
This commit is contained in:
parent 3b1303d1e0
commit ef3477f411
@@ -107,7 +107,7 @@ impl<'a, T> From<&'a CscMatrix<T>> for DMatrix<T>
 
 impl<'a, T> From<&'a CscMatrix<T>> for CsrMatrix<T>
 where
-    T: Scalar + Zero
+    T: Scalar
 {
     fn from(matrix: &'a CscMatrix<T>) -> Self {
         convert_csc_csr(matrix)
@@ -116,7 +116,7 @@ impl<'a, T> From<&'a CscMatrix<T>> for CsrMatrix<T>
 
 impl<'a, T> From<&'a CsrMatrix<T>> for CscMatrix<T>
 where
-    T: Scalar + Zero
+    T: Scalar
 {
     fn from(matrix: &'a CsrMatrix<T>) -> Self {
         convert_csr_csc(matrix)
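With the transpose routine no longer needing a zero-initialized buffer, these CSR/CSC conversions only require `T: Scalar`. A minimal usage sketch of the `From` impls above; the `CscMatrix::try_from_csc_data` constructor and the exact accessor names are assumptions for illustration, not part of this diff:

```rust
use nalgebra_sparse::csc::CscMatrix;
use nalgebra_sparse::csr::CsrMatrix;

fn main() {
    // 3x2 matrix stored in CSC form:
    // [1 0]
    // [0 3]
    // [2 0]
    // Assumed constructor; the diff itself only shows the From impls.
    let csc = CscMatrix::try_from_csc_data(
        3, 2,
        vec![0, 2, 3],       // column offsets
        vec![0, 2, 1],       // row indices
        vec![1.0, 2.0, 3.0], // values
    ).unwrap();

    // Uses the `From<&CscMatrix<T>> for CsrMatrix<T>` impl shown above,
    // which after this commit only requires `T: Scalar`.
    let csr = CsrMatrix::from(&csc);
    assert_eq!(csr.nnz(), 3);
}
```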
@@ -1,12 +1,15 @@
 //! TODO
-use crate::coo::CooMatrix;
-use crate::csr::CsrMatrix;
-use nalgebra::{DMatrix, Scalar, Matrix, Dim, ClosedAdd};
-use nalgebra::storage::Storage;
+use std::ops::Add;
 use num_traits::Zero;
 
-use std::ops::{Add};
+use nalgebra::{ClosedAdd, Dim, DMatrix, Matrix, Scalar};
+use nalgebra::storage::Storage;
 
+use crate::coo::CooMatrix;
+use crate::cs;
 use crate::csc::CscMatrix;
+use crate::csr::CsrMatrix;
 
 /// TODO
 pub fn convert_dense_coo<T, R, C, S>(dense: &Matrix<T, R, C, S>) -> CooMatrix<T>
@@ -192,9 +195,9 @@ pub fn convert_dense_csc<T, R, C, S>(dense: &Matrix<T, R, C, S>) -> CscMatrix<T>
 /// TODO
 pub fn convert_csr_csc<T>(csr: &CsrMatrix<T>) -> CscMatrix<T>
 where
-    T: Scalar + Zero
+    T: Scalar
 {
-    let (offsets, indices, values) = transpose_cs(csr.nrows(),
+    let (offsets, indices, values) = cs::transpose_cs(csr.nrows(),
                                                       csr.ncols(),
                                                       csr.row_offsets(),
                                                       csr.col_indices(),
@@ -208,9 +211,9 @@ where
 /// TODO
 pub fn convert_csc_csr<T>(csc: &CscMatrix<T>) -> CsrMatrix<T>
 where
-    T: Scalar + Zero
+    T: Scalar
 {
-    let (offsets, indices, values) = transpose_cs(csc.ncols(),
+    let (offsets, indices, values) = cs::transpose_cs(csc.ncols(),
                                                       csc.nrows(),
                                                       csc.col_offsets(),
                                                       csc.row_indices(),
@@ -326,7 +329,7 @@ fn coo_to_unsorted_cs<T: Clone>(
         major_offsets[*major_idx] += 1;
     }
 
-    convert_counts_to_offsets(major_offsets);
+    cs::convert_counts_to_offsets(major_offsets);
 
     {
         // TODO: Instead of allocating a whole new vector storing the current counts,
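`convert_counts_to_offsets`, which this commit moves into the `cs` module (see its definition further down), turns per-lane entry counts into an exclusive prefix sum, which is exactly the offsets array of a compressed format. A standalone sketch of that transformation, independent of the crate:

```rust
fn counts_to_offsets(counts: &mut [usize]) {
    // Exclusive prefix sum: each slot becomes the number of entries
    // in all earlier lanes.
    let mut offset = 0;
    for slot in counts.iter_mut() {
        let count = *slot;
        *slot = offset;
        offset += count;
    }
}

fn main() {
    // Lane counts: lane 0 has 2 entries, lane 1 has none, lane 2 has 3, lane 3 has 1.
    let mut counts = vec![2, 0, 3, 1];
    counts_to_offsets(&mut counts);
    assert_eq!(counts, vec![0, 2, 2, 5]);
    // Appending nnz (= 6) yields the usual offsets array [0, 2, 2, 5, 6].
}
```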
@@ -377,66 +380,6 @@ fn sort_lane<T: Clone>(
     apply_permutation(values_result, values, permutation);
 }
 
-/// Transposes the compressed format.
-///
-/// This means that major and minor roles are switched. This is used for converting between CSR
-/// and CSC formats.
-fn transpose_cs<T>(major_dim: usize,
-                   minor_dim: usize,
-                   source_major_offsets: &[usize],
-                   source_minor_indices: &[usize],
-                   values: &[T])
-                   -> (Vec<usize>, Vec<usize>, Vec<T>)
-where
-    T: Scalar + Zero
-{
-    assert_eq!(source_major_offsets.len(), major_dim + 1);
-    assert_eq!(source_minor_indices.len(), values.len());
-    let nnz = values.len();
-
-    // Count the number of occurences of each minor index
-    let mut minor_counts = vec![0; minor_dim];
-    for minor_idx in source_minor_indices {
-        minor_counts[*minor_idx] += 1;
-    }
-    convert_counts_to_offsets(&mut minor_counts);
-    let mut target_offsets = minor_counts;
-    target_offsets.push(nnz);
-    let mut target_indices = vec![usize::MAX; nnz];
-    let mut target_values = vec![T::zero(); nnz];
-
-    // Keep track of how many entries we have placed in each target major lane
-    let mut current_target_major_counts = vec![0; minor_dim];
-
-    for source_major_idx in 0 .. major_dim {
-        let source_lane_begin = source_major_offsets[source_major_idx];
-        let source_lane_end = source_major_offsets[source_major_idx + 1];
-        let source_lane_indices = &source_minor_indices[source_lane_begin .. source_lane_end];
-        let source_lane_values = &values[source_lane_begin .. source_lane_end];
-
-        for (&source_minor_idx, val) in source_lane_indices.iter().zip(source_lane_values) {
-            // Compute the offset in the target data for this particular source entry
-            let target_lane_count = &mut current_target_major_counts[source_minor_idx];
-            let entry_offset = target_offsets[source_minor_idx] + *target_lane_count;
-            target_indices[entry_offset] = source_major_idx;
-            target_values[entry_offset] = val.inlined_clone();
-            *target_lane_count += 1;
-        }
-    }
-
-    (target_offsets, target_indices, target_values)
-}
-
-fn convert_counts_to_offsets(counts: &mut [usize]) {
-    // Convert the counts to an offset
-    let mut offset = 0;
-    for i_offset in counts.iter_mut() {
-        let count = *i_offset;
-        *i_offset = offset;
-        offset += count;
-    }
-}
-
 // TODO: Move this into `utils` or something?
 fn apply_permutation<T: Clone>(out_slice: &mut [T], in_slice: &[T], permutation: &[usize]) {
     assert_eq!(out_slice.len(), in_slice.len());
@@ -1,12 +1,14 @@
-use crate::pattern::SparsityPattern;
-use crate::{SparseEntry, SparseEntryMut};
-
-use std::sync::Arc;
-use std::ops::Range;
 use std::mem::replace;
+use std::ops::Range;
+use std::sync::Arc;
 
 use num_traits::One;
 
 use nalgebra::Scalar;
 
+use crate::{SparseEntry, SparseEntryMut};
+use crate::pattern::SparsityPattern;
 
 /// An abstract compressed matrix.
 ///
 /// For the time being, this is only used internally to share implementation between
@@ -397,3 +399,100 @@ impl<'a, T> CsLaneMut<'a, T> {
                             global_minor_index)
     }
 }
+
+/// Helper struct for working with uninitialized data in vectors.
+/// TODO: This doesn't belong here.
+struct UninitVec<T> {
+    vec: Vec<T>
+}
+
+impl<T> UninitVec<T> {
+    pub fn from_len(len: usize) -> Self {
+        Self {
+            vec: Vec::with_capacity(len)
+        }
+    }
+
+    /// Sets the element associated with the given index to the provided value.
+    ///
+    /// Must be called exactly once per index, otherwise results in undefined behavior.
+    pub unsafe fn set(&mut self, index: usize, value: T) {
+        self.vec.as_mut_ptr().add(index).write(value)
+    }
+
+    /// Marks the vector data as initialized by returning a full vector.
+    ///
+    /// It is undefined behavior to call this function unless *all* elements have been written to
+    /// exactly once.
+    pub unsafe fn assume_init(mut self) -> Vec<T> {
+        self.vec.set_len(self.vec.capacity());
+        self.vec
+    }
+}
+
+/// Transposes the compressed format.
+///
+/// This means that major and minor roles are switched. This is used for converting between CSR
+/// and CSC formats.
+pub fn transpose_cs<T>(
+    major_dim: usize,
+    minor_dim: usize,
+    source_major_offsets: &[usize],
+    source_minor_indices: &[usize],
+    values: &[T])
+    -> (Vec<usize>, Vec<usize>, Vec<T>)
+where
+    T: Scalar
+{
+    assert_eq!(source_major_offsets.len(), major_dim + 1);
+    assert_eq!(source_minor_indices.len(), values.len());
+    let nnz = values.len();
+
+    // Count the number of occurences of each minor index
+    let mut minor_counts = vec![0; minor_dim];
+    for minor_idx in source_minor_indices {
+        minor_counts[*minor_idx] += 1;
+    }
+    convert_counts_to_offsets(&mut minor_counts);
+    let mut target_offsets = minor_counts;
+    target_offsets.push(nnz);
+    let mut target_indices = vec![usize::MAX; nnz];
+
+    // We have to use uninitialized storage, because we don't have any kind of "default" value
+    // available for `T`. Unfortunately this necessitates some small amount of unsafe code
+    let mut target_values = UninitVec::from_len(nnz);
+
+    // Keep track of how many entries we have placed in each target major lane
+    let mut current_target_major_counts = vec![0; minor_dim];
+
+    for source_major_idx in 0 .. major_dim {
+        let source_lane_begin = source_major_offsets[source_major_idx];
+        let source_lane_end = source_major_offsets[source_major_idx + 1];
+        let source_lane_indices = &source_minor_indices[source_lane_begin .. source_lane_end];
+        let source_lane_values = &values[source_lane_begin .. source_lane_end];
+
+        for (&source_minor_idx, val) in source_lane_indices.iter().zip(source_lane_values) {
+            // Compute the offset in the target data for this particular source entry
+            let target_lane_count = &mut current_target_major_counts[source_minor_idx];
+            let entry_offset = target_offsets[source_minor_idx] + *target_lane_count;
+            target_indices[entry_offset] = source_major_idx;
+            unsafe { target_values.set(entry_offset, val.inlined_clone()); }
+            *target_lane_count += 1;
+        }
+    }
+
+    // At this point, we should have written to each element in target_values exactly once,
+    // so initialization should be sound
+    let target_values = unsafe { target_values.assume_init() };
+    (target_offsets, target_indices, target_values)
+}
+
+pub fn convert_counts_to_offsets(counts: &mut [usize]) {
+    // Convert the counts to an offset
+    let mut offset = 0;
+    for i_offset in counts.iter_mut() {
+        let count = *i_offset;
+        *i_offset = offset;
+        offset += count;
+    }
+}
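To make the counting transpose above concrete, here is a sketch of a test that could sit inside this `cs` module (it is not part of the commit); it transposes the CSR layout of a 2x3 matrix and checks the resulting 3x2 layout:

```rust
#[test]
fn transpose_cs_small_example() {
    // CSR layout of the 2x3 matrix
    // [1 0 2]
    // [0 3 0]
    let major_offsets: [usize; 3] = [0, 2, 3]; // row offsets
    let minor_indices: [usize; 3] = [0, 2, 1]; // column indices
    let values: [i32; 3] = [1, 2, 3];

    let (offsets, indices, vals) =
        transpose_cs(2, 3, &major_offsets, &minor_indices, &values);

    // CSR layout of the transposed 3x2 matrix
    // [1 0]
    // [0 3]
    // [2 0]
    assert_eq!(offsets, vec![0, 1, 2, 3]);
    assert_eq!(indices, vec![0, 1, 0]);
    assert_eq!(vals, vec![1, 3, 2]);
}
```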
@@ -7,7 +7,7 @@ use crate::cs::{CsMatrix, CsLane, CsLaneMut, CsLaneIter, CsLaneIterMut};
 
 use std::sync::Arc;
 use std::slice::{IterMut, Iter};
-use num_traits::{Zero, One};
+use num_traits::{One};
 use nalgebra::Scalar;
 
 /// A CSC representation of a sparse matrix.
@@ -368,7 +368,7 @@ impl<T> CscMatrix<T> {
 
 impl<T> CscMatrix<T>
 where
-    T: Scalar + Zero
+    T: Scalar
 {
     /// Compute the transpose of the matrix.
     pub fn transpose(&self) -> CscMatrix<T> {
@@ -5,7 +5,7 @@ use crate::csc::CscMatrix;
 use crate::cs::{CsMatrix, CsLaneIterMut, CsLaneIter, CsLane, CsLaneMut};
 
 use nalgebra::Scalar;
-use num_traits::{Zero, One};
+use num_traits::{One};
 
 use std::sync::Arc;
 use std::slice::{IterMut, Iter};
@@ -368,7 +368,7 @@ impl<T> CsrMatrix<T> {
 
 impl<T> CsrMatrix<T>
 where
-    T: Scalar + Zero
+    T: Scalar
 {
     /// Compute the transpose of the matrix.
     pub fn transpose(&self) -> CsrMatrix<T> {
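Dropping the `Zero` bound means `CsrMatrix::transpose` and `CscMatrix::transpose` now work for any `Scalar` payload, not only numeric types with a zero element. A hedged sketch; the wrapper type and the `CsrMatrix::try_from_csr_data` constructor are assumptions for illustration, and the example relies on nalgebra's blanket `Scalar` impl for `Copy` types:

```rust
use nalgebra_sparse::csr::CsrMatrix;

// A payload with no meaningful zero element; it still satisfies `Scalar`
// (Copy + PartialEq + Debug) under nalgebra's blanket impl.
#[derive(Copy, Clone, Debug, PartialEq)]
struct Tag(u32);

fn main() {
    // Assumed constructor; not part of this diff.
    let csr = CsrMatrix::try_from_csr_data(
        2, 3,
        vec![0, 2, 3],                // row offsets
        vec![0, 2, 1],                // column indices
        vec![Tag(1), Tag(2), Tag(3)], // values
    ).unwrap();

    // After this commit the only requirement is `T: Scalar`.
    let transposed = csr.transpose();
    assert_eq!(transposed.nrows(), 3);
    assert_eq!(transposed.ncols(), 2);
}
```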
@@ -2,6 +2,7 @@
 use crate::SparseFormatError;
 use std::fmt;
 use std::error::Error;
+use crate::cs::transpose_cs;
 
 /// A representation of the sparsity pattern of a CSR or CSC matrix.
 ///
@@ -204,6 +205,24 @@ impl SparsityPattern {
     pub fn disassemble(self) -> (Vec<usize>, Vec<usize>) {
         (self.major_offsets, self.minor_indices)
     }
+
+    /// TODO
+    pub fn transpose(&self) -> Self {
+        // By using unit () values, we can use the same routines as for CSR/CSC matrices
+        let values = vec![(); self.nnz()];
+        let (new_offsets, new_indices, _) = transpose_cs(
+            self.major_dim(),
+            self.minor_dim(),
+            self.major_offsets(),
+            self.minor_indices(),
+            &values);
+        // TODO: Skip checks
+        Self::try_from_offsets_and_indices(self.minor_dim(),
+                                           self.major_dim(),
+                                           new_offsets,
+                                           new_indices)
+            .expect("Internal error: Transpose should never fail.")
+    }
 }
 
 /// Error type for `SparsityPattern` format errors.
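The unit-value trick above lets the pattern transpose reuse `transpose_cs` with `values: &[()]` and simply discard the value output. A short usage sketch, assuming the constructor and accessors referenced in the diff (`try_from_offsets_and_indices`, `major_dim`, `minor_dim`, `major_offsets`, `minor_indices`, `nnz`) behave as their names suggest:

```rust
use nalgebra_sparse::pattern::SparsityPattern;

fn main() {
    // Pattern of a 2x3 matrix with entries at (0,0), (0,2) and (1,1).
    let pattern = SparsityPattern::try_from_offsets_and_indices(
        2,              // major dimension (rows, for a CSR interpretation)
        3,              // minor dimension (columns)
        vec![0, 2, 3],  // major offsets
        vec![0, 2, 1],  // minor indices
    ).unwrap();

    let transposed = pattern.transpose();

    // Major and minor roles are switched in the result.
    assert_eq!(transposed.major_dim(), 3);
    assert_eq!(transposed.minor_dim(), 2);
    assert_eq!(transposed.major_offsets(), &[0, 1, 2, 3][..]);
    assert_eq!(transposed.minor_indices(), &[0, 1, 0][..]);
}
```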