Implement remaining conversions (missing docs)

Andreas Longva 2020-11-24 17:36:03 +01:00
parent 6083d24dd6
commit 95ee65fa8e
4 changed files with 904 additions and 18 deletions

View File

@@ -1,8 +1,10 @@
use crate::coo::CooMatrix;
-use crate::convert::serial::{convert_dense_coo, convert_coo_dense};
use crate::convert::serial::*;
use nalgebra::{Matrix, Scalar, Dim, ClosedAdd, DMatrix};
use nalgebra::storage::{Storage};
use num_traits::Zero;
use crate::csr::CsrMatrix;
use crate::csc::CscMatrix;
impl<'a, T, R, C, S> From<&'a Matrix<T, R, C, S>> for CooMatrix<T>
where
@@ -23,4 +25,100 @@ where
fn from(coo: &'a CooMatrix<T>) -> Self {
convert_coo_dense(coo)
}
}
impl<'a, T> From<&'a CooMatrix<T>> for CsrMatrix<T>
where
T: Scalar + Zero + ClosedAdd
{
fn from(matrix: &'a CooMatrix<T>) -> Self {
convert_coo_csr(matrix)
}
}
impl<'a, T> From<&'a CsrMatrix<T>> for CooMatrix<T>
where
T: Scalar + Zero + ClosedAdd
{
fn from(matrix: &'a CsrMatrix<T>) -> Self {
convert_csr_coo(matrix)
}
}
impl<'a, T, R, C, S> From<&'a Matrix<T, R, C, S>> for CsrMatrix<T>
where
T: Scalar + Zero,
R: Dim,
C: Dim,
S: Storage<T, R, C>
{
fn from(matrix: &'a Matrix<T, R, C, S>) -> Self {
convert_dense_csr(matrix)
}
}
impl<'a, T> From<&'a CsrMatrix<T>> for DMatrix<T>
where
T: Scalar + Zero + ClosedAdd
{
fn from(matrix: &'a CsrMatrix<T>) -> Self {
convert_csr_dense(matrix)
}
}
impl<'a, T> From<&'a CooMatrix<T>> for CscMatrix<T>
where
T: Scalar + Zero + ClosedAdd
{
fn from(matrix: &'a CooMatrix<T>) -> Self {
convert_coo_csc(matrix)
}
}
impl<'a, T> From<&'a CscMatrix<T>> for CooMatrix<T>
where
T: Scalar + Zero
{
fn from(matrix: &'a CscMatrix<T>) -> Self {
convert_csc_coo(matrix)
}
}
impl<'a, T, R, C, S> From<&'a Matrix<T, R, C, S>> for CscMatrix<T>
where
T: Scalar + Zero,
R: Dim,
C: Dim,
S: Storage<T, R, C>
{
fn from(matrix: &'a Matrix<T, R, C, S>) -> Self {
convert_dense_csc(matrix)
}
}
impl<'a, T> From<&'a CscMatrix<T>> for DMatrix<T>
where
T: Scalar + Zero + ClosedAdd
{
fn from(matrix: &'a CscMatrix<T>) -> Self {
convert_csc_dense(matrix)
}
}
impl<'a, T> From<&'a CscMatrix<T>> for CsrMatrix<T>
where
T: Scalar + Zero
{
fn from(matrix: &'a CscMatrix<T>) -> Self {
convert_csc_csr(matrix)
}
}
impl<'a, T> From<&'a CsrMatrix<T>> for CscMatrix<T>
where
T: Scalar + Zero
{
fn from(matrix: &'a CsrMatrix<T>) -> Self {
convert_csr_csc(matrix)
}
}
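Taken together, these impls simply delegate to the serial conversion routines in `convert::serial`, so a sparse matrix can be moved between formats with plain `From`/`Into` calls. A minimal sketch of how this is used (illustrative only, relying on the `CooMatrix::new`/`push` API exercised by the tests further down):

use nalgebra::DMatrix;
use nalgebra_sparse::coo::CooMatrix;
use nalgebra_sparse::csc::CscMatrix;
use nalgebra_sparse::csr::CsrMatrix;

fn main() {
    // Build a small COO matrix and funnel it through the new conversions.
    let mut coo = CooMatrix::new(3, 4);
    coo.push(0, 1, 2.0);
    coo.push(2, 3, 4.0);
    let csr = CsrMatrix::from(&coo);   // COO -> CSR
    let csc = CscMatrix::from(&csr);   // CSR -> CSC
    let dense = DMatrix::from(&csc);   // CSC -> dense
    assert_eq!(dense, DMatrix::from(&coo));
}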

View File

@@ -1,10 +1,12 @@
//! TODO
-use nalgebra::{DMatrix, Scalar, Matrix, Dim};
use crate::coo::CooMatrix;
use crate::csr::CsrMatrix;
-use num_traits::Zero;
-use std::ops::{Add, AddAssign};
use nalgebra::{DMatrix, Scalar, Matrix, Dim, ClosedAdd};
use nalgebra::storage::Storage;
use num_traits::Zero;
use std::ops::{Add};
use crate::csc::CscMatrix;
/// TODO
pub fn convert_dense_coo<T, R, C, S>(dense: &Matrix<T, R, C, S>) -> CooMatrix<T>
@@ -33,7 +35,7 @@ where
/// TODO: What should the actual trait bounds be?
pub fn convert_coo_dense<T>(coo: &CooMatrix<T>) -> DMatrix<T>
where
-T: Scalar + Zero + Add + AddAssign,
T: Scalar + Zero + ClosedAdd,
{
let mut output = DMatrix::repeat(coo.nrows(), coo.ncols(), T::zero());
for (i, j, v) in coo.triplet_iter() {
@@ -43,6 +45,430 @@ where
}
/// TODO
-pub fn convert_coo_csr<T>(_coo: &CooMatrix<T>) -> CsrMatrix<T> {
-todo!()
pub fn convert_coo_csr<T>(coo: &CooMatrix<T>) -> CsrMatrix<T>
where
T: Scalar + Zero
{
let (offsets, indices, values) = convert_coo_cs(coo.nrows(),
coo.row_indices(),
coo.col_indices(),
coo.values());
// TODO: Avoid "try_from" since it validates the data? (requires unsafe, should benchmark
// to see if it can be justified for performance reasons)
CsrMatrix::try_from_csr_data(coo.nrows(), coo.ncols(), offsets, indices, values)
.expect("Internal error: Invalid CSR data during COO->CSR conversion")
}
/// TODO
pub fn convert_csr_coo<T: Scalar>(csr: &CsrMatrix<T>) -> CooMatrix<T>
{
let mut result = CooMatrix::new(csr.nrows(), csr.ncols());
for (i, j, v) in csr.triplet_iter() {
result.push(i, j, v.inlined_clone());
}
result
}
/// TODO
pub fn convert_csr_dense<T>(csr: &CsrMatrix<T>) -> DMatrix<T>
where
T: Scalar + ClosedAdd + Zero
{
let mut output = DMatrix::zeros(csr.nrows(), csr.ncols());
for (i, j, v) in csr.triplet_iter() {
output[(i, j)] += v.inlined_clone();
}
output
}
/// TODO
pub fn convert_dense_csr<T, R, C, S>(dense: &Matrix<T, R, C, S>) -> CsrMatrix<T>
where
T: Scalar + Zero,
R: Dim,
C: Dim,
S: Storage<T, R, C>
{
let mut row_offsets = Vec::with_capacity(dense.nrows() + 1);
let mut col_idx = Vec::new();
let mut values = Vec::new();
// We have to iterate row-by-row to build the CSR matrix, which is at odds with
// nalgebra's column-major storage. The alternative would be to perform an initial sweep
// to count the number of non-zeros per row.
row_offsets.push(0);
for i in 0 .. dense.nrows() {
for j in 0 .. dense.ncols() {
let v = dense.index((i, j));
if v != &T::zero() {
col_idx.push(j);
values.push(v.inlined_clone());
}
}
row_offsets.push(col_idx.len());
}
// TODO: Consider circumventing the data validity check here
// (would require unsafe, should benchmark)
CsrMatrix::try_from_csr_data(dense.nrows(), dense.ncols(), row_offsets, col_idx, values)
.expect("Internal error: Invalid CsrMatrix format during dense-> CSR conversion")
}
/// TODO
pub fn convert_coo_csc<T>(coo: &CooMatrix<T>) -> CscMatrix<T>
where
T: Scalar + Zero
{
let (offsets, indices, values) = convert_coo_cs(coo.ncols(),
coo.col_indices(),
coo.row_indices(),
coo.values());
// TODO: Avoid "try_from" since it validates the data? (requires unsafe, should benchmark
// to see if it can be justified for performance reasons)
CscMatrix::try_from_csc_data(coo.nrows(), coo.ncols(), offsets, indices, values)
.expect("Internal error: Invalid CSC data during COO->CSC conversion")
}
/// TODO
pub fn convert_csc_coo<T>(csc: &CscMatrix<T>) -> CooMatrix<T>
where
T: Scalar
{
let mut coo = CooMatrix::new(csc.nrows(), csc.ncols());
for (i, j, v) in csc.triplet_iter() {
coo.push(i, j, v.inlined_clone());
}
coo
}
/// TODO
pub fn convert_csc_dense<T>(csc: &CscMatrix<T>) -> DMatrix<T>
where
T: Scalar + ClosedAdd + Zero
{
let mut output = DMatrix::zeros(csc.nrows(), csc.ncols());
for (i, j, v) in csc.triplet_iter() {
output[(i, j)] += v.inlined_clone();
}
output
}
/// TODO
pub fn convert_dense_csc<T, R, C, S>(dense: &Matrix<T, R, C, S>) -> CscMatrix<T>
where
T: Scalar + Zero,
R: Dim,
C: Dim,
S: Storage<T, R, C>
{
let mut col_offsets = Vec::with_capacity(dense.ncols() + 1);
let mut row_idx = Vec::new();
let mut values = Vec::new();
col_offsets.push(0);
for j in 0 .. dense.ncols() {
for i in 0 .. dense.nrows() {
let v = dense.index((i, j));
if v != &T::zero() {
row_idx.push(i);
values.push(v.inlined_clone());
}
}
col_offsets.push(row_idx.len());
}
// TODO: Consider circumventing the data validity check here
// (would require unsafe, should benchmark)
CscMatrix::try_from_csc_data(dense.nrows(), dense.ncols(), col_offsets, row_idx, values)
.expect("Internal error: Invalid CscMatrix format during dense-> CSC conversion")
}
/// TODO
pub fn convert_csr_csc<T>(csr: &CsrMatrix<T>) -> CscMatrix<T>
where
T: Scalar + Zero
{
let (offsets, indices, values) = transpose_cs(csr.nrows(),
csr.ncols(),
csr.row_offsets(),
csr.col_indices(),
csr.values());
// TODO: Avoid data validity check?
CscMatrix::try_from_csc_data(csr.nrows(), csr.ncols(), offsets, indices, values)
.expect("Internal error: Invalid CSC data during CSR->CSC conversion")
}
/// TODO
pub fn convert_csc_csr<T>(csc: &CscMatrix<T>) -> CsrMatrix<T>
where
T: Scalar + Zero
{
let (offsets, indices, values) = transpose_cs(csc.ncols(),
csc.nrows(),
csc.col_offsets(),
csc.row_indices(),
csc.values());
// TODO: Avoid data validity check?
CsrMatrix::try_from_csr_data(csc.nrows(), csc.ncols(), offsets, indices, values)
.expect("Internal error: Invalid CSR data during CSC->CSR conversion")
}
fn convert_coo_cs<T>(major_dim: usize,
major_indices: &[usize],
minor_indices: &[usize],
values: &[T])
-> (Vec<usize>, Vec<usize>, Vec<T>)
where
T: Scalar + Zero
{
assert_eq!(major_indices.len(), minor_indices.len());
assert_eq!(minor_indices.len(), values.len());
let nnz = major_indices.len();
let (unsorted_major_offsets, unsorted_minor_idx, unsorted_vals) = {
let mut offsets = vec![0usize; major_dim + 1];
let mut minor_idx = vec![0usize; nnz];
let mut vals = vec![T::zero(); nnz];
coo_to_unsorted_cs(
&mut offsets,
&mut minor_idx,
&mut vals,
major_dim,
major_indices,
minor_indices,
values,
);
(offsets, minor_idx, vals)
};
// TODO: If input is sorted and/or without duplicates, we can avoid additional allocations
// and work. Might want to take advantage of this.
// At this point, assembly is essentially complete. However, we must ensure
// that minor indices are sorted within each lane and without duplicates.
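// E.g. an unsorted lane with minor indices [3, 1, 1] and values [a, b, c] becomes
// indices [1, 3] with values [b + c, a].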
let mut sorted_major_offsets = Vec::new();
let mut sorted_minor_idx = Vec::new();
let mut sorted_vals = Vec::new();
sorted_major_offsets.push(0);
// We need some temporary storage when working with each lane. Since lanes often have a
// very small number of non-zero entries, we try to amortize allocations across
// lanes by reusing workspace vectors
let mut idx_workspace = Vec::new();
let mut perm_workspace = Vec::new();
let mut values_workspace = Vec::new();
for lane in 0..major_dim {
let begin = unsorted_major_offsets[lane];
let end = unsorted_major_offsets[lane + 1];
let count = end - begin;
let range = begin..end;
// Ensure that workspaces can hold enough data
perm_workspace.resize(count, 0);
idx_workspace.resize(count, 0);
values_workspace.resize(count, T::zero());
sort_lane(
&mut idx_workspace[..count],
&mut values_workspace[..count],
&unsorted_minor_idx[range.clone()],
&unsorted_vals[range.clone()],
&mut perm_workspace[..count],
);
let sorted_ja_current_len = sorted_minor_idx.len();
combine_duplicates(
|idx| sorted_minor_idx.push(idx),
|val| sorted_vals.push(val),
&idx_workspace[..count],
&values_workspace[..count],
&Add::add,
);
let new_col_count = sorted_minor_idx.len() - sorted_ja_current_len;
sorted_major_offsets.push(sorted_major_offsets.last().unwrap() + new_col_count);
}
(sorted_major_offsets, sorted_minor_idx, sorted_vals)
}
/// Converts matrix data given in triplet format to unsorted CSR/CSC, retaining any duplicated
/// indices.
///
/// Here `major/minor` is `row/col` for CSR and `col/row` for CSC.
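///
/// For example, with `major_dim = 3`, major indices `[0, 2, 0]`, minor indices `[1, 0, 1]` and
/// values `[a, b, c]`, the output is offsets `[0, 2, 2, 3]`, minor indices `[1, 1, 0]` and
/// values `[a, c, b]`; duplicate entries are kept as-is.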
fn coo_to_unsorted_cs<T: Clone>(
major_offsets: &mut [usize],
cs_minor_idx: &mut [usize],
cs_values: &mut [T],
major_dim: usize,
major_indices: &[usize],
minor_indices: &[usize],
coo_values: &[T],
) {
assert_eq!(major_offsets.len(), major_dim + 1);
assert_eq!(cs_minor_idx.len(), cs_values.len());
assert_eq!(cs_values.len(), major_indices.len());
assert_eq!(major_indices.len(), minor_indices.len());
assert_eq!(minor_indices.len(), coo_values.len());
// Count the number of occurrences of each major index (row for CSR, column for CSC)
for major_idx in major_indices {
major_offsets[*major_idx] += 1;
}
convert_counts_to_offsets(major_offsets);
{
// TODO: Instead of allocating a whole new vector storing the current counts,
// I think it's possible to be a bit more clever by storing each count
// in the last of the column indices for each row
let mut current_counts = vec![0usize; major_dim + 1];
let triplet_iter = major_indices.iter().zip(minor_indices).zip(coo_values);
for ((i, j), value) in triplet_iter {
let current_offset = major_offsets[*i] + current_counts[*i];
cs_minor_idx[current_offset] = *j;
cs_values[current_offset] = value.clone();
current_counts[*i] += 1;
}
}
}
/// Sort the indices of the given lane.
///
/// The indices and values in `minor_idx` and `values` are sorted according to the
/// minor indices and stored in `minor_idx_result` and `values_result` respectively.
///
/// All input slices are expected to be of the same length. The contents of mutable slices
/// can be arbitrary, as they are anyway overwritten.
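///
/// For example, minor indices `[3, 1, 2]` with values `[a, b, c]` are written to the result
/// slices as `[1, 2, 3]` and `[b, c, a]`.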
fn sort_lane<T: Clone>(
minor_idx_result: &mut [usize],
values_result: &mut [T],
minor_idx: &[usize],
values: &[T],
workspace: &mut [usize],
) {
assert_eq!(minor_idx_result.len(), values_result.len());
assert_eq!(values_result.len(), minor_idx.len());
assert_eq!(minor_idx.len(), values.len());
assert_eq!(values.len(), workspace.len());
let permutation = workspace;
// Set permutation to identity
for (i, p) in permutation.iter_mut().enumerate() {
*p = i;
}
// Compute permutation needed to bring minor indices into sorted order
// Note: Using sort_unstable here avoids internal allocations, which is crucial since
// each lane might have a small number of elements
permutation.sort_unstable_by_key(|idx| minor_idx[*idx]);
apply_permutation(minor_idx_result, minor_idx, permutation);
apply_permutation(values_result, values, permutation);
}
/// Transposes the compressed format.
///
/// This means that major and minor roles are switched. This is used for converting between CSR
/// and CSC formats.
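///
/// For example, the CSR data offsets `[0, 2, 3]`, column indices `[0, 2, 1]`, values `[a, b, c]`
/// (a 2x3 matrix) yields the CSC data offsets `[0, 1, 2, 3]`, row indices `[0, 1, 0]`,
/// values `[a, c, b]` for the same matrix.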
fn transpose_cs<T>(major_dim: usize,
minor_dim: usize,
source_major_offsets: &[usize],
source_minor_indices: &[usize],
values: &[T])
-> (Vec<usize>, Vec<usize>, Vec<T>)
where
T: Scalar + Zero
{
assert_eq!(source_major_offsets.len(), major_dim + 1);
assert_eq!(source_minor_indices.len(), values.len());
let nnz = values.len();
// Count the number of occurrences of each minor index
let mut minor_counts = vec![0; minor_dim];
for minor_idx in source_minor_indices {
minor_counts[*minor_idx] += 1;
}
convert_counts_to_offsets(&mut minor_counts);
let mut target_offsets = minor_counts;
target_offsets.push(nnz);
let mut target_indices = vec![usize::MAX; nnz];
let mut target_values = vec![T::zero(); nnz];
// Keep track of how many entries we have placed in each target major lane
let mut current_target_major_counts = vec![0; minor_dim];
for source_major_idx in 0 .. major_dim {
let source_lane_begin = source_major_offsets[source_major_idx];
let source_lane_end = source_major_offsets[source_major_idx + 1];
let source_lane_indices = &source_minor_indices[source_lane_begin .. source_lane_end];
let source_lane_values = &values[source_lane_begin .. source_lane_end];
for (&source_minor_idx, val) in source_lane_indices.iter().zip(source_lane_values) {
// Compute the offset in the target data for this particular source entry
let target_lane_count = &mut current_target_major_counts[source_minor_idx];
let entry_offset = target_offsets[source_minor_idx] + *target_lane_count;
target_indices[entry_offset] = source_major_idx;
target_values[entry_offset] = val.inlined_clone();
*target_lane_count += 1;
}
}
(target_offsets, target_indices, target_values)
}
fn convert_counts_to_offsets(counts: &mut [usize]) {
// Convert the counts to offsets
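// (an exclusive prefix sum: e.g. counts [2, 0, 3, 1] become offsets [0, 2, 2, 5])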
let mut offset = 0;
for i_offset in counts.iter_mut() {
let count = *i_offset;
*i_offset = offset;
offset += count;
}
}
// TODO: Move this into `utils` or something?
fn apply_permutation<T: Clone>(out_slice: &mut [T], in_slice: &[T], permutation: &[usize]) {
assert_eq!(out_slice.len(), in_slice.len());
assert_eq!(out_slice.len(), permutation.len());
for (out_element, old_pos) in out_slice.iter_mut().zip(permutation) {
*out_element = in_slice[*old_pos].clone();
}
}
/// Given *sorted* indices and corresponding scalar values, combines duplicates with the given
/// associative combiner and calls the provided produce methods with combined indices and values.
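///
/// For example, sorted indices `[1, 1, 3]` with values `[2, 5, 7]` and `Add::add` as combiner
/// produce the pairs `(1, 7)` and `(3, 7)`.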
fn combine_duplicates<T: Clone>(
mut produce_idx: impl FnMut(usize),
mut produce_value: impl FnMut(T),
idx_array: &[usize],
values: &[T],
combiner: impl Fn(T, T) -> T,
) {
assert_eq!(idx_array.len(), values.len());
let mut i = 0;
while i < idx_array.len() {
let idx = idx_array[i];
let mut combined_value = values[i].clone();
let mut j = i + 1;
while j < idx_array.len() && idx_array[j] == idx {
let j_val = values[j].clone();
combined_value = combiner(combined_value, j_val);
j += 1;
}
produce_idx(idx);
produce_value(combined_value);
i = j;
}
}
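The free functions above can also be called directly, without going through the `From` impls. A minimal sketch of a dense round trip through both compressed formats (illustrative only; assumes a crate depending on `nalgebra` and `nalgebra-sparse`):

use nalgebra::DMatrix;
use nalgebra_sparse::convert::serial::{convert_csc_dense, convert_csr_csc, convert_dense_csr};

fn main() {
    let dense = DMatrix::from_row_slice(2, 3, &[
        0, 1, 0,
        2, 0, 3,
    ]);
    let csr = convert_dense_csr(&dense);  // dense -> CSR, explicit zeros are skipped
    let csc = convert_csr_csc(&csr);      // CSR -> CSC via transpose_cs
    assert_eq!(convert_csc_dense(&csc), dense);
}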

View File

@@ -45,14 +45,17 @@
//! - [x] "Disassemble" the COO matrix into its underlying triplet arrays.
//! - Format conversion:
//! - [x] COO -> Dense
-//! - [ ] CSR -> Dense
-//! - [ ] CSC -> Dense
-//! - [ ] COO -> CSR
-//! - [ ] COO -> CSC
-//! - [ ] CSR -> CSC
-//! - [ ] CSC -> CSR
-//! - [ ] CSR -> COO
-//! - [ ] CSC -> COO
//! - [x] CSR -> Dense
//! - [x] CSC -> Dense
//! - [x] COO -> CSR
//! - [x] COO -> CSC
//! - [x] CSR -> CSC
//! - [x] CSC -> CSR
//! - [x] CSR -> COO
//! - [x] CSC -> COO
//! - [x] Dense -> COO
//! - [x] Dense -> CSR
//! - [x] Dense -> CSC
//! - Arithmetic. In general arithmetic is only implemented between instances of the same format,
//! or between dense and instances of a given format. For example, we do not implement
//! CSR * CSC, only CSR * CSR and CSC * CSC.

View File

@@ -1,9 +1,16 @@
use nalgebra_sparse::coo::CooMatrix;
-use nalgebra_sparse::convert::serial::{convert_coo_dense, convert_dense_coo};
-use nalgebra_sparse::proptest::coo_with_duplicates;
use nalgebra_sparse::convert::serial::{convert_coo_dense, convert_coo_csr,
convert_dense_coo, convert_csr_dense,
convert_csr_coo, convert_dense_csr,
convert_csc_coo, convert_coo_csc,
convert_csc_dense, convert_dense_csc,
convert_csr_csc, convert_csc_csr};
use nalgebra_sparse::proptest::{coo_with_duplicates, coo_no_duplicates, csr, csc};
use nalgebra::proptest::matrix;
use proptest::prelude::*;
use nalgebra::DMatrix;
use nalgebra_sparse::csr::CsrMatrix;
use nalgebra_sparse::csc::CscMatrix;
#[test]
fn test_convert_dense_coo() {
@@ -49,10 +56,244 @@ fn test_convert_dense_coo() {
}
}
#[test]
fn test_convert_coo_csr() {
// No duplicates
{
let coo = {
let mut coo = CooMatrix::new(3, 4);
coo.push(1, 3, 4);
coo.push(0, 1, 2);
coo.push(2, 0, 1);
coo.push(2, 3, 2);
coo.push(2, 2, 1);
coo
};
let expected_csr = CsrMatrix::try_from_csr_data(
3,
4,
vec![0, 1, 2, 5],
vec![1, 3, 0, 2, 3],
vec![2, 4, 1, 1, 2]
).unwrap();
assert_eq!(convert_coo_csr(&coo), expected_csr);
}
// Duplicates
{
let coo = {
let mut coo = CooMatrix::new(3, 4);
coo.push(1, 3, 4);
coo.push(2, 3, 2);
coo.push(0, 1, 2);
coo.push(2, 0, 1);
coo.push(2, 3, 2);
coo.push(0, 1, 3);
coo.push(2, 2, 1);
coo
};
let expected_csr = CsrMatrix::try_from_csr_data(
3,
4,
vec![0, 1, 2, 5],
vec![1, 3, 0, 2, 3],
vec![5, 4, 1, 1, 4]
).unwrap();
assert_eq!(convert_coo_csr(&coo), expected_csr);
}
}
#[test]
fn test_convert_csr_coo() {
let csr = CsrMatrix::try_from_csr_data(
3,
4,
vec![0, 1, 2, 5],
vec![1, 3, 0, 2, 3],
vec![5, 4, 1, 1, 4]
).unwrap();
let expected_coo = CooMatrix::try_from_triplets(
3,
4,
vec![0, 1, 2, 2, 2],
vec![1, 3, 0, 2, 3],
vec![5, 4, 1, 1, 4]
).unwrap();
assert_eq!(convert_csr_coo(&csr), expected_coo);
}
#[test]
fn test_convert_coo_csc() {
// No duplicates
{
let coo = {
let mut coo = CooMatrix::new(3, 4);
coo.push(1, 3, 4);
coo.push(0, 1, 2);
coo.push(2, 0, 1);
coo.push(2, 3, 2);
coo.push(2, 2, 1);
coo
};
let expected_csc = CscMatrix::try_from_csc_data(
3,
4,
vec![0, 1, 2, 3, 5],
vec![2, 0, 2, 1, 2],
vec![1, 2, 1, 4, 2]
).unwrap();
assert_eq!(convert_coo_csc(&coo), expected_csc);
}
// Duplicates
{
let coo = {
let mut coo = CooMatrix::new(3, 4);
coo.push(1, 3, 4);
coo.push(2, 3, 2);
coo.push(0, 1, 2);
coo.push(2, 0, 1);
coo.push(2, 3, 2);
coo.push(0, 1, 3);
coo.push(2, 2, 1);
coo
};
let expected_csc = CscMatrix::try_from_csc_data(
3,
4,
vec![0, 1, 2, 3, 5],
vec![2, 0, 2, 1, 2],
vec![1, 5, 1, 4, 4]
).unwrap();
assert_eq!(convert_coo_csc(&coo), expected_csc);
}
}
#[test]
fn test_convert_csc_coo() {
let csc = CscMatrix::try_from_csc_data(
3,
4,
vec![0, 1, 2, 3, 5],
vec![2, 0, 2, 1, 2],
vec![1, 2, 1, 4, 2]
).unwrap();
let expected_coo = CooMatrix::try_from_triplets(
3,
4,
vec![2, 0, 2, 1, 2],
vec![0, 1, 2, 3, 3],
vec![1, 2, 1, 4, 2]
).unwrap();
assert_eq!(convert_csc_coo(&csc), expected_coo);
}
#[test]
fn test_convert_csr_csc_bidirectional() {
let csr = CsrMatrix::try_from_csr_data(
3,
4,
vec![0, 3, 4, 6],
vec![1, 2, 3, 0, 1, 3],
vec![5, 3, 2, 2, 1, 4],
).unwrap();
let csc = CscMatrix::try_from_csc_data(
3,
4,
vec![0, 1, 3, 4, 6],
vec![1, 0, 2, 0, 0, 2],
vec![2, 5, 1, 3, 2, 4],
).unwrap();
assert_eq!(convert_csr_csc(&csr), csc);
assert_eq!(convert_csc_csr(&csc), csr);
}
#[test]
fn test_convert_csr_dense_bidirectional() {
let csr = CsrMatrix::try_from_csr_data(
3,
4,
vec![0, 3, 4, 6],
vec![1, 2, 3, 0, 1, 3],
vec![5, 3, 2, 2, 1, 4],
).unwrap();
#[rustfmt::skip]
let dense = DMatrix::from_row_slice(3, 4, &[
0, 5, 3, 2,
2, 0, 0, 0,
0, 1, 0, 4
]);
assert_eq!(convert_csr_dense(&csr), dense);
assert_eq!(convert_dense_csr(&dense), csr);
}
#[test]
fn test_convert_csc_dense_bidirectional() {
let csc = CscMatrix::try_from_csc_data(
3,
4,
vec![0, 1, 3, 4, 6],
vec![1, 0, 2, 0, 0, 2],
vec![2, 5, 1, 3, 2, 4],
).unwrap();
#[rustfmt::skip]
let dense = DMatrix::from_row_slice(3, 4, &[
0, 5, 3, 2,
2, 0, 0, 0,
0, 1, 0, 4
]);
assert_eq!(convert_csc_dense(&csc), dense);
assert_eq!(convert_dense_csc(&dense), csc);
}
fn coo_strategy() -> impl Strategy<Value=CooMatrix<i32>> {
coo_with_duplicates(-5 ..= 5, 0..=6usize, 0..=6usize, 40, 2)
}
fn coo_no_duplicates_strategy() -> impl Strategy<Value=CooMatrix<i32>> {
coo_no_duplicates(-5 ..= 5, 0..=6usize, 0..=6usize, 40)
}
fn csr_strategy() -> impl Strategy<Value=CsrMatrix<i32>> {
csr(-5 ..= 5, 0..=6usize, 0..=6usize, 40)
}
fn csc_strategy() -> impl Strategy<Value=CscMatrix<i32>> {
csc(-5 ..= 5, 0..=6usize, 0..=6usize, 40)
}
/// Avoid generating explicit zero values so that it is possible to reason about sparsity patterns
fn non_zero_csr_strategy() -> impl Strategy<Value=CsrMatrix<i32>> {
csr(1 ..= 5, 0..=6usize, 0..=6usize, 40)
}
/// Avoid generating explicit zero values so that it is possible to reason about sparsity patterns
fn non_zero_csc_strategy() -> impl Strategy<Value=CscMatrix<i32>> {
csc(1 ..= 5, 0..=6usize, 0..=6usize, 40)
}
fn dense_strategy() -> impl Strategy<Value=DMatrix<i32>> {
matrix(-5..=5, 0..=6, 0..=6)
}
proptest! {
#[test]
@@ -78,7 +319,125 @@ proptest! {
}
#[test]
-fn from_dense_coo_roundtrip(dense in matrix(-5..=5, 0..=6, 0..=6)) {
fn coo_from_dense_roundtrip(dense in dense_strategy()) {
prop_assert_eq!(&dense, &DMatrix::from(&CooMatrix::from(&dense)));
}
#[test]
fn convert_coo_csr_agrees_with_csr_dense(coo in coo_strategy()) {
let coo_dense = convert_coo_dense(&coo);
let csr = convert_coo_csr(&coo);
let csr_dense = convert_csr_dense(&csr);
prop_assert_eq!(csr_dense, coo_dense);
// It might be that COO matrices have a higher nnz due to duplicates,
// so we can only check that the CSR matrix has no more stored entries (nnz) than the original COO matrix
prop_assert!(csr.nnz() <= coo.nnz());
}
#[test]
fn convert_coo_csr_nnz(coo in coo_no_duplicates_strategy()) {
// Check that the NNZ are equal when converting from a CooMatrix without
// duplicates to a CSR matrix
let csr = convert_coo_csr(&coo);
prop_assert_eq!(csr.nnz(), coo.nnz());
}
#[test]
fn convert_csr_coo_roundtrip(csr in csr_strategy()) {
let coo = convert_csr_coo(&csr);
let csr2 = convert_coo_csr(&coo);
prop_assert_eq!(csr2, csr);
}
#[test]
fn coo_from_csr_roundtrip(csr in csr_strategy()) {
prop_assert_eq!(&csr, &CsrMatrix::from(&CooMatrix::from(&csr)));
}
#[test]
fn csr_from_dense_roundtrip(dense in dense_strategy()) {
prop_assert_eq!(&dense, &DMatrix::from(&CsrMatrix::from(&dense)));
}
#[test]
fn convert_csr_dense_roundtrip(csr in non_zero_csr_strategy()) {
// Since we only generate CSR matrices with non-zero values, we know that the
// number of explicitly stored entries when converting CSR->Dense->CSR should be
// unchanged, so that we can verify that the result is the same as the input
let dense = convert_csr_dense(&csr);
let csr2 = convert_dense_csr(&dense);
prop_assert_eq!(csr2, csr);
}
#[test]
fn convert_csc_coo_roundtrip(csc in csc_strategy()) {
let coo = convert_csc_coo(&csc);
let csc2 = convert_coo_csc(&coo);
prop_assert_eq!(csc2, csc);
}
#[test]
fn coo_from_csc_roundtrip(csc in csc_strategy()) {
prop_assert_eq!(&csc, &CscMatrix::from(&CooMatrix::from(&csc)));
}
#[test]
fn convert_csc_dense_roundtrip(csc in non_zero_csc_strategy()) {
// Since we only generate CSC matrices with non-zero values, we know that the
// number of explicitly stored entries when converting CSC->Dense->CSC should be
// unchanged, so that we can verify that the result is the same as the input
let dense = convert_csc_dense(&csc);
let csc2 = convert_dense_csc(&dense);
prop_assert_eq!(csc2, csc);
}
#[test]
fn csc_from_dense_roundtrip(dense in dense_strategy()) {
prop_assert_eq!(&dense, &DMatrix::from(&CscMatrix::from(&dense)));
}
#[test]
fn convert_coo_csc_agrees_with_csc_dense(coo in coo_strategy()) {
let coo_dense = convert_coo_dense(&coo);
let csc = convert_coo_csc(&coo);
let csc_dense = convert_csc_dense(&csc);
prop_assert_eq!(csc_dense, coo_dense);
// It might be that COO matrices have a higher nnz due to duplicates,
// so we can only check that the CSC matrix has no more stored entries (nnz) than the original COO matrix
prop_assert!(csc.nnz() <= coo.nnz());
}
#[test]
fn convert_coo_csc_nnz(coo in coo_no_duplicates_strategy()) {
// Check that the NNZ are equal when converting from a CooMatrix without
// duplicates to a CSC matrix
let csc = convert_coo_csc(&coo);
prop_assert_eq!(csc.nnz(), coo.nnz());
}
#[test]
fn convert_csc_csr_roundtrip(csc in csc_strategy()) {
let csr = convert_csc_csr(&csc);
let csc2 = convert_csr_csc(&csr);
prop_assert_eq!(csc2, csc);
}
#[test]
fn convert_csr_csc_roundtrip(csr in csr_strategy()) {
let csc = convert_csr_csc(&csr);
let csr2 = convert_csc_csr(&csc);
prop_assert_eq!(csr2, csr);
}
#[test]
fn csc_from_csr_roundtrip(csr in csr_strategy()) {
prop_assert_eq!(&csr, &CsrMatrix::from(&CscMatrix::from(&csr)));
}
#[test]
fn csr_from_csc_roundtrip(csc in csc_strategy()) {
prop_assert_eq!(&csc, &CscMatrix::from(&CsrMatrix::from(&csc)));
}
}