diff --git a/.github/workflows/nalgebra-ci-build.yml b/.github/workflows/nalgebra-ci-build.yml
index 3a56df13..c00b6cbc 100644
--- a/.github/workflows/nalgebra-ci-build.yml
+++ b/.github/workflows/nalgebra-ci-build.yml
@@ -38,8 +38,12 @@ jobs:
run: cargo build --features serde-serialize
- name: Build nalgebra-lapack
run: cd nalgebra-lapack; cargo build;
- - name: Build nalgebra-sparse
+ - name: Build nalgebra-sparse --no-default-features
+ run: cd nalgebra-sparse; cargo build --no-default-features;
+ - name: Build nalgebra-sparse (default features)
run: cd nalgebra-sparse; cargo build;
+ - name: Build nalgebra-sparse --all-features
+ run: cd nalgebra-sparse; cargo build --all-features;
# Run this on it’s own job because it alone takes a lot of time.
# So it’s best to let it run in parallel to the other jobs.
build-nalgebra-all-features:
@@ -57,13 +61,13 @@ jobs:
steps:
- uses: actions/checkout@v2
- name: test
- run: cargo test --features arbitrary,rand,serde-serialize,abomonation-serialize,sparse,debug,io,compare,libm,proptest-support,slow-tests;
+ run: cargo test --features arbitrary,rand,serde-serialize,sparse,debug,io,compare,libm,proptest-support,slow-tests;
test-nalgebra-glm:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: test nalgebra-glm
- run: cargo test -p nalgebra-glm --features arbitrary,serde-serialize,abomonation-serialize;
+ run: cargo test -p nalgebra-glm --features arbitrary,serde-serialize;
test-nalgebra-sparse:
runs-on: ubuntu-latest
steps:
@@ -71,10 +75,10 @@ jobs:
- name: test nalgebra-sparse
# Manifest-path is necessary because cargo otherwise won't correctly forward features
# We increase number of proptest cases to hopefully catch more potential bugs
- run: PROPTEST_CASES=10000 cargo test --manifest-path=nalgebra-sparse/Cargo.toml --features compare,proptest-support,io
+ run: PROPTEST_CASES=10000 cargo test --manifest-path=nalgebra-sparse/Cargo.toml --features compare,proptest-support,io,serde-serialize
- name: test nalgebra-sparse (slow tests)
# Unfortunately, the "slow-tests" take so much time that we need to run them with --release
- run: PROPTEST_CASES=10000 cargo test --release --manifest-path=nalgebra-sparse/Cargo.toml --features compare,proptest-support,io,slow-tests slow
+ run: PROPTEST_CASES=10000 cargo test --release --manifest-path=nalgebra-sparse/Cargo.toml --features compare,proptest-support,io,serde-serialize,slow-tests slow
test-nalgebra-macros:
runs-on: ubuntu-latest
steps:
@@ -120,6 +124,8 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: Jimver/cuda-toolkit@v0.2.4
+ with:
+ cuda: '11.2.2'
- name: Install nightly-2021-12-04
uses: actions-rs/toolchain@v1
with:
@@ -128,4 +134,6 @@ jobs:
- uses: actions/checkout@v2
- run: rustup target add nvptx64-nvidia-cuda
- run: cargo build --no-default-features --features cuda
- - run: cargo build --no-default-features --features cuda --target=nvptx64-nvidia-cuda
\ No newline at end of file
+ - run: cargo build --no-default-features --features cuda --target=nvptx64-nvidia-cuda
+ env:
+ CUDA_ARCH: "350"
\ No newline at end of file
diff --git a/Cargo.toml b/Cargo.toml
index 13f0584c..8a3fea5c 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -32,7 +32,7 @@ compare = [ "matrixcompare-core" ]
libm = [ "simba/libm" ]
libm-force = [ "simba/libm_force" ]
macros = [ "nalgebra-macros" ]
-cuda = [ "cust", "simba/cuda" ]
+cuda = [ "cust_core", "simba/cuda" ]
# Conversion
convert-mint = [ "mint" ]
@@ -53,7 +53,6 @@ convert-glam020 = [ "glam020" ]
## `serde-serialize`.
serde-serialize-no-std = [ "serde", "num-complex/serde" ]
serde-serialize = [ "serde-serialize-no-std", "serde/std" ]
-abomonation-serialize = [ "abomonation" ]
rkyv-serialize-no-std = [ "rkyv" ]
rkyv-serialize = [ "rkyv-serialize-no-std", "rkyv/std" ]
@@ -81,7 +80,6 @@ alga = { version = "0.9", default-features = false, optional = true }
rand_distr = { version = "0.4", default-features = false, optional = true }
matrixmultiply = { version = "0.3", optional = true }
serde = { version = "1.0", default-features = false, features = [ "derive" ], optional = true }
-abomonation = { version = "0.7", optional = true }
rkyv = { version = "~0.6.4", default-features = false, features = ["const_generics"], optional = true }
mint = { version = "0.5", optional = true }
quickcheck = { version = "1", optional = true }
@@ -98,9 +96,7 @@ glam017 = { package = "glam", version = "0.17", optional = true }
glam018 = { package = "glam", version = "0.18", optional = true }
glam019 = { package = "glam", version = "0.19", optional = true }
glam020 = { package = "glam", version = "0.20", optional = true }
-
-[target.'cfg(not(target_os = "cuda"))'.dependencies]
-cust = { version = "0.2", optional = true }
+cust_core = { version = "0.1", optional = true }
[dev-dependencies]
diff --git a/README.md b/README.md
index fa1e0904..62ab4759 100644
--- a/README.md
+++ b/README.md
@@ -42,6 +42,9 @@ And our gold sponsors:
-
+
+
+
+
\ No newline at end of file
diff --git a/nalgebra-glm/Cargo.toml b/nalgebra-glm/Cargo.toml
index 287bb8c7..f8087581 100644
--- a/nalgebra-glm/Cargo.toml
+++ b/nalgebra-glm/Cargo.toml
@@ -21,7 +21,6 @@ default = [ "std" ]
std = [ "nalgebra/std", "simba/std" ]
arbitrary = [ "nalgebra/arbitrary" ]
serde-serialize = [ "nalgebra/serde-serialize-no-std" ]
-abomonation-serialize = [ "nalgebra/abomonation-serialize" ]
cuda = [ "nalgebra/cuda" ]
# Conversion
diff --git a/nalgebra-macros/src/lib.rs b/nalgebra-macros/src/lib.rs
index 9a403e0d..0d7889ae 100644
--- a/nalgebra-macros/src/lib.rs
+++ b/nalgebra-macros/src/lib.rs
@@ -111,7 +111,7 @@ impl Parse for Matrix {
/// Construct a fixed-size matrix directly from data.
///
-/// **Note: Requires the `macro` feature to be enabled (enabled by default)**.
+/// **Note: Requires the `macros` feature to be enabled (enabled by default)**.
///
/// This macro facilitates easy construction of matrices when the entries of the matrix are known
/// (either as constants or expressions). This macro produces an instance of `SMatrix`. This means
@@ -125,7 +125,6 @@ impl Parse for Matrix {
/// (`;`) designates that a new row begins.
///
/// # Examples
-///
/// ```
/// use nalgebra::matrix;
///
@@ -164,12 +163,13 @@ pub fn matrix(stream: TokenStream) -> TokenStream {
/// Construct a dynamic matrix directly from data.
///
-/// **Note: Requires the `macro` feature to be enabled (enabled by default)**.
+/// **Note: Requires the `macros` feature to be enabled (enabled by default)**.
///
/// The syntax is exactly the same as for [`matrix!`], but instead of producing instances of
/// `SMatrix`, it produces instances of `DMatrix`. At the moment it is not usable
/// in `const fn` contexts.
///
+/// # Example
/// ```
/// use nalgebra::dmatrix;
///
@@ -233,7 +233,7 @@ impl Parse for Vector {
/// Construct a fixed-size column vector directly from data.
///
-/// **Note: Requires the `macro` feature to be enabled (enabled by default)**.
+/// **Note: Requires the `macros` feature to be enabled (enabled by default)**.
///
/// Similarly to [`matrix!`], this macro facilitates easy construction of fixed-size vectors.
/// However, whereas the [`matrix!`] macro expects each row to be separated by a semi-colon,
@@ -243,8 +243,7 @@ impl Parse for Vector {
/// `vector!` is intended to be the most readable and performant way of constructing small,
/// fixed-size vectors, and it is usable in `const fn` contexts.
///
-/// ## Examples
-///
+/// # Example
/// ```
/// use nalgebra::vector;
///
@@ -265,12 +264,13 @@ pub fn vector(stream: TokenStream) -> TokenStream {
/// Construct a dynamic column vector directly from data.
///
-/// **Note: Requires the `macro` feature to be enabled (enabled by default)**.
+/// **Note: Requires the `macros` feature to be enabled (enabled by default)**.
///
/// The syntax is exactly the same as for [`vector!`], but instead of producing instances of
/// `SVector`, it produces instances of `DVector`. At the moment it is not usable
/// in `const fn` contexts.
///
+/// # Example
/// ```
/// use nalgebra::dvector;
///
@@ -294,15 +294,14 @@ pub fn dvector(stream: TokenStream) -> TokenStream {
/// Construct a fixed-size point directly from data.
///
-/// **Note: Requires the `macro` feature to be enabled (enabled by default)**.
+/// **Note: Requires the `macros` feature to be enabled (enabled by default)**.
///
/// Similarly to [`vector!`], this macro facilitates easy construction of points.
///
/// `point!` is intended to be the most readable and performant way of constructing small,
/// points, and it is usable in `const fn` contexts.
///
-/// ## Examples
-///
+/// # Example
/// ```
/// use nalgebra::point;
///
diff --git a/nalgebra-sparse/Cargo.toml b/nalgebra-sparse/Cargo.toml
index 4e414322..0ed64acb 100644
--- a/nalgebra-sparse/Cargo.toml
+++ b/nalgebra-sparse/Cargo.toml
@@ -15,6 +15,7 @@ license = "Apache-2.0"
[features]
proptest-support = ["proptest", "nalgebra/proptest-support"]
compare = [ "matrixcompare-core" ]
+serde-serialize = [ "serde/std" ]
# Enable matrix market I/O
io = [ "pest", "pest_derive" ]
@@ -29,13 +30,15 @@ proptest = { version = "1.0", optional = true }
matrixcompare-core = { version = "0.1.0", optional = true }
pest = { version = "2", optional = true }
pest_derive = { version = "2", optional = true }
+serde = { version = "1.0", default-features = false, features = [ "derive" ], optional = true }
[dev-dependencies]
itertools = "0.10"
matrixcompare = { version = "0.3.0", features = [ "proptest-support" ] }
nalgebra = { version="0.30", path = "../", features = ["compare"] }
tempfile = "3"
+serde_json = "1.0"
[package.metadata.docs.rs]
# Enable certain features when building docs for docs.rs
-features = [ "proptest-support", "compare" , "io"]
\ No newline at end of file
+features = [ "proptest-support", "compare", "io" ]
diff --git a/nalgebra-sparse/src/convert/serial.rs b/nalgebra-sparse/src/convert/serial.rs
index ecbe1dab..50fc50e4 100644
--- a/nalgebra-sparse/src/convert/serial.rs
+++ b/nalgebra-sparse/src/convert/serial.rs
@@ -14,6 +14,7 @@ use crate::coo::CooMatrix;
use crate::cs;
use crate::csc::CscMatrix;
use crate::csr::CsrMatrix;
+use crate::utils::{apply_permutation, compute_sort_permutation};
/// Converts a dense matrix to [`CooMatrix`].
pub fn convert_dense_coo(dense: &Matrix) -> CooMatrix
@@ -376,29 +377,12 @@ fn sort_lane(
assert_eq!(values.len(), workspace.len());
let permutation = workspace;
- // Set permutation to identity
- for (i, p) in permutation.iter_mut().enumerate() {
- *p = i;
- }
-
- // Compute permutation needed to bring minor indices into sorted order
- // Note: Using sort_unstable here avoids internal allocations, which is crucial since
- // each lane might have a small number of elements
- permutation.sort_unstable_by_key(|idx| minor_idx[*idx]);
+ compute_sort_permutation(permutation, minor_idx);
apply_permutation(minor_idx_result, minor_idx, permutation);
apply_permutation(values_result, values, permutation);
}
-// TODO: Move this into `utils` or something?
-fn apply_permutation(out_slice: &mut [T], in_slice: &[T], permutation: &[usize]) {
- assert_eq!(out_slice.len(), in_slice.len());
- assert_eq!(out_slice.len(), permutation.len());
- for (out_element, old_pos) in out_slice.iter_mut().zip(permutation) {
- *out_element = in_slice[*old_pos].clone();
- }
-}
-
/// Given *sorted* indices and corresponding scalar values, combines duplicates with the given
/// associative combiner and calls the provided produce methods with combined indices and values.
fn combine_duplicates(
diff --git a/nalgebra-sparse/src/coo.rs b/nalgebra-sparse/src/coo.rs
index 34e5ceec..2b302e37 100644
--- a/nalgebra-sparse/src/coo.rs
+++ b/nalgebra-sparse/src/coo.rs
@@ -1,5 +1,8 @@
//! An implementation of the COO sparse matrix format.
+#[cfg(feature = "serde-serialize")]
+mod coo_serde;
+
use crate::SparseFormatError;
/// A COO representation of a sparse matrix.
diff --git a/nalgebra-sparse/src/coo/coo_serde.rs b/nalgebra-sparse/src/coo/coo_serde.rs
new file mode 100644
index 00000000..7ffcdf4a
--- /dev/null
+++ b/nalgebra-sparse/src/coo/coo_serde.rs
@@ -0,0 +1,65 @@
+use crate::coo::CooMatrix;
+use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
+
+/// This is an intermediate type for (de)serializing `CooMatrix`.
+///
+/// Deserialization requires using a `try_from_*` function for validation. We could have used
+/// the `remote = "Self"` trick (https://github.com/serde-rs/serde/issues/1220) which allows
+/// to directly serialize/deserialize the original fields and combine it with validation.
+/// However, this would lead to nested serialization of the `CsMatrix` and `SparsityPattern`
+/// types. Instead, we decided that we want a more human-readable serialization format using
+/// field names like `row_indices` and `col_indices`. The easiest way to achieve this is to
+/// introduce an intermediate type. It also allows the serialization format to stay constant
+/// even if the internal layout in `nalgebra` changes.
+///
+/// We want to avoid unnecessary copies when serializing (i.e. cloning slices into owned
+/// storage). Therefore, we use generic arguments to allow using slices during serialization and
+/// owned storage (i.e. `Vec`) during deserialization. Without a major update of serde, slices
+/// and `Vec`s should always (de)serialize identically.
+#[derive(Serialize, Deserialize)]
+struct CooMatrixSerializationData {
+ nrows: usize,
+ ncols: usize,
+ row_indices: Indices,
+ col_indices: Indices,
+ values: Values,
+}
+
+impl Serialize for CooMatrix
+where
+ T: Serialize + Clone,
+{
+ fn serialize(&self, serializer: S) -> Result
+ where
+ S: Serializer,
+ {
+ CooMatrixSerializationData::<&[usize], &[T]> {
+ nrows: self.nrows(),
+ ncols: self.ncols(),
+ row_indices: self.row_indices(),
+ col_indices: self.col_indices(),
+ values: self.values(),
+ }
+ .serialize(serializer)
+ }
+}
+
+impl<'de, T> Deserialize<'de> for CooMatrix
+where
+ T: Deserialize<'de> + Clone,
+{
+ fn deserialize(deserializer: D) -> Result, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ let de = CooMatrixSerializationData::, Vec>::deserialize(deserializer)?;
+ CooMatrix::try_from_triplets(
+ de.nrows,
+ de.ncols,
+ de.row_indices,
+ de.col_indices,
+ de.values,
+ )
+ .map_err(|e| de::Error::custom(e))
+ }
+}
diff --git a/nalgebra-sparse/src/cs.rs b/nalgebra-sparse/src/cs.rs
index cffdd6c7..474eb2c0 100644
--- a/nalgebra-sparse/src/cs.rs
+++ b/nalgebra-sparse/src/cs.rs
@@ -6,7 +6,8 @@ use num_traits::One;
use nalgebra::Scalar;
use crate::pattern::SparsityPattern;
-use crate::{SparseEntry, SparseEntryMut};
+use crate::utils::{apply_permutation, compute_sort_permutation};
+use crate::{SparseEntry, SparseEntryMut, SparseFormatError, SparseFormatErrorKind};
/// An abstract compressed matrix.
///
@@ -543,3 +544,151 @@ pub fn convert_counts_to_offsets(counts: &mut [usize]) {
offset += count;
}
}
+
+/// Validates cs data, optionally sorts minor indices and values
+pub(crate) fn validate_and_optionally_sort_cs_data(
+ major_dim: usize,
+ minor_dim: usize,
+ major_offsets: &[usize],
+ minor_indices: &mut [usize],
+ values: Option<&mut [T]>,
+ sort: bool,
+) -> Result<(), SparseFormatError>
+where
+ T: Scalar,
+{
+ let mut values_option = values;
+
+ if let Some(values) = values_option.as_mut() {
+ if minor_indices.len() != values.len() {
+ return Err(SparseFormatError::from_kind_and_msg(
+ SparseFormatErrorKind::InvalidStructure,
+ "Number of values and minor indices must be the same.",
+ ));
+ }
+ } else if sort {
+ unreachable!("Internal error: Sorting currently not supported if no values are present.");
+ }
+ if major_offsets.len() == 0 {
+ return Err(SparseFormatError::from_kind_and_msg(
+ SparseFormatErrorKind::InvalidStructure,
+ "Number of offsets should be greater than 0.",
+ ));
+ }
+ if major_offsets.len() != major_dim + 1 {
+ return Err(SparseFormatError::from_kind_and_msg(
+ SparseFormatErrorKind::InvalidStructure,
+ "Length of offset array is not equal to (major_dim + 1).",
+ ));
+ }
+
+ // Check that the first and last offsets conform to the specification
+ {
+ let first_offset_ok = *major_offsets.first().unwrap() == 0;
+ let last_offset_ok = *major_offsets.last().unwrap() == minor_indices.len();
+ if !first_offset_ok || !last_offset_ok {
+ return Err(SparseFormatError::from_kind_and_msg(
+ SparseFormatErrorKind::InvalidStructure,
+ "First or last offset is incompatible with format.",
+ ));
+ }
+ }
+
+ // Set up required buffers up front
+ let mut minor_idx_buffer: Vec = Vec::new();
+ let mut values_buffer: Vec = Vec::new();
+ let mut minor_index_permutation: Vec = Vec::new();
+
+ // Test that each lane has strictly monotonically increasing minor indices, i.e.
+ // minor indices within a lane are sorted, unique. Sort minor indices within a lane if needed.
+ // In addition, each minor index must be in bounds with respect to the minor dimension.
+ {
+ for lane_idx in 0..major_dim {
+ let range_start = major_offsets[lane_idx];
+ let range_end = major_offsets[lane_idx + 1];
+
+ // Test that major offsets are monotonically increasing
+ if range_start > range_end {
+ return Err(SparseFormatError::from_kind_and_msg(
+ SparseFormatErrorKind::InvalidStructure,
+ "Offsets are not monotonically increasing.",
+ ));
+ }
+
+ let minor_idx_in_lane = minor_indices.get(range_start..range_end).ok_or(
+ SparseFormatError::from_kind_and_msg(
+ SparseFormatErrorKind::IndexOutOfBounds,
+ "A major offset is out of bounds.",
+ ),
+ )?;
+
+ // We test for in-bounds, uniqueness and monotonicity at the same time
+ // to ensure that we only visit each minor index once
+ let mut prev = None;
+ let mut monotonic = true;
+
+ for &minor_idx in minor_idx_in_lane {
+ if minor_idx >= minor_dim {
+ return Err(SparseFormatError::from_kind_and_msg(
+ SparseFormatErrorKind::IndexOutOfBounds,
+ "A minor index is out of bounds.",
+ ));
+ }
+
+ if let Some(prev) = prev {
+ if prev >= minor_idx {
+ if !sort {
+ return Err(SparseFormatError::from_kind_and_msg(
+ SparseFormatErrorKind::InvalidStructure,
+ "Minor indices are not strictly monotonically increasing in each lane.",
+ ));
+ }
+ monotonic = false;
+ }
+ }
+ prev = Some(minor_idx);
+ }
+
+ // sort if indices are not monotonic and sorting is expected
+ if !monotonic && sort {
+ let range_size = range_end - range_start;
+ minor_index_permutation.resize(range_size, 0);
+ compute_sort_permutation(&mut minor_index_permutation, &minor_idx_in_lane);
+ minor_idx_buffer.clear();
+ minor_idx_buffer.extend_from_slice(&minor_idx_in_lane);
+ apply_permutation(
+ &mut minor_indices[range_start..range_end],
+ &minor_idx_buffer,
+ &minor_index_permutation,
+ );
+
+ // check duplicates
+ prev = None;
+ for &minor_idx in &minor_indices[range_start..range_end] {
+ if let Some(prev) = prev {
+ if prev == minor_idx {
+ return Err(SparseFormatError::from_kind_and_msg(
+ SparseFormatErrorKind::DuplicateEntry,
+ "Input data contains duplicate entries.",
+ ));
+ }
+ }
+ prev = Some(minor_idx);
+ }
+
+ // sort values if they exist
+ if let Some(values) = values_option.as_mut() {
+ values_buffer.clear();
+ values_buffer.extend_from_slice(&values[range_start..range_end]);
+ apply_permutation(
+ &mut values[range_start..range_end],
+ &values_buffer,
+ &minor_index_permutation,
+ );
+ }
+ }
+ }
+ }
+
+ Ok(())
+}
diff --git a/nalgebra-sparse/src/csc.rs b/nalgebra-sparse/src/csc.rs
index 607cc0cf..d926dafb 100644
--- a/nalgebra-sparse/src/csc.rs
+++ b/nalgebra-sparse/src/csc.rs
@@ -3,6 +3,10 @@
//! This is the module-level documentation. See [`CscMatrix`] for the main documentation of the
//! CSC implementation.
+#[cfg(feature = "serde-serialize")]
+mod csc_serde;
+
+use crate::cs;
use crate::cs::{CsLane, CsLaneIter, CsLaneIterMut, CsLaneMut, CsMatrix};
use crate::csr::CsrMatrix;
use crate::pattern::{SparsityPattern, SparsityPatternFormatError, SparsityPatternIter};
@@ -170,6 +174,50 @@ impl CscMatrix {
Self::try_from_pattern_and_values(pattern, values)
}
+ /// Try to construct a CSC matrix from raw CSC data with unsorted row indices.
+ ///
+ /// It is assumed that each column contains unique row indices that are in
+ /// bounds with respect to the number of rows in the matrix. If this is not the case,
+ /// an error is returned to indicate the failure.
+ ///
+ /// An error is returned if the data given does not conform to the CSC storage format
+ /// with the exception of having unsorted row indices and values.
+ /// See the documentation for [CscMatrix](struct.CscMatrix.html) for more information.
+ pub fn try_from_unsorted_csc_data(
+ num_rows: usize,
+ num_cols: usize,
+ col_offsets: Vec,
+ mut row_indices: Vec,
+ mut values: Vec,
+ ) -> Result
+ where
+ T: Scalar,
+ {
+ let result = cs::validate_and_optionally_sort_cs_data(
+ num_cols,
+ num_rows,
+ &col_offsets,
+ &mut row_indices,
+ Some(&mut values),
+ true,
+ );
+
+ match result {
+ Ok(()) => {
+ let pattern = unsafe {
+ SparsityPattern::from_offset_and_indices_unchecked(
+ num_cols,
+ num_rows,
+ col_offsets,
+ row_indices,
+ )
+ };
+ Self::try_from_pattern_and_values(pattern, values)
+ }
+ Err(err) => Err(err),
+ }
+ }
+
/// Try to construct a CSC matrix from a sparsity pattern and associated non-zero values.
///
/// Returns an error if the number of values does not match the number of minor indices
diff --git a/nalgebra-sparse/src/csc/csc_serde.rs b/nalgebra-sparse/src/csc/csc_serde.rs
new file mode 100644
index 00000000..aab12d47
--- /dev/null
+++ b/nalgebra-sparse/src/csc/csc_serde.rs
@@ -0,0 +1,65 @@
+use crate::CscMatrix;
+use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
+
+/// This is an intermediate type for (de)serializing `CscMatrix`.
+///
+/// Deserialization requires using a `try_from_*` function for validation. We could have used
+/// the `remote = "Self"` trick (https://github.com/serde-rs/serde/issues/1220) which allows
+/// to directly serialize/deserialize the original fields and combine it with validation.
+/// However, this would lead to nested serialization of the `CsMatrix` and `SparsityPattern`
+/// types. Instead, we decided that we want a more human-readable serialization format using
+/// field names like `col_offsets` and `row_indices`. The easiest way to achieve this is to
+/// introduce an intermediate type. It also allows the serialization format to stay constant
+/// even if the internal layout in `nalgebra` changes.
+///
+/// We want to avoid unnecessary copies when serializing (i.e. cloning slices into owned
+/// storage). Therefore, we use generic arguments to allow using slices during serialization and
+/// owned storage (i.e. `Vec`) during deserialization. Without a major update of serde, slices
+/// and `Vec`s should always (de)serialize identically.
+#[derive(Serialize, Deserialize)]
+struct CscMatrixSerializationData {
+ nrows: usize,
+ ncols: usize,
+ col_offsets: Indices,
+ row_indices: Indices,
+ values: Values,
+}
+
+impl Serialize for CscMatrix
+where
+ T: Serialize + Clone,
+{
+ fn serialize(&self, serializer: S) -> Result
+ where
+ S: Serializer,
+ {
+ CscMatrixSerializationData::<&[usize], &[T]> {
+ nrows: self.nrows(),
+ ncols: self.ncols(),
+ col_offsets: self.col_offsets(),
+ row_indices: self.row_indices(),
+ values: self.values(),
+ }
+ .serialize(serializer)
+ }
+}
+
+impl<'de, T> Deserialize<'de> for CscMatrix
+where
+ T: Deserialize<'de> + Clone,
+{
+ fn deserialize(deserializer: D) -> Result, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ let de = CscMatrixSerializationData::, Vec>::deserialize(deserializer)?;
+ CscMatrix::try_from_csc_data(
+ de.nrows,
+ de.ncols,
+ de.col_offsets,
+ de.row_indices,
+ de.values,
+ )
+ .map_err(|e| de::Error::custom(e))
+ }
+}
diff --git a/nalgebra-sparse/src/csr.rs b/nalgebra-sparse/src/csr.rs
index 4324d18d..90be35f1 100644
--- a/nalgebra-sparse/src/csr.rs
+++ b/nalgebra-sparse/src/csr.rs
@@ -2,6 +2,11 @@
//!
//! This is the module-level documentation. See [`CsrMatrix`] for the main documentation of the
//! CSC implementation.
+
+#[cfg(feature = "serde-serialize")]
+mod csr_serde;
+
+use crate::cs;
use crate::cs::{CsLane, CsLaneIter, CsLaneIterMut, CsLaneMut, CsMatrix};
use crate::csc::CscMatrix;
use crate::pattern::{SparsityPattern, SparsityPatternFormatError, SparsityPatternIter};
@@ -10,7 +15,6 @@ use crate::{SparseEntry, SparseEntryMut, SparseFormatError, SparseFormatErrorKin
use nalgebra::Scalar;
use num_traits::One;
-use std::iter::FromIterator;
use std::slice::{Iter, IterMut};
/// A CSR representation of a sparse matrix.
@@ -184,62 +188,35 @@ impl CsrMatrix {
num_rows: usize,
num_cols: usize,
row_offsets: Vec,
- col_indices: Vec,
- values: Vec,
+ mut col_indices: Vec,
+ mut values: Vec,
) -> Result
where
T: Scalar,
{
- use SparsityPatternFormatError::*;
- let count = col_indices.len();
- let mut p: Vec = (0..count).collect();
-
- if col_indices.len() != values.len() {
- return Err(SparseFormatError::from_kind_and_msg(
- SparseFormatErrorKind::InvalidStructure,
- "Number of values and column indices must be the same",
- ));
- }
-
- if row_offsets.len() == 0 {
- return Err(SparseFormatError::from_kind_and_msg(
- SparseFormatErrorKind::InvalidStructure,
- "Number of offsets should be greater than 0",
- ));
- }
-
- for (index, &offset) in row_offsets[0..row_offsets.len() - 1].iter().enumerate() {
- let next_offset = row_offsets[index + 1];
- if next_offset > count {
- return Err(SparseFormatError::from_kind_and_msg(
- SparseFormatErrorKind::InvalidStructure,
- "No row offset should be greater than the number of column indices",
- ));
- }
- if offset > next_offset {
- return Err(NonmonotonicOffsets).map_err(pattern_format_error_to_csr_error);
- }
- p[offset..next_offset].sort_by(|a, b| {
- let x = &col_indices[*a];
- let y = &col_indices[*b];
- x.partial_cmp(y).unwrap()
- });
- }
-
- // permute indices
- let sorted_col_indices: Vec =
- Vec::from_iter((p.iter().map(|i| &col_indices[*i])).cloned());
-
- // permute values
- let sorted_values: Vec = Vec::from_iter((p.iter().map(|i| &values[*i])).cloned());
-
- return Self::try_from_csr_data(
+ let result = cs::validate_and_optionally_sort_cs_data(
num_rows,
num_cols,
- row_offsets,
- sorted_col_indices,
- sorted_values,
+ &row_offsets,
+ &mut col_indices,
+ Some(&mut values),
+ true,
);
+
+ match result {
+ Ok(()) => {
+ let pattern = unsafe {
+ SparsityPattern::from_offset_and_indices_unchecked(
+ num_rows,
+ num_cols,
+ row_offsets,
+ col_indices,
+ )
+ };
+ Self::try_from_pattern_and_values(pattern, values)
+ }
+ Err(err) => Err(err),
+ }
}
/// Try to construct a CSR matrix from a sparsity pattern and associated non-zero values.
diff --git a/nalgebra-sparse/src/csr/csr_serde.rs b/nalgebra-sparse/src/csr/csr_serde.rs
new file mode 100644
index 00000000..1b33fda0
--- /dev/null
+++ b/nalgebra-sparse/src/csr/csr_serde.rs
@@ -0,0 +1,65 @@
+use crate::CsrMatrix;
+use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
+
+/// This is an intermediate type for (de)serializing `CsrMatrix`.
+///
+/// Deserialization requires using a `try_from_*` function for validation. We could have used
+/// the `remote = "Self"` trick (https://github.com/serde-rs/serde/issues/1220) which allows
+/// to directly serialize/deserialize the original fields and combine it with validation.
+/// However, this would lead to nested serialization of the `CsMatrix` and `SparsityPattern`
+/// types. Instead, we decided that we want a more human-readable serialization format using
+/// field names like `row_offsets` and `col_indices`. The easiest way to achieve this is to
+/// introduce an intermediate type. It also allows the serialization format to stay constant
+/// even if the internal layout in `nalgebra` changes.
+///
+/// We want to avoid unnecessary copies when serializing (i.e. cloning slices into owned
+/// storage). Therefore, we use generic arguments to allow using slices during serialization and
+/// owned storage (i.e. `Vec`) during deserialization. Without a major update of serde, slices
+/// and `Vec`s should always (de)serialize identically.
+#[derive(Serialize, Deserialize)]
+struct CsrMatrixSerializationData {
+ nrows: usize,
+ ncols: usize,
+ row_offsets: Indices,
+ col_indices: Indices,
+ values: Values,
+}
+
+impl Serialize for CsrMatrix
+where
+ T: Serialize + Clone,
+{
+ fn serialize(&self, serializer: S) -> Result
+ where
+ S: Serializer,
+ {
+ CsrMatrixSerializationData::<&[usize], &[T]> {
+ nrows: self.nrows(),
+ ncols: self.ncols(),
+ row_offsets: self.row_offsets(),
+ col_indices: self.col_indices(),
+ values: self.values(),
+ }
+ .serialize(serializer)
+ }
+}
+
+impl<'de, T> Deserialize<'de> for CsrMatrix
+where
+ T: Deserialize<'de> + Clone,
+{
+ fn deserialize(deserializer: D) -> Result, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ let de = CsrMatrixSerializationData::, Vec>::deserialize(deserializer)?;
+ CsrMatrix::try_from_csr_data(
+ de.nrows,
+ de.ncols,
+ de.row_offsets,
+ de.col_indices,
+ de.values,
+ )
+ .map_err(|e| de::Error::custom(e))
+ }
+}
diff --git a/nalgebra-sparse/src/lib.rs b/nalgebra-sparse/src/lib.rs
index edbf83bd..8567261a 100644
--- a/nalgebra-sparse/src/lib.rs
+++ b/nalgebra-sparse/src/lib.rs
@@ -160,6 +160,7 @@ pub mod ops;
pub mod pattern;
pub(crate) mod cs;
+pub(crate) mod utils;
#[cfg(feature = "proptest-support")]
pub mod proptest;
diff --git a/nalgebra-sparse/src/pattern.rs b/nalgebra-sparse/src/pattern.rs
index 85f6bc1a..c51945b7 100644
--- a/nalgebra-sparse/src/pattern.rs
+++ b/nalgebra-sparse/src/pattern.rs
@@ -1,4 +1,8 @@
//! Sparsity patterns for CSR and CSC matrices.
+
+#[cfg(feature = "serde-serialize")]
+mod pattern_serde;
+
use crate::cs::transpose_cs;
use crate::SparseFormatError;
use std::error::Error;
@@ -184,6 +188,35 @@ impl SparsityPattern {
})
}
+ /// Try to construct a sparsity pattern from the given dimensions, major offsets
+ /// and minor indices.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the number of major offsets is not exactly one greater than the major dimension
+ /// or if major offsets do not start with 0 and end with the number of minor indices.
+ pub unsafe fn from_offset_and_indices_unchecked(
+ major_dim: usize,
+ minor_dim: usize,
+ major_offsets: Vec,
+ minor_indices: Vec,
+ ) -> Self {
+ assert_eq!(major_offsets.len(), major_dim + 1);
+
+ // Check that the first and last offsets conform to the specification
+ {
+ let first_offset_ok = *major_offsets.first().unwrap() == 0;
+ let last_offset_ok = *major_offsets.last().unwrap() == minor_indices.len();
+ assert!(first_offset_ok && last_offset_ok);
+ }
+
+ Self {
+ major_offsets,
+ minor_indices,
+ minor_dim,
+ }
+ }
+
/// An iterator over the explicitly stored "non-zero" entries (i, j).
///
/// The iteration happens in a lane-major fashion, meaning that the lane index i
diff --git a/nalgebra-sparse/src/pattern/pattern_serde.rs b/nalgebra-sparse/src/pattern/pattern_serde.rs
new file mode 100644
index 00000000..e11a550a
--- /dev/null
+++ b/nalgebra-sparse/src/pattern/pattern_serde.rs
@@ -0,0 +1,56 @@
+use crate::pattern::SparsityPattern;
+use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
+
+/// This is an intermediate type for (de)serializing `SparsityPattern`.
+///
+/// Deserialization requires using a `try_from_*` function for validation. We could have used
+/// the `remote = "Self"` trick (https://github.com/serde-rs/serde/issues/1220) which allows
+/// to directly serialize/deserialize the original fields and combine it with validation.
+/// However, this would lead to nested serialization of the `CsMatrix` and `SparsityPattern`
+/// types. Instead, we decided that we want a more human-readable serialization format using
+/// field names like `major_offsets` and `minor_indices`. The easiest way to achieve this is to
+/// introduce an intermediate type. It also allows the serialization format to stay constant
+/// even when the internal layout in `nalgebra` changes.
+///
+/// We want to avoid unnecessary copies when serializing (i.e. cloning slices into owned
+/// storage). Therefore, we use generic arguments to allow using slices during serialization and
+/// owned storage (i.e. `Vec`) during deserialization. Without a major update of serde, slices
+/// and `Vec`s should always (de)serialize identically.
+#[derive(Serialize, Deserialize)]
+struct SparsityPatternSerializationData<Indices> {
+ major_dim: usize,
+ minor_dim: usize,
+ major_offsets: Indices,
+ minor_indices: Indices,
+}
+
+impl Serialize for SparsityPattern {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ SparsityPatternSerializationData::<&[usize]> {
+ major_dim: self.major_dim(),
+ minor_dim: self.minor_dim(),
+ major_offsets: self.major_offsets(),
+ minor_indices: self.minor_indices(),
+ }
+ .serialize(serializer)
+ }
+}
+
+impl<'de> Deserialize<'de> for SparsityPattern {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ let de = SparsityPatternSerializationData::<Vec<usize>>::deserialize(deserializer)?;
+ SparsityPattern::try_from_offsets_and_indices(
+ de.major_dim,
+ de.minor_dim,
+ de.major_offsets,
+ de.minor_indices,
+ )
+ .map_err(|e| de::Error::custom(e))
+ }
+}
diff --git a/nalgebra-sparse/src/utils.rs b/nalgebra-sparse/src/utils.rs
new file mode 100644
index 00000000..73d4e967
--- /dev/null
+++ b/nalgebra-sparse/src/utils.rs
@@ -0,0 +1,26 @@
+//! Helper functions for sparse matrix computations
+
+/// Permutes the entries of `in_slice` according to `permutation` and writes them into `out_slice`.
+#[inline]
+pub fn apply_permutation<T: Clone>(out_slice: &mut [T], in_slice: &[T], permutation: &[usize]) {
+ assert_eq!(out_slice.len(), in_slice.len());
+ assert_eq!(out_slice.len(), permutation.len());
+ for (out_element, old_pos) in out_slice.iter_mut().zip(permutation) {
+ *out_element = in_slice[*old_pos].clone();
+ }
+}
+
+/// Computes the permutation that would sort `indices`, writing it into `permutation`.
+#[inline]
+pub fn compute_sort_permutation(permutation: &mut [usize], indices: &[usize]) {
+ assert_eq!(permutation.len(), indices.len());
+ // Set permutation to identity
+ for (i, p) in permutation.iter_mut().enumerate() {
+ *p = i;
+ }
+
+ // Compute permutation needed to bring minor indices into sorted order
+ // Note: Using sort_unstable here avoids internal allocations, which is crucial since
+ // each lane might have a small number of elements
+ permutation.sort_unstable_by_key(|idx| indices[*idx]);
+}
diff --git a/nalgebra-sparse/tests/serde.rs b/nalgebra-sparse/tests/serde.rs
new file mode 100644
index 00000000..1ce1953f
--- /dev/null
+++ b/nalgebra-sparse/tests/serde.rs
@@ -0,0 +1,206 @@
+#![cfg(feature = "serde-serialize")]
+//! Serialization tests
+#[cfg(any(not(feature = "proptest-support"), not(feature = "compare")))]
+compile_error!("Tests must be run with features `proptest-support` and `compare`");
+
+#[macro_use]
+pub mod common;
+
+use nalgebra_sparse::coo::CooMatrix;
+use nalgebra_sparse::csc::CscMatrix;
+use nalgebra_sparse::csr::CsrMatrix;
+use nalgebra_sparse::pattern::SparsityPattern;
+
+use proptest::prelude::*;
+use serde::{Deserialize, Serialize};
+
+use crate::common::{csc_strategy, csr_strategy};
+
+fn json_roundtrip<T: Serialize + for<'a> Deserialize<'a>>(csr: &T) -> T {
+ let serialized = serde_json::to_string(csr).unwrap();
+ let deserialized: T = serde_json::from_str(&serialized).unwrap();
+ deserialized
+}
+
+#[test]
+fn pattern_roundtrip() {
+ {
+ // A pattern with zero explicitly stored entries
+ let pattern =
+ SparsityPattern::try_from_offsets_and_indices(3, 2, vec![0, 0, 0, 0], Vec::new())
+ .unwrap();
+
+ assert_eq!(json_roundtrip(&pattern), pattern);
+ }
+
+ {
+ // Arbitrary pattern
+ let offsets = vec![0, 2, 2, 5];
+ let indices = vec![0, 5, 1, 2, 3];
+ let pattern =
+ SparsityPattern::try_from_offsets_and_indices(3, 6, offsets.clone(), indices.clone())
+ .unwrap();
+
+ assert_eq!(json_roundtrip(&pattern), pattern);
+ }
+}
+
+#[test]
+#[rustfmt::skip]
+fn pattern_deserialize_invalid() {
+ assert!(serde_json::from_str::<SparsityPattern>(r#"{"major_dim":3,"minor_dim":6,"major_offsets":[0,2,2,5],"minor_indices":[0,5,1,2,3]}"#).is_ok());
+ assert!(serde_json::from_str::<SparsityPattern>(r#"{"major_dim":0,"minor_dim":0,"major_offsets":[],"minor_indices":[]}"#).is_err());
+ assert!(serde_json::from_str::<SparsityPattern>(r#"{"major_dim":3,"minor_dim":6,"major_offsets":[0, 3, 5],"minor_indices":[0, 1, 2, 3, 5]}"#).is_err());
+ assert!(serde_json::from_str::<SparsityPattern>(r#"{"major_dim":3,"minor_dim":6,"major_offsets":[1, 2, 2, 5],"minor_indices":[0, 5, 1, 2, 3]}"#).is_err());
+ assert!(serde_json::from_str::<SparsityPattern>(r#"{"major_dim":3,"minor_dim":6,"major_offsets":[0, 2, 2, 4],"minor_indices":[0, 5, 1, 2, 3]}"#).is_err());
+ assert!(serde_json::from_str::<SparsityPattern>(r#"{"major_dim":3,"minor_dim":6,"major_offsets":[0, 2, 2],"minor_indices":[0, 5, 1, 2, 3]}"#).is_err());
+ assert!(serde_json::from_str::<SparsityPattern>(r#"{"major_dim":3,"minor_dim":6,"major_offsets":[0, 3, 2, 5],"minor_indices":[0, 1, 2, 3, 4]}"#).is_err());
+ assert!(serde_json::from_str::<SparsityPattern>(r#"{"major_dim":3,"minor_dim":6,"major_offsets":[0, 2, 2, 5],"minor_indices":[0, 2, 3, 1, 4]}"#).is_err());
+ assert!(serde_json::from_str::<SparsityPattern>(r#"{"major_dim":3,"minor_dim":6,"major_offsets":[0, 2, 2, 5],"minor_indices":[0, 6, 1, 2, 3]}"#).is_err());
+ assert!(serde_json::from_str::<SparsityPattern>(r#"{"major_dim":3,"minor_dim":6,"major_offsets":[0, 2, 2, 5],"minor_indices":[0, 5, 2, 2, 3]}"#).is_err());
+}
+
+#[test]
+fn coo_roundtrip() {
+ {
+ // A COO matrix without entries
+ let matrix =
+ CooMatrix::<i32>::try_from_triplets(3, 2, Vec::new(), Vec::new(), Vec::new()).unwrap();
+
+ assert_eq!(json_roundtrip(&matrix), matrix);
+ }
+
+ {
+ // Arbitrary COO matrix, no duplicates
+ let i = vec![0, 1, 0, 0, 2];
+ let j = vec![0, 2, 1, 3, 3];
+ let v = vec![2, 3, 7, 3, 1];
+ let matrix =
+ CooMatrix::<i32>::try_from_triplets(3, 5, i.clone(), j.clone(), v.clone()).unwrap();
+
+ assert_eq!(json_roundtrip(&matrix), matrix);
+ }
+}
+
+#[test]
+fn coo_deserialize_invalid() {
+ // Valid matrix: {"nrows":3,"ncols":5,"row_indices":[0,1,0,0,2],"col_indices":[0,2,1,3,3],"values":[2,3,7,3,1]}
+ assert!(serde_json::from_str::<CooMatrix<i32>>(r#"{"nrows":3,"ncols":5,"row_indices":[0,1,0,0,2],"col_indices":[0,2,1,3,3],"values":[2,3,7,3,1]}"#).is_ok());
+ assert!(serde_json::from_str::<CooMatrix<i32>>(r#"{"nrows":0,"ncols":0,"row_indices":[0,1,0,0,2],"col_indices":[0,2,1,3,3],"values":[2,3,7,3,4]}"#).is_err());
+ assert!(serde_json::from_str::<CooMatrix<i32>>(r#"{"nrows":-3,"ncols":5,"row_indices":[0,1,0,0,2],"col_indices":[0,2,1,3,3],"values":[2,3,7,3,4]}"#).is_err());
+ assert!(serde_json::from_str::<CooMatrix<i32>>(r#"{"nrows":3,"ncols":5,"row_indices":[0,1,0,0,2],"col_indices":[0,2,1,3,3],"values":[2,3,7,3]}"#).is_err());
+ assert!(serde_json::from_str::<CooMatrix<i32>>(r#"{"nrows":3,"ncols":5,"row_indices":[0,1,0,0,2],"col_indices":[0,2,1,3,3],"values":[2,3,7,3,4,5]}"#).is_err());
+ assert!(serde_json::from_str::<CooMatrix<i32>>(r#"{"nrows":3,"ncols":5,"row_indices":[0,1,0,0,2],"col_indices":[0,2,1,8,3],"values":[2,3,7,3,4]}"#).is_err());
+ assert!(serde_json::from_str::<CooMatrix<i32>>(r#"{"nrows":3,"ncols":5,"row_indices":[0,1,0,0],"col_indices":[0,2,1,8,3],"values":[2,3,7,3,4]}"#).is_err());
+ assert!(serde_json::from_str::<CooMatrix<i32>>(r#"{"nrows":3,"ncols":5,"row_indices":[0,10,0,0,2],"col_indices":[0,2,1,3,3],"values":[2,3,7,3,4]}"#).is_err());
+ assert!(serde_json::from_str::<CooMatrix<i32>>(r#"{"nrows":3,"ncols":5,"row_indices":[0,1,0,0,2],"col_indices":[0,2,1,30,3],"values":[2,3,7,3,4]}"#).is_err());
+}
+
+#[test]
+fn coo_deserialize_duplicates() {
+ assert_eq!(
+ serde_json::from_str::<CooMatrix<i32>>(
+ r#"{"nrows":3,"ncols":5,"row_indices":[0,1,0,0,2,0,1],"col_indices":[0,2,1,3,3,0,2],"values":[2,3,7,3,1,5,6]}"#
+ ).unwrap(),
+ CooMatrix::<i32>::try_from_triplets(
+ 3,
+ 5,
+ vec![0, 1, 0, 0, 2, 0, 1],
+ vec![0, 2, 1, 3, 3, 0, 2],
+ vec![2, 3, 7, 3, 1, 5, 6]
+ )
+ .unwrap()
+ );
+}
+
+#[test]
+fn csc_roundtrip() {
+ {
+ // A CSC matrix with zero explicitly stored entries
+ let offsets = vec![0, 0, 0, 0];
+ let indices = vec![];
+ let values = Vec::<i32>::new();
+ let matrix = CscMatrix::try_from_csc_data(2, 3, offsets, indices, values).unwrap();
+
+ assert_eq!(json_roundtrip(&matrix), matrix);
+ }
+
+ {
+ // An arbitrary CSC matrix
+ let offsets = vec![0, 2, 2, 5];
+ let indices = vec![0, 5, 1, 2, 3];
+ let values = vec![0, 1, 2, 3, 4];
+ let matrix =
+ CscMatrix::try_from_csc_data(6, 3, offsets.clone(), indices.clone(), values.clone())
+ .unwrap();
+
+ assert_eq!(json_roundtrip(&matrix), matrix);
+ }
+}
+
+#[test]
+fn csc_deserialize_invalid() {
+ // Valid matrix: {"nrows":6,"ncols":3,"col_offsets":[0,2,2,5],"row_indices":[0,5,1,2,3],"values":[0,1,2,3,4]}
+ assert!(serde_json::from_str::<CscMatrix<i32>>(r#"{"nrows":6,"ncols":3,"col_offsets":[0,2,2,5],"row_indices":[0,5,1,2,3],"values":[0,1,2,3,4]}"#).is_ok());
+ assert!(serde_json::from_str::<CscMatrix<i32>>(r#"{"nrows":0,"ncols":0,"col_offsets":[0,2,2,5],"row_indices":[0,5,1,2,3],"values":[0,1,2,3,4]}"#).is_err());
+ assert!(serde_json::from_str::<CscMatrix<i32>>(r#"{"nrows":-6,"ncols":3,"col_offsets":[0,2,2,5],"row_indices":[0,5,1,2,3],"values":[0,1,2,3,4]}"#).is_err());
+ assert!(serde_json::from_str::<CscMatrix<i32>>(r#"{"nrows":6,"ncols":3,"col_offsets":[0,2,2,5],"row_indices":[0,5,1,2,3],"values":[0,1,2,3]}"#).is_err());
+ assert!(serde_json::from_str::<CscMatrix<i32>>(r#"{"nrows":6,"ncols":3,"col_offsets":[0,2,2,5],"row_indices":[0,5,1,2,3],"values":[0,1,2,3,4,5]}"#).is_err());
+ assert!(serde_json::from_str::<CscMatrix<i32>>(r#"{"nrows":6,"ncols":3,"col_offsets":[0,2,2,5],"row_indices":[0,5,1,8,3],"values":[0,1,2,3,4]}"#).is_err());
+ assert!(serde_json::from_str::<CscMatrix<i32>>(r#"{"nrows":6,"ncols":3,"col_offsets":[0,2,2,5],"row_indices":[0,5,1,2,3,1,1],"values":[0,1,2,3,4]}"#).is_err());
+ // The following actually panics ('range end index 10 out of range for slice of length 5', nalgebra-sparse\src\pattern.rs:156:38)
+ //assert!(serde_json::from_str::<CscMatrix<i32>>(r#"{"nrows":6,"ncols":3,"col_offsets":[0,10,2,5],"row_indices":[0,5,1,2,3],"values":[0,1,2,3,4]}"#).is_err());
+ assert!(serde_json::from_str::<CscMatrix<i32>>(r#"{"nrows":3,"ncols":6,"row_offsets":[0,2,2,5],"col_indices":[0,5,1,2,3],"values":[0,1,2,3,4]}"#).is_err());
+}
+
+#[test]
+fn csr_roundtrip() {
+ {
+ // A CSR matrix with zero explicitly stored entries
+ let offsets = vec![0, 0, 0, 0];
+ let indices = vec![];
+ let values = Vec::<i32>::new();
+ let matrix = CsrMatrix::try_from_csr_data(3, 2, offsets, indices, values).unwrap();
+
+ assert_eq!(json_roundtrip(&matrix), matrix);
+ }
+
+ {
+ // An arbitrary CSR matrix
+ let offsets = vec![0, 2, 2, 5];
+ let indices = vec![0, 5, 1, 2, 3];
+ let values = vec![0, 1, 2, 3, 4];
+ let matrix =
+ CsrMatrix::try_from_csr_data(3, 6, offsets.clone(), indices.clone(), values.clone())
+ .unwrap();
+
+ assert_eq!(json_roundtrip(&matrix), matrix);
+ }
+}
+
+#[test]
+fn csr_deserialize_invalid() {
+ // Valid matrix: {"nrows":3,"ncols":6,"row_offsets":[0,2,2,5],"col_indices":[0,5,1,2,3],"values":[0,1,2,3,4]}
+ assert!(serde_json::from_str::<CsrMatrix<i32>>(r#"{"nrows":3,"ncols":6,"row_offsets":[0,2,2,5],"col_indices":[0,5,1,2,3],"values":[0,1,2,3,4]}"#).is_ok());
+ assert!(serde_json::from_str::<CsrMatrix<i32>>(r#"{"nrows":0,"ncols":0,"row_offsets":[0,2,2,5],"col_indices":[0,5,1,2,3],"values":[0,1,2,3,4]}"#).is_err());
+ assert!(serde_json::from_str::<CsrMatrix<i32>>(r#"{"nrows":-3,"ncols":6,"row_offsets":[0,2,2,5],"col_indices":[0,5,1,2,3],"values":[0,1,2,3,4]}"#).is_err());
+ assert!(serde_json::from_str::<CsrMatrix<i32>>(r#"{"nrows":3,"ncols":6,"row_offsets":[0,2,2,5],"col_indices":[0,5,1,2,3],"values":[0,1,2,3]}"#).is_err());
+ assert!(serde_json::from_str::<CsrMatrix<i32>>(r#"{"nrows":3,"ncols":6,"row_offsets":[0,2,2,5],"col_indices":[0,5,1,2,3],"values":[0,1,2,3,4,5]}"#).is_err());
+ assert!(serde_json::from_str::<CsrMatrix<i32>>(r#"{"nrows":3,"ncols":6,"row_offsets":[0,2,2,5],"col_indices":[0,5,1,8,3],"values":[0,1,2,3,4]}"#).is_err());
+ assert!(serde_json::from_str::<CsrMatrix<i32>>(r#"{"nrows":3,"ncols":6,"row_offsets":[0,2,2,5],"col_indices":[0,5,1,2,3,1,1],"values":[0,1,2,3,4]}"#).is_err());
+ // The following actually panics ('range end index 10 out of range for slice of length 5', nalgebra-sparse\src\pattern.rs:156:38)
+ //assert!(serde_json::from_str::<CsrMatrix<i32>>(r#"{"nrows":3,"ncols":6,"row_offsets":[0,10,2,5],"col_indices":[0,5,1,2,3],"values":[0,1,2,3,4]}"#).is_err());
+ assert!(serde_json::from_str::<CsrMatrix<i32>>(r#"{"nrows":6,"ncols":3,"col_offsets":[0,2,2,5],"row_indices":[0,5,1,2,3],"values":[0,1,2,3,4]}"#).is_err());
+}
+
+proptest! {
+ #[test]
+ fn csc_roundtrip_proptest(csc in csc_strategy()) {
+ prop_assert_eq!(json_roundtrip(&csc), csc);
+ }
+
+ #[test]
+ fn csr_roundtrip_proptest(csr in csr_strategy()) {
+ prop_assert_eq!(json_roundtrip(&csr), csr);
+ }
+}
diff --git a/nalgebra-sparse/tests/unit_tests/csc.rs b/nalgebra-sparse/tests/unit_tests/csc.rs
index 7fb0de54..1554b8a6 100644
--- a/nalgebra-sparse/tests/unit_tests/csc.rs
+++ b/nalgebra-sparse/tests/unit_tests/csc.rs
@@ -5,6 +5,8 @@ use nalgebra_sparse::{SparseEntry, SparseEntryMut, SparseFormatErrorKind};
use proptest::prelude::*;
use proptest::sample::subsequence;
+use super::test_data_examples::{InvalidCsDataExamples, ValidCsDataExamples};
+
use crate::assert_panics;
use crate::common::csc_strategy;
@@ -171,11 +173,26 @@ fn csc_matrix_valid_data() {
}
}
+#[test]
+fn csc_matrix_valid_data_unsorted_column_indices() {
+ let valid_data: ValidCsDataExamples = ValidCsDataExamples::new();
+
+ let (offsets, indices, values) = valid_data.valid_unsorted_cs_data;
+ let csc = CscMatrix::try_from_unsorted_csc_data(5, 4, offsets, indices, values).unwrap();
+
+ let (offsets2, indices2, values2) = valid_data.valid_cs_data;
+ let expected_csc = CscMatrix::try_from_csc_data(5, 4, offsets2, indices2, values2).unwrap();
+
+ assert_eq!(csc, expected_csc);
+}
+
#[test]
fn csc_matrix_try_from_invalid_csc_data() {
+ let invalid_data: InvalidCsDataExamples = InvalidCsDataExamples::new();
{
// Empty offset array (invalid length)
- let matrix = CscMatrix::try_from_csc_data(0, 0, Vec::new(), Vec::new(), Vec::<i32>::new());
+ let (offsets, indices, values) = invalid_data.empty_offset_array;
+ let matrix = CscMatrix::try_from_csc_data(0, 0, offsets, indices, values);
assert_eq!(
matrix.unwrap_err().kind(),
&SparseFormatErrorKind::InvalidStructure
@@ -184,10 +201,8 @@ fn csc_matrix_try_from_invalid_csc_data() {
{
// Offset array invalid length for arbitrary data
- let offsets = vec![0, 3, 5];
- let indices = vec![0, 1, 2, 3, 5];
- let values = vec![0, 1, 2, 3, 4];
-
+ let (offsets, indices, values) =
+ invalid_data.offset_array_invalid_length_for_arbitrary_data;
let matrix = CscMatrix::try_from_csc_data(6, 3, offsets, indices, values);
assert_eq!(
matrix.unwrap_err().kind(),
@@ -197,9 +212,7 @@ fn csc_matrix_try_from_invalid_csc_data() {
{
// Invalid first entry in offsets array
- let offsets = vec![1, 2, 2, 5];
- let indices = vec![0, 5, 1, 2, 3];
- let values = vec![0, 1, 2, 3, 4];
+ let (offsets, indices, values) = invalid_data.invalid_first_entry_in_offsets_array;
let matrix = CscMatrix::try_from_csc_data(6, 3, offsets, indices, values);
assert_eq!(
matrix.unwrap_err().kind(),
@@ -209,9 +222,7 @@ fn csc_matrix_try_from_invalid_csc_data() {
{
// Invalid last entry in offsets array
- let offsets = vec![0, 2, 2, 4];
- let indices = vec![0, 5, 1, 2, 3];
- let values = vec![0, 1, 2, 3, 4];
+ let (offsets, indices, values) = invalid_data.invalid_last_entry_in_offsets_array;
let matrix = CscMatrix::try_from_csc_data(6, 3, offsets, indices, values);
assert_eq!(
matrix.unwrap_err().kind(),
@@ -221,9 +232,7 @@ fn csc_matrix_try_from_invalid_csc_data() {
{
// Invalid length of offsets array
- let offsets = vec![0, 2, 2];
- let indices = vec![0, 5, 1, 2, 3];
- let values = vec![0, 1, 2, 3, 4];
+ let (offsets, indices, values) = invalid_data.invalid_length_of_offsets_array;
let matrix = CscMatrix::try_from_csc_data(6, 3, offsets, indices, values);
assert_eq!(
matrix.unwrap_err().kind(),
@@ -233,9 +242,7 @@ fn csc_matrix_try_from_invalid_csc_data() {
{
// Nonmonotonic offsets
- let offsets = vec![0, 3, 2, 5];
- let indices = vec![0, 1, 2, 3, 4];
- let values = vec![0, 1, 2, 3, 4];
+ let (offsets, indices, values) = invalid_data.nonmonotonic_offsets;
let matrix = CscMatrix::try_from_csc_data(6, 3, offsets, indices, values);
assert_eq!(
matrix.unwrap_err().kind(),
@@ -257,9 +264,7 @@ fn csc_matrix_try_from_invalid_csc_data() {
{
// Minor index out of bounds
- let offsets = vec![0, 2, 2, 5];
- let indices = vec![0, 6, 1, 2, 3];
- let values = vec![0, 1, 2, 3, 4];
+ let (offsets, indices, values) = invalid_data.minor_index_out_of_bounds;
let matrix = CscMatrix::try_from_csc_data(6, 3, offsets, indices, values);
assert_eq!(
matrix.unwrap_err().kind(),
@@ -269,9 +274,7 @@ fn csc_matrix_try_from_invalid_csc_data() {
{
// Duplicate entry
- let offsets = vec![0, 2, 2, 5];
- let indices = vec![0, 5, 2, 2, 3];
- let values = vec![0, 1, 2, 3, 4];
+ let (offsets, indices, values) = invalid_data.duplicate_entry;
let matrix = CscMatrix::try_from_csc_data(6, 3, offsets, indices, values);
assert_eq!(
matrix.unwrap_err().kind(),
@@ -280,6 +283,121 @@ fn csc_matrix_try_from_invalid_csc_data() {
}
}
+#[test]
+fn csc_matrix_try_from_unsorted_invalid_csc_data() {
+ let invalid_data: InvalidCsDataExamples = InvalidCsDataExamples::new();
+ {
+ // Empty offset array (invalid length)
+ let (offsets, indices, values) = invalid_data.empty_offset_array;
+ let matrix = CscMatrix::try_from_unsorted_csc_data(0, 0, offsets, indices, values);
+ assert_eq!(
+ matrix.unwrap_err().kind(),
+ &SparseFormatErrorKind::InvalidStructure
+ );
+ }
+
+ {
+ // Offset array invalid length for arbitrary data
+ let (offsets, indices, values) =
+ invalid_data.offset_array_invalid_length_for_arbitrary_data;
+ let matrix = CscMatrix::try_from_unsorted_csc_data(6, 3, offsets, indices, values);
+ assert_eq!(
+ matrix.unwrap_err().kind(),
+ &SparseFormatErrorKind::InvalidStructure
+ );
+ }
+
+ {
+ // Invalid first entry in offsets array
+ let (offsets, indices, values) = invalid_data.invalid_first_entry_in_offsets_array;
+ let matrix = CscMatrix::try_from_unsorted_csc_data(6, 3, offsets, indices, values);
+ assert_eq!(
+ matrix.unwrap_err().kind(),
+ &SparseFormatErrorKind::InvalidStructure
+ );
+ }
+
+ {
+ // Invalid last entry in offsets array
+ let (offsets, indices, values) = invalid_data.invalid_last_entry_in_offsets_array;
+ let matrix = CscMatrix::try_from_unsorted_csc_data(6, 3, offsets, indices, values);
+ assert_eq!(
+ matrix.unwrap_err().kind(),
+ &SparseFormatErrorKind::InvalidStructure
+ );
+ }
+
+ {
+ // Invalid length of offsets array
+ let (offsets, indices, values) = invalid_data.invalid_length_of_offsets_array;
+ let matrix = CscMatrix::try_from_unsorted_csc_data(6, 3, offsets, indices, values);
+ assert_eq!(
+ matrix.unwrap_err().kind(),
+ &SparseFormatErrorKind::InvalidStructure
+ );
+ }
+
+ {
+ // Nonmonotonic offsets
+ let (offsets, indices, values) = invalid_data.nonmonotonic_offsets;
+ let matrix = CscMatrix::try_from_unsorted_csc_data(6, 3, offsets, indices, values);
+ assert_eq!(
+ matrix.unwrap_err().kind(),
+ &SparseFormatErrorKind::InvalidStructure
+ );
+ }
+
+ {
+ // Major offset out of bounds
+ let (offsets, indices, values) = invalid_data.major_offset_out_of_bounds;
+ let matrix = CscMatrix::try_from_unsorted_csc_data(6, 3, offsets, indices, values);
+ assert_eq!(
+ matrix.unwrap_err().kind(),
+ &SparseFormatErrorKind::IndexOutOfBounds
+ );
+ }
+
+ {
+ // Minor index out of bounds
+ let (offsets, indices, values) = invalid_data.minor_index_out_of_bounds;
+ let matrix = CscMatrix::try_from_unsorted_csc_data(6, 3, offsets, indices, values);
+ assert_eq!(
+ matrix.unwrap_err().kind(),
+ &SparseFormatErrorKind::IndexOutOfBounds
+ );
+ }
+
+ {
+ // Duplicate entry
+ let (offsets, indices, values) = invalid_data.duplicate_entry;
+ let matrix = CscMatrix::try_from_unsorted_csc_data(6, 3, offsets, indices, values);
+ assert_eq!(
+ matrix.unwrap_err().kind(),
+ &SparseFormatErrorKind::DuplicateEntry
+ );
+ }
+
+ {
+ // Duplicate entry in unsorted lane
+ let (offsets, indices, values) = invalid_data.duplicate_entry_unsorted;
+ let matrix = CscMatrix::try_from_unsorted_csc_data(6, 3, offsets, indices, values);
+ assert_eq!(
+ matrix.unwrap_err().kind(),
+ &SparseFormatErrorKind::DuplicateEntry
+ );
+ }
+
+ {
+ // Wrong values length
+ let (offsets, indices, values) = invalid_data.wrong_values_length;
+ let matrix = CscMatrix::try_from_unsorted_csc_data(6, 3, offsets, indices, values);
+ assert_eq!(
+ matrix.unwrap_err().kind(),
+ &SparseFormatErrorKind::InvalidStructure
+ );
+ }
+}
+
#[test]
fn csc_disassemble_avoids_clone_when_owned() {
// Test that disassemble avoids cloning the sparsity pattern when it holds the sole reference
diff --git a/nalgebra-sparse/tests/unit_tests/csr.rs b/nalgebra-sparse/tests/unit_tests/csr.rs
index 3ca2f0dc..a00470d5 100644
--- a/nalgebra-sparse/tests/unit_tests/csr.rs
+++ b/nalgebra-sparse/tests/unit_tests/csr.rs
@@ -5,7 +5,7 @@ use nalgebra_sparse::{SparseEntry, SparseEntryMut, SparseFormatErrorKind};
use proptest::prelude::*;
use proptest::sample::subsequence;
-use super::test_data_examples::InvalidCsrDataExamples;
+use super::test_data_examples::{InvalidCsDataExamples, ValidCsDataExamples};
use crate::assert_panics;
use crate::common::csr_strategy;
@@ -175,30 +175,20 @@ fn csr_matrix_valid_data() {
#[test]
fn csr_matrix_valid_data_unsorted_column_indices() {
- let csr = CsrMatrix::try_from_unsorted_csr_data(
- 4,
- 5,
- vec![0, 3, 5, 8, 11],
- vec![4, 1, 3, 3, 1, 2, 3, 0, 3, 4, 1],
- vec![5, 1, 4, 7, 4, 2, 3, 1, 8, 9, 6],
- )
- .unwrap();
+ let valid_data: ValidCsDataExamples = ValidCsDataExamples::new();
- let expected_csr = CsrMatrix::try_from_csr_data(
- 4,
- 5,
- vec![0, 3, 5, 8, 11],
- vec![1, 3, 4, 1, 3, 0, 2, 3, 1, 3, 4],
- vec![1, 4, 5, 4, 7, 1, 2, 3, 6, 8, 9],
- )
- .unwrap();
+ let (offsets, indices, values) = valid_data.valid_unsorted_cs_data;
+ let csr = CsrMatrix::try_from_unsorted_csr_data(4, 5, offsets, indices, values).unwrap();
+
+ let (offsets2, indices2, values2) = valid_data.valid_cs_data;
+ let expected_csr = CsrMatrix::try_from_csr_data(4, 5, offsets2, indices2, values2).unwrap();
assert_eq!(csr, expected_csr);
}
#[test]
fn csr_matrix_try_from_invalid_csr_data() {
- let invalid_data: InvalidCsrDataExamples = InvalidCsrDataExamples::new();
+ let invalid_data: InvalidCsDataExamples = InvalidCsDataExamples::new();
{
// Empty offset array (invalid length)
let (offsets, indices, values) = invalid_data.empty_offset_array;
@@ -293,7 +283,7 @@ fn csr_matrix_try_from_invalid_csr_data() {
#[test]
fn csr_matrix_try_from_unsorted_invalid_csr_data() {
- let invalid_data: InvalidCsrDataExamples = InvalidCsrDataExamples::new();
+ let invalid_data: InvalidCsDataExamples = InvalidCsDataExamples::new();
{
// Empty offset array (invalid length)
let (offsets, indices, values) = invalid_data.empty_offset_array;
@@ -355,6 +345,16 @@ fn csr_matrix_try_from_unsorted_invalid_csr_data() {
);
}
+ {
+ // Major offset out of bounds
+ let (offsets, indices, values) = invalid_data.major_offset_out_of_bounds;
+ let matrix = CsrMatrix::try_from_unsorted_csr_data(3, 6, offsets, indices, values);
+ assert_eq!(
+ matrix.unwrap_err().kind(),
+ &SparseFormatErrorKind::IndexOutOfBounds
+ );
+ }
+
{
// Minor index out of bounds
let (offsets, indices, values) = invalid_data.minor_index_out_of_bounds;
@@ -374,6 +374,26 @@ fn csr_matrix_try_from_unsorted_invalid_csr_data() {
&SparseFormatErrorKind::DuplicateEntry
);
}
+
+ {
+ // Duplicate entry in unsorted lane
+ let (offsets, indices, values) = invalid_data.duplicate_entry_unsorted;
+ let matrix = CsrMatrix::try_from_unsorted_csr_data(3, 6, offsets, indices, values);
+ assert_eq!(
+ matrix.unwrap_err().kind(),
+ &SparseFormatErrorKind::DuplicateEntry
+ );
+ }
+
+ {
+ // Wrong values length
+ let (offsets, indices, values) = invalid_data.wrong_values_length;
+ let matrix = CsrMatrix::try_from_unsorted_csr_data(6, 3, offsets, indices, values);
+ assert_eq!(
+ matrix.unwrap_err().kind(),
+ &SparseFormatErrorKind::InvalidStructure
+ );
+ }
}
#[test]
diff --git a/nalgebra-sparse/tests/unit_tests/test_data_examples.rs b/nalgebra-sparse/tests/unit_tests/test_data_examples.rs
index 20721087..a80b5064 100644
--- a/nalgebra-sparse/tests/unit_tests/test_data_examples.rs
+++ b/nalgebra-sparse/tests/unit_tests/test_data_examples.rs
@@ -1,5 +1,31 @@
-/// Examples of *invalid* raw CSR data `(offsets, indices, values)`.
-pub struct InvalidCsrDataExamples {
+/// Examples of *valid* raw CS data `(offsets, indices, values)`.
+pub struct ValidCsDataExamples {
+ pub valid_cs_data: (Vec<usize>, Vec<usize>, Vec<i32>),
+ pub valid_unsorted_cs_data: (Vec<usize>, Vec<usize>, Vec<i32>),
+}
+
+impl ValidCsDataExamples {
+ pub fn new() -> Self {
+ let valid_cs_data = (
+ vec![0, 3, 5, 8, 11],
+ vec![1, 3, 4, 1, 3, 0, 2, 3, 1, 3, 4],
+ vec![1, 4, 5, 4, 7, 1, 2, 3, 6, 8, 9],
+ );
+ let valid_unsorted_cs_data = (
+ vec![0, 3, 5, 8, 11],
+ vec![4, 1, 3, 3, 1, 2, 3, 0, 3, 4, 1],
+ vec![5, 1, 4, 7, 4, 2, 3, 1, 8, 9, 6],
+ );
+
+ return Self {
+ valid_cs_data,
+ valid_unsorted_cs_data,
+ };
+ }
+}
+
+/// Examples of *invalid* raw CS data `(offsets, indices, values)`.
+pub struct InvalidCsDataExamples {
pub empty_offset_array: (Vec<usize>, Vec<usize>, Vec<i32>),
pub offset_array_invalid_length_for_arbitrary_data: (Vec<usize>, Vec<usize>, Vec<i32>),
pub invalid_first_entry_in_offsets_array: (Vec<usize>, Vec<usize>, Vec<i32>),
@@ -7,11 +33,14 @@ pub struct InvalidCsrDataExamples {
pub invalid_length_of_offsets_array: (Vec<usize>, Vec<usize>, Vec<i32>),
pub nonmonotonic_offsets: (Vec<usize>, Vec<usize>, Vec<i32>),
pub nonmonotonic_minor_indices: (Vec<usize>, Vec<usize>, Vec<i32>),
+ pub major_offset_out_of_bounds: (Vec<usize>, Vec<usize>, Vec<i32>),
pub minor_index_out_of_bounds: (Vec<usize>, Vec<usize>, Vec<i32>),
pub duplicate_entry: (Vec<usize>, Vec<usize>, Vec<i32>),
+ pub duplicate_entry_unsorted: (Vec<usize>, Vec<usize>, Vec<i32>),
+ pub wrong_values_length: (Vec<usize>, Vec<usize>, Vec<i32>),
}
-impl InvalidCsrDataExamples {
+impl InvalidCsDataExamples {
pub fn new() -> Self {
let empty_offset_array = (Vec::<usize>::new(), Vec::<usize>::new(), Vec::<i32>::new());
let offset_array_invalid_length_for_arbitrary_data =
@@ -25,9 +54,13 @@ impl InvalidCsrDataExamples {
let nonmonotonic_offsets = (vec![0, 3, 2, 5], vec![0, 1, 2, 3, 4], vec![0, 1, 2, 3, 4]);
let nonmonotonic_minor_indices =
(vec![0, 2, 2, 5], vec![0, 2, 3, 1, 4], vec![0, 1, 2, 3, 4]);
+ let major_offset_out_of_bounds =
+ (vec![0, 7, 2, 5], vec![0, 2, 3, 1, 4], vec![0, 1, 2, 3, 4]);
let minor_index_out_of_bounds =
(vec![0, 2, 2, 5], vec![0, 6, 1, 2, 3], vec![0, 1, 2, 3, 4]);
- let duplicate_entry = (vec![0, 2, 2, 5], vec![0, 5, 2, 2, 3], vec![0, 1, 2, 3, 4]);
+ let duplicate_entry = (vec![0, 1, 2, 5], vec![1, 3, 2, 3, 3], vec![0, 1, 2, 3, 4]);
+ let duplicate_entry_unsorted = (vec![0, 1, 4, 5], vec![1, 3, 2, 3, 3], vec![0, 1, 2, 3, 4]);
+ let wrong_values_length = (vec![0, 1, 2, 5], vec![1, 3, 2, 3, 0], vec![5, 4]);
return Self {
empty_offset_array,
@@ -37,8 +70,11 @@ impl InvalidCsrDataExamples {
invalid_length_of_offsets_array,
nonmonotonic_minor_indices,
nonmonotonic_offsets,
+ major_offset_out_of_bounds,
minor_index_out_of_bounds,
duplicate_entry,
+ duplicate_entry_unsorted,
+ wrong_values_length,
};
}
}
diff --git a/src/base/array_storage.rs b/src/base/array_storage.rs
index b46d442f..b6bd236a 100644
--- a/src/base/array_storage.rs
+++ b/src/base/array_storage.rs
@@ -1,7 +1,5 @@
use std::fmt::{self, Debug, Formatter};
// use std::hash::{Hash, Hasher};
-#[cfg(feature = "abomonation-serialize")]
-use std::io::{Result as IOResult, Write};
use std::ops::Mul;
#[cfg(feature = "serde-serialize-no-std")]
@@ -13,9 +11,6 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};
#[cfg(feature = "serde-serialize-no-std")]
use std::marker::PhantomData;
-#[cfg(feature = "abomonation-serialize")]
-use abomonation::Abomonation;
-
use crate::base::allocator::Allocator;
use crate::base::default_allocator::DefaultAllocator;
use crate::base::dimension::{Const, ToTypenum};
@@ -32,10 +27,7 @@ use std::mem;
/// A array-based statically sized matrix data storage.
#[repr(transparent)]
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
-#[cfg_attr(
- all(not(target_os = "cuda"), feature = "cuda"),
- derive(cust::DeviceCopy)
-)]
+#[cfg_attr(feature = "cuda", derive(cust_core::DeviceCopy))]
pub struct ArrayStorage<T, const R: usize, const C: usize>(pub [[T; R]; C]);
impl<T, const R: usize, const C: usize> ArrayStorage<T, R, C> {
@@ -282,32 +274,6 @@ unsafe impl by
{
}
-#[cfg(feature = "abomonation-serialize")]
-impl<T, const R: usize, const C: usize> Abomonation for ArrayStorage<T, R, C>
-where
- T: Scalar + Abomonation,
-{
- unsafe fn entomb<W: Write>(&self, writer: &mut W) -> IOResult<()> {
- for element in self.as_slice() {
- element.entomb(writer)?;
- }
-
- Ok(())
- }
-
- unsafe fn exhume<'a, 'b>(&'a mut self, mut bytes: &'b mut [u8]) -> Option<&'b mut [u8]> {
- for element in self.as_mut_slice() {
- let temp = bytes;
- bytes = element.exhume(temp)?
- }
- Some(bytes)
- }
-
- fn extent(&self) -> usize {
- self.as_slice().iter().fold(0, |acc, e| acc + e.extent())
- }
-}
-
#[cfg(feature = "rkyv-serialize-no-std")]
mod rkyv_impl {
use super::ArrayStorage;
diff --git a/src/base/blas.rs b/src/base/blas.rs
index 4f56a70e..e65304b5 100644
--- a/src/base/blas.rs
+++ b/src/base/blas.rs
@@ -175,8 +175,7 @@ where
/// Note that this is **not** the matrix multiplication as in, e.g., numpy. For matrix
/// multiplication, use one of: `.gemm`, `.mul_to`, `.mul`, the `*` operator.
///
- /// # Examples:
- ///
+ /// # Example
/// ```
/// # use nalgebra::{Vector3, Matrix2x3};
/// let vec1 = Vector3::new(1.0, 2.0, 3.0);
@@ -207,8 +206,7 @@ where
/// Note that this is **not** the matrix multiplication as in, e.g., numpy. For matrix
/// multiplication, use one of: `.gemm`, `.mul_to`, `.mul`, the `*` operator.
///
- /// # Examples:
- ///
+ /// # Example
/// ```
/// # use nalgebra::{Vector2, Complex};
/// let vec1 = Vector2::new(Complex::new(1.0, 2.0), Complex::new(3.0, 4.0));
@@ -232,8 +230,7 @@ where
/// The dot product between the transpose of `self` and `rhs`.
///
- /// # Examples:
- ///
+ /// # Example
/// ```
/// # use nalgebra::{Vector3, RowVector3, Matrix2x3, Matrix3x2};
/// let vec1 = Vector3::new(1.0, 2.0, 3.0);
@@ -285,8 +282,7 @@ where
///
/// If `b` is zero, `self` is never read from.
///
- /// # Examples:
- ///
+ /// # Example
/// ```
/// # use nalgebra::Vector3;
/// let mut vec1 = Vector3::new(1.0, 2.0, 3.0);
@@ -308,8 +304,7 @@ where
///
/// If `b` is zero, `self` is never read from.
///
- /// # Examples:
- ///
+ /// # Example
/// ```
/// # use nalgebra::Vector3;
/// let mut vec1 = Vector3::new(1.0, 2.0, 3.0);
@@ -333,8 +328,7 @@ where
///
/// If `beta` is zero, `self` is never read.
///
- /// # Examples:
- ///
+ /// # Example
/// ```
/// # use nalgebra::{Matrix2, Vector2};
/// let mut vec1 = Vector2::new(1.0, 2.0);
@@ -425,8 +419,7 @@ where
/// If `beta` is zero, `self` is never read. If `self` is read, only its lower-triangular part
/// (including the diagonal) is actually read.
///
- /// # Examples:
- ///
+ /// # Examples
/// ```
/// # use nalgebra::{Matrix2, Vector2};
/// let mat = Matrix2::new(1.0, 2.0,
@@ -468,8 +461,7 @@ where
/// If `beta` is zero, `self` is never read. If `self` is read, only its lower-triangular part
/// (including the diagonal) is actually read.
///
- /// # Examples:
- ///
+ /// # Examples
/// ```
/// # use nalgebra::{Matrix2, Vector2, Complex};
/// let mat = Matrix2::new(Complex::new(1.0, 0.0), Complex::new(2.0, -0.1),
@@ -552,8 +544,7 @@ where
///
/// If `beta` is zero, `self` is never read.
///
- /// # Examples:
- ///
+ /// # Example
/// ```
/// # use nalgebra::{Matrix2, Vector2};
/// let mat = Matrix2::new(1.0, 3.0,
@@ -587,8 +578,7 @@ where
/// For real matrices, this is the same as `.gemv_tr`.
/// If `beta` is zero, `self` is never read.
///
- /// # Examples:
- ///
+ /// # Example
/// ```
/// # use nalgebra::{Matrix2, Vector2, Complex};
/// let mat = Matrix2::new(Complex::new(1.0, 2.0), Complex::new(3.0, 4.0),
@@ -656,8 +646,7 @@ where
///
/// If `beta` is zero, `self` is never read.
///
- /// # Examples:
- ///
+ /// # Example
/// ```
/// # use nalgebra::{Matrix2x3, Vector2, Vector3};
/// let mut mat = Matrix2x3::repeat(4.0);
@@ -688,8 +677,7 @@ where
///
/// If `beta` is zero, `self` is never read.
///
- /// # Examples:
- ///
+ /// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::{Matrix2x3, Vector2, Vector3, Complex};
@@ -722,8 +710,7 @@ where
///
/// If `beta` is zero, `self` is never read.
///
- /// # Examples:
- ///
+ /// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::{Matrix2x3, Matrix3x4, Matrix2x4};
@@ -763,8 +750,7 @@ where
///
/// If `beta` is zero, `self` is never read.
///
- /// # Examples:
- ///
+ /// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::{Matrix3x2, Matrix3x4, Matrix2x4};
@@ -821,8 +807,7 @@ where
///
/// If `beta` is zero, `self` is never read.
///
- /// # Examples:
- ///
+ /// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::{Matrix3x2, Matrix3x4, Matrix2x4, Complex};
@@ -921,8 +906,7 @@ where
/// If `beta` is zero, `self` is never read. The result is symmetric. Only the lower-triangular
/// (including the diagonal) part of `self` is read/written.
///
- /// # Examples:
- ///
+ /// # Example
/// ```
/// # use nalgebra::{Matrix2, Vector2};
/// let mut mat = Matrix2::identity();
@@ -934,6 +918,7 @@ where
/// mat.ger_symm(10.0, &vec1, &vec2, 5.0);
/// assert_eq!(mat.lower_triangle(), expected.lower_triangle());
/// assert_eq!(mat.m12, 99999.99999); // This was untouched.
+ /// ```
#[inline]
#[deprecated(note = "This is renamed `syger` to match the original BLAS terminology.")]
pub fn ger_symm(
@@ -958,8 +943,7 @@ where
/// If `beta` is zero, `self` is never read. The result is symmetric. Only the lower-triangular
/// (including the diagonal) part of `self` is read/written.
///
- /// # Examples:
- ///
+ /// # Example
/// ```
/// # use nalgebra::{Matrix2, Vector2};
/// let mut mat = Matrix2::identity();
@@ -971,6 +955,7 @@ where
/// mat.syger(10.0, &vec1, &vec2, 5.0);
/// assert_eq!(mat.lower_triangle(), expected.lower_triangle());
/// assert_eq!(mat.m12, 99999.99999); // This was untouched.
+ /// ```
#[inline]
pub fn syger(
&mut self,
@@ -993,8 +978,7 @@ where
/// If `beta` is zero, `self` is never read. The result is symmetric. Only the lower-triangular
/// (including the diagonal) part of `self` is read/written.
///
- /// # Examples:
- ///
+ /// # Example
/// ```
/// # use nalgebra::{Matrix2, Vector2, Complex};
/// let mut mat = Matrix2::identity();
@@ -1006,6 +990,7 @@ where
/// mat.hegerc(Complex::new(10.0, 20.0), &vec1, &vec2, Complex::new(5.0, 15.0));
/// assert_eq!(mat.lower_triangle(), expected.lower_triangle());
/// assert_eq!(mat.m12, Complex::new(99999.99999, 88888.88888)); // This was untouched.
+ /// ```
#[inline]
pub fn hegerc(
&mut self,
@@ -1031,8 +1016,7 @@ where
///
/// This uses the provided workspace `work` to avoid allocations for intermediate results.
///
- /// # Examples:
- ///
+ /// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::{DMatrix, DVector};
@@ -1053,6 +1037,7 @@ where
///
/// mat.quadform_tr_with_workspace(&mut workspace, 10.0, &lhs, &mid, 5.0);
/// assert_relative_eq!(mat, expected);
+ /// ```
pub fn quadform_tr_with_workspace(
&mut self,
work: &mut Vector,
@@ -1085,8 +1070,7 @@ where
/// If `D1` is a type-level integer, then the allocation is performed on the stack.
/// Use `.quadform_tr_with_workspace(...)` instead to avoid allocations.
///
- /// # Examples:
- ///
+ /// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::{Matrix2, Matrix3, Matrix2x3, Vector2};
@@ -1100,6 +1084,7 @@ where
///
/// mat.quadform_tr(10.0, &lhs, &mid, 5.0);
/// assert_relative_eq!(mat, expected);
+ /// ```
pub fn quadform_tr(
&mut self,
alpha: T,
@@ -1124,6 +1109,7 @@ where
///
/// This uses the provided workspace `work` to avoid allocations for intermediate results.
///
+ /// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::{DMatrix, DVector};
@@ -1145,6 +1131,7 @@ where
///
/// mat.quadform_with_workspace(&mut workspace, 10.0, &mid, &rhs, 5.0);
/// assert_relative_eq!(mat, expected);
+ /// ```
pub fn quadform_with_workspace(
&mut self,
work: &mut Vector,
@@ -1180,6 +1167,7 @@ where
/// If `D2` is a type-level integer, then the allocation is performed on the stack.
/// Use `.quadform_with_workspace(...)` instead to avoid allocations.
///
+ /// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::{Matrix2, Matrix3x2, Matrix3};
@@ -1194,6 +1182,7 @@ where
///
/// mat.quadform(10.0, &mid, &rhs, 5.0);
/// assert_relative_eq!(mat, expected);
+ /// ```
pub fn quadform(
&mut self,
alpha: T,
diff --git a/src/base/dimension.rs b/src/base/dimension.rs
index 86006f3d..de51339f 100644
--- a/src/base/dimension.rs
+++ b/src/base/dimension.rs
@@ -13,10 +13,7 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};
/// Dim of dynamically-sized algebraic entities.
#[derive(Clone, Copy, Eq, PartialEq, Debug)]
-#[cfg_attr(
- all(not(target_os = "cuda"), feature = "cuda"),
- derive(cust::DeviceCopy)
-)]
+#[cfg_attr(feature = "cuda", derive(cust_core::DeviceCopy))]
pub struct Dynamic {
value: usize,
}
@@ -201,10 +198,7 @@ dim_ops!(
);
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
-#[cfg_attr(
- all(not(target_os = "cuda"), feature = "cuda"),
- derive(cust::DeviceCopy)
-)]
+#[cfg_attr(feature = "cuda", derive(cust_core::DeviceCopy))]
pub struct Const;
/// Trait implemented exclusively by type-level integers.
@@ -309,24 +303,24 @@ impl DimName for Const {
pub type U1 = Const<1>;
-impl ToTypenum for Const<{ typenum::U1::USIZE }> {
+impl ToTypenum for Const<1> {
type Typenum = typenum::U1;
}
impl ToConst for typenum::U1 {
- type Const = Const<{ typenum::U1::USIZE }>;
+ type Const = Const<1>;
}
macro_rules! from_to_typenum (
- ($($D: ident),* $(,)*) => {$(
- pub type $D = Const<{ typenum::$D::USIZE }>;
+ ($($D: ident, $VAL: expr);* $(;)*) => {$(
+ pub type $D = Const<$VAL>;
- impl ToTypenum for Const<{ typenum::$D::USIZE }> {
+ impl ToTypenum for Const<$VAL> {
type Typenum = typenum::$D;
}
impl ToConst for typenum::$D {
- type Const = Const<{ typenum::$D::USIZE }>;
+ type Const = Const<$VAL>;
}
impl IsNotStaticOne for $D { }
@@ -334,12 +328,12 @@ macro_rules! from_to_typenum (
);
from_to_typenum!(
- U0, /*U1,*/ U2, U3, U4, U5, U6, U7, U8, U9, U10, U11, U12, U13, U14, U15, U16, U17, U18,
- U19, U20, U21, U22, U23, U24, U25, U26, U27, U28, U29, U30, U31, U32, U33, U34, U35, U36, U37,
- U38, U39, U40, U41, U42, U43, U44, U45, U46, U47, U48, U49, U50, U51, U52, U53, U54, U55, U56,
- U57, U58, U59, U60, U61, U62, U63, U64, U65, U66, U67, U68, U69, U70, U71, U72, U73, U74, U75,
- U76, U77, U78, U79, U80, U81, U82, U83, U84, U85, U86, U87, U88, U89, U90, U91, U92, U93, U94,
- U95, U96, U97, U98, U99, U100, U101, U102, U103, U104, U105, U106, U107, U108, U109, U110,
- U111, U112, U113, U114, U115, U116, U117, U118, U119, U120, U121, U122, U123, U124, U125, U126,
- U127
+ U0, 0; /*U1,1;*/ U2, 2; U3, 3; U4, 4; U5, 5; U6, 6; U7, 7; U8, 8; U9, 9; U10, 10; U11, 11; U12, 12; U13, 13; U14, 14; U15, 15; U16, 16; U17, 17; U18, 18;
+ U19, 19; U20, 20; U21, 21; U22, 22; U23, 23; U24, 24; U25, 25; U26, 26; U27, 27; U28, 28; U29, 29; U30, 30; U31, 31; U32, 32; U33, 33; U34, 34; U35, 35; U36, 36; U37, 37;
+ U38, 38; U39, 39; U40, 40; U41, 41; U42, 42; U43, 43; U44, 44; U45, 45; U46, 46; U47, 47; U48, 48; U49, 49; U50, 50; U51, 51; U52, 52; U53, 53; U54, 54; U55, 55; U56, 56;
+ U57, 57; U58, 58; U59, 59; U60, 60; U61, 61; U62, 62; U63, 63; U64, 64; U65, 65; U66, 66; U67, 67; U68, 68; U69, 69; U70, 70; U71, 71; U72, 72; U73, 73; U74, 74; U75, 75;
+ U76, 76; U77, 77; U78, 78; U79, 79; U80, 80; U81, 81; U82, 82; U83, 83; U84, 84; U85, 85; U86, 86; U87, 87; U88, 88; U89, 89; U90, 90; U91, 91; U92, 92; U93, 93; U94, 94;
+ U95, 95; U96, 96; U97, 97; U98, 98; U99, 99; U100, 100; U101, 101; U102, 102; U103, 103; U104, 104; U105, 105; U106, 106; U107, 107; U108, 108; U109, 109; U110, 110;
+ U111, 111; U112, 112; U113, 113; U114, 114; U115, 115; U116, 116; U117, 117; U118, 118; U119, 119; U120, 120; U121, 121; U122, 122; U123, 123; U124, 124; U125, 125; U126, 126;
+ U127, 127
);
diff --git a/src/base/matrix.rs b/src/base/matrix.rs
index 652eace1..f12cb3fa 100644
--- a/src/base/matrix.rs
+++ b/src/base/matrix.rs
@@ -1,6 +1,4 @@
use num::{One, Zero};
-#[cfg(feature = "abomonation-serialize")]
-use std::io::{Result as IOResult, Write};
use approx::{AbsDiffEq, RelativeEq, UlpsEq};
use std::any::TypeId;
@@ -13,9 +11,6 @@ use std::mem;
#[cfg(feature = "serde-serialize-no-std")]
use serde::{Deserialize, Deserializer, Serialize, Serializer};
-#[cfg(feature = "abomonation-serialize")]
-use abomonation::Abomonation;
-
use simba::scalar::{ClosedAdd, ClosedMul, ClosedSub, Field, SupersetOf};
use simba::simd::SimdPartialOrd;
@@ -155,10 +150,7 @@ pub type MatrixCross =
/// some concrete types for `T` and a compatible data storage type `S`).
#[repr(C)]
#[derive(Clone, Copy)]
-#[cfg_attr(
- all(not(target_os = "cuda"), feature = "cuda"),
- derive(cust::DeviceCopy)
-)]
+#[cfg_attr(feature = "cuda", derive(cust_core::DeviceCopy))]
pub struct Matrix {
/// The data storage that contains all the matrix components. Disappointed?
///
@@ -254,21 +246,6 @@ where
}
}
-#[cfg(feature = "abomonation-serialize")]
-impl Abomonation for Matrix {
- unsafe fn entomb(&self, writer: &mut W) -> IOResult<()> {
- self.data.entomb(writer)
- }
-
- unsafe fn exhume<'a, 'b>(&'a mut self, bytes: &'b mut [u8]) -> Option<&'b mut [u8]> {
- self.data.exhume(bytes)
- }
-
- fn extent(&self) -> usize {
- self.data.extent()
- }
-}
-
#[cfg(feature = "compare")]
impl> matrixcompare_core::Matrix
for Matrix
@@ -434,8 +411,6 @@ where
{
/// Assumes a matrix's entries to be initialized. This operation should be near zero-cost.
///
- /// For the similar method that operates on matrix slices, see [`slice_assume_init`].
- ///
/// # Safety
/// The user must make sure that every single entry of the buffer has been initialized,
/// or Undefined Behavior will immediately occur.
@@ -456,12 +431,12 @@ impl> Matrix {
/// The shape of this matrix returned as the tuple (number of rows, number of columns).
///
- /// # Examples:
- ///
+ /// # Example
/// ```
/// # use nalgebra::Matrix3x4;
/// let mat = Matrix3x4::::zeros();
/// assert_eq!(mat.shape(), (3, 4));
+ /// ```
#[inline]
#[must_use]
pub fn shape(&self) -> (usize, usize) {
@@ -478,12 +453,12 @@ impl> Matrix {
/// The number of rows of this matrix.
///
- /// # Examples:
- ///
+ /// # Example
/// ```
/// # use nalgebra::Matrix3x4;
/// let mat = Matrix3x4::::zeros();
/// assert_eq!(mat.nrows(), 3);
+ /// ```
#[inline]
#[must_use]
pub fn nrows(&self) -> usize {
@@ -492,12 +467,12 @@ impl> Matrix {
/// The number of columns of this matrix.
///
- /// # Examples:
- ///
+ /// # Example
/// ```
/// # use nalgebra::Matrix3x4;
/// let mat = Matrix3x4::::zeros();
/// assert_eq!(mat.ncols(), 4);
+ /// ```
#[inline]
#[must_use]
pub fn ncols(&self) -> usize {
@@ -506,14 +481,14 @@ impl> Matrix {
/// The strides (row stride, column stride) of this matrix.
///
- /// # Examples:
- ///
+ /// # Example
/// ```
/// # use nalgebra::DMatrix;
/// let mat = DMatrix::::zeros(10, 10);
/// let slice = mat.slice_with_steps((0, 0), (5, 3), (1, 2));
/// // The column strides is the number of steps (here 2) multiplied by the corresponding dimension.
/// assert_eq!(mat.strides(), (1, 10));
+ /// ```
#[inline]
#[must_use]
pub fn strides(&self) -> (usize, usize) {
@@ -1108,8 +1083,7 @@ impl> Matrix {
impl> Matrix {
/// Iterates through this matrix coordinates in column-major order.
///
- /// # Examples:
- ///
+ /// # Example
/// ```
/// # use nalgebra::Matrix2x3;
/// let mat = Matrix2x3::new(11, 12, 13,
@@ -1122,6 +1096,7 @@ impl> Matrix {
/// assert_eq!(*it.next().unwrap(), 13);
/// assert_eq!(*it.next().unwrap(), 23);
/// assert!(it.next().is_none());
+ /// ```
#[inline]
pub fn iter(&self) -> MatrixIter<'_, T, R, C, S> {
MatrixIter::new(&self.data)
@@ -1144,6 +1119,7 @@ impl> Matrix {
}
/// Iterate through the columns of this matrix.
+ ///
/// # Example
/// ```
/// # use nalgebra::Matrix2x3;
diff --git a/src/base/unit.rs b/src/base/unit.rs
index 60281b8f..bb8b56a1 100644
--- a/src/base/unit.rs
+++ b/src/base/unit.rs
@@ -1,14 +1,9 @@
use std::fmt;
-#[cfg(feature = "abomonation-serialize")]
-use std::io::{Result as IOResult, Write};
use std::ops::Deref;
#[cfg(feature = "serde-serialize-no-std")]
use serde::{Deserialize, Deserializer, Serialize, Serializer};
-#[cfg(feature = "abomonation-serialize")]
-use abomonation::Abomonation;
-
use crate::allocator::Allocator;
use crate::base::DefaultAllocator;
use crate::storage::RawStorage;
@@ -26,10 +21,7 @@ use crate::{Dim, Matrix, OMatrix, RealField, Scalar, SimdComplexField, SimdRealF
/// in their documentation, read their dedicated pages directly.
#[repr(transparent)]
#[derive(Clone, Hash, Copy)]
-// #[cfg_attr(
-// all(not(target_os = "cuda"), feature = "cuda"),
-// derive(cust::DeviceCopy)
-// )]
+// #[cfg_attr(feature = "cuda", derive(cust_core::DeviceCopy))]
pub struct Unit {
pub(crate) value: T,
}
@@ -66,21 +58,6 @@ impl<'de, T: Deserialize<'de>> Deserialize<'de> for Unit {
}
}
-#[cfg(feature = "abomonation-serialize")]
-impl Abomonation for Unit {
- unsafe fn entomb(&self, writer: &mut W) -> IOResult<()> {
- self.value.entomb(writer)
- }
-
- fn extent(&self) -> usize {
- self.value.extent()
- }
-
- unsafe fn exhume<'a, 'b>(&'a mut self, bytes: &'b mut [u8]) -> Option<&'b mut [u8]> {
- self.value.exhume(bytes)
- }
-}
-
#[cfg(feature = "rkyv-serialize-no-std")]
mod rkyv_impl {
use super::Unit;
@@ -122,9 +99,8 @@ mod rkyv_impl {
}
}
-#[cfg(all(not(target_os = "cuda"), feature = "cuda"))]
-unsafe impl cust::memory::DeviceCopy
- for Unit>
+#[cfg(feature = "cuda")]
+unsafe impl cust_core::DeviceCopy for Unit>
where
T: Scalar,
R: Dim,
diff --git a/src/base/vec_storage.rs b/src/base/vec_storage.rs
index bf73661d..414354cd 100644
--- a/src/base/vec_storage.rs
+++ b/src/base/vec_storage.rs
@@ -1,6 +1,3 @@
-#[cfg(feature = "abomonation-serialize")]
-use std::io::{Result as IOResult, Write};
-
#[cfg(all(feature = "alloc", not(feature = "std")))]
use alloc::vec::Vec;
@@ -18,8 +15,6 @@ use serde::{
};
use crate::Storage;
-#[cfg(feature = "abomonation-serialize")]
-use abomonation::Abomonation;
use std::mem::MaybeUninit;
/*
@@ -402,21 +397,6 @@ where
}
}
-#[cfg(feature = "abomonation-serialize")]
-impl Abomonation for VecStorage {
- unsafe fn entomb(&self, writer: &mut W) -> IOResult<()> {
- self.data.entomb(writer)
- }
-
- unsafe fn exhume<'a, 'b>(&'a mut self, bytes: &'b mut [u8]) -> Option<&'b mut [u8]> {
- self.data.exhume(bytes)
- }
-
- fn extent(&self) -> usize {
- self.data.extent()
- }
-}
-
impl Extend for VecStorage {
/// Extends the number of columns of the `VecStorage` with elements
/// from the given iterator.
diff --git a/src/geometry/dual_quaternion.rs b/src/geometry/dual_quaternion.rs
index 4280668a..719ae13d 100644
--- a/src/geometry/dual_quaternion.rs
+++ b/src/geometry/dual_quaternion.rs
@@ -19,6 +19,7 @@ use simba::scalar::{ClosedNeg, RealField};
/// `DualQuaternions` are stored as \[..real, ..dual\].
/// Both of the quaternion components are laid out in `i, j, k, w` order.
///
+/// # Example
/// ```
/// # use nalgebra::{DualQuaternion, Quaternion};
///
@@ -39,10 +40,7 @@ use simba::scalar::{ClosedNeg, RealField};
/// See
#[repr(C)]
#[derive(Debug, Copy, Clone)]
-#[cfg_attr(
- all(not(target_os = "cuda"), feature = "cuda"),
- derive(cust::DeviceCopy)
-)]
+#[cfg_attr(feature = "cuda", derive(cust_core::DeviceCopy))]
pub struct DualQuaternion {
/// The real component of the quaternion
pub real: Quaternion,
@@ -623,6 +621,7 @@ where
/// dq.rotation().euler_angles().0, std::f32::consts::FRAC_PI_2, epsilon = 1.0e-6
/// );
/// assert_relative_eq!(dq.translation().vector.y, 3.0, epsilon = 1.0e-6);
+ /// ```
#[inline]
#[must_use]
pub fn sclerp(&self, other: &Self, t: T) -> Self
@@ -713,6 +712,7 @@ where
/// Return the rotation part of this unit dual quaternion.
///
+ /// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::{UnitDualQuaternion, UnitQuaternion, Vector3};
@@ -733,6 +733,7 @@ where
/// Return the translation part of this unit dual quaternion.
///
+ /// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::{UnitDualQuaternion, UnitQuaternion, Vector3};
@@ -758,6 +759,7 @@ where
/// Builds an isometry from this unit dual quaternion.
///
+ /// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::{UnitDualQuaternion, UnitQuaternion, Vector3};
@@ -783,6 +785,7 @@ where
///
/// This is the same as the multiplication `self * pt`.
///
+ /// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::{UnitDualQuaternion, UnitQuaternion, Vector3, Point3};
@@ -807,6 +810,7 @@ where
///
/// This is the same as the multiplication `self * v`.
///
+ /// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::{UnitDualQuaternion, UnitQuaternion, Vector3};
@@ -831,6 +835,7 @@ where
/// This may be cheaper than inverting the unit dual quaternion and
/// transforming the point.
///
+ /// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::{UnitDualQuaternion, UnitQuaternion, Vector3, Point3};
@@ -856,6 +861,7 @@ where
/// This may be cheaper than inverting the unit dual quaternion and
/// transforming the vector.
///
+ /// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::{UnitDualQuaternion, UnitQuaternion, Vector3};
@@ -880,6 +886,7 @@ where
/// cheaper than inverting the unit dual quaternion and transforming the
/// vector.
///
+ /// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::{UnitDualQuaternion, UnitQuaternion, Unit, Vector3};
@@ -909,6 +916,7 @@ where
/// Converts this unit dual quaternion interpreted as an isometry
/// into its equivalent homogeneous transformation matrix.
///
+ /// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::{Matrix4, UnitDualQuaternion, UnitQuaternion, Vector3};
diff --git a/src/geometry/dual_quaternion_construction.rs b/src/geometry/dual_quaternion_construction.rs
index 94bbc04f..ae7b5c97 100644
--- a/src/geometry/dual_quaternion_construction.rs
+++ b/src/geometry/dual_quaternion_construction.rs
@@ -27,7 +27,6 @@ impl DualQuaternion {
/// The dual quaternion multiplicative identity.
///
/// # Example
- ///
/// ```
/// # use nalgebra::{DualQuaternion, Quaternion};
///
@@ -134,6 +133,7 @@ impl UnitDualQuaternion {
/// The unit dual quaternion multiplicative identity, which also represents
/// the identity transformation as an isometry.
///
+ /// # Example
/// ```
/// # use nalgebra::{UnitDualQuaternion, UnitQuaternion, Vector3, Point3};
/// let ident = UnitDualQuaternion::identity();
@@ -171,6 +171,7 @@ where
/// Return a dual quaternion representing the translation and orientation
/// given by the provided rotation quaternion and translation vector.
///
+ /// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::{UnitDualQuaternion, UnitQuaternion, Vector3, Point3};
@@ -196,6 +197,7 @@ where
/// Return a unit dual quaternion representing the translation and orientation
/// given by the provided isometry.
///
+ /// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::{Isometry3, UnitDualQuaternion, UnitQuaternion, Vector3, Point3};
diff --git a/src/geometry/isometry.rs b/src/geometry/isometry.rs
index f020c0e9..0179f1ff 100755
--- a/src/geometry/isometry.rs
+++ b/src/geometry/isometry.rs
@@ -1,15 +1,10 @@
use approx::{AbsDiffEq, RelativeEq, UlpsEq};
use std::fmt;
use std::hash;
-#[cfg(feature = "abomonation-serialize")]
-use std::io::{Result as IOResult, Write};
#[cfg(feature = "serde-serialize-no-std")]
use serde::{Deserialize, Serialize};
-#[cfg(feature = "abomonation-serialize")]
-use abomonation::Abomonation;
-
use simba::scalar::{RealField, SubsetOf};
use simba::simd::SimdRealField;
@@ -55,10 +50,7 @@ use crate::geometry::{AbstractRotation, Point, Translation};
///
#[repr(C)]
#[derive(Debug, Copy, Clone)]
-#[cfg_attr(
- all(not(target_os = "cuda"), feature = "cuda"),
- derive(cust::DeviceCopy)
-)]
+#[cfg_attr(feature = "cuda", derive(cust_core::DeviceCopy))]
#[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))]
#[cfg_attr(
feature = "serde-serialize-no-std",
@@ -81,29 +73,6 @@ pub struct Isometry {
pub translation: Translation,
}
-#[cfg(feature = "abomonation-serialize")]
-impl Abomonation for Isometry
-where
- T: SimdRealField,
- R: Abomonation,
- Translation: Abomonation,
-{
- unsafe fn entomb(&self, writer: &mut W) -> IOResult<()> {
- self.rotation.entomb(writer)?;
- self.translation.entomb(writer)
- }
-
- fn extent(&self) -> usize {
- self.rotation.extent() + self.translation.extent()
- }
-
- unsafe fn exhume<'a, 'b>(&'a mut self, bytes: &'b mut [u8]) -> Option<&'b mut [u8]> {
- self.rotation
- .exhume(bytes)
- .and_then(|bytes| self.translation.exhume(bytes))
- }
-}
-
#[cfg(feature = "rkyv-serialize-no-std")]
mod rkyv_impl {
use super::Isometry;
diff --git a/src/geometry/orthographic.rs b/src/geometry/orthographic.rs
index 18a7852d..1119d4e3 100644
--- a/src/geometry/orthographic.rs
+++ b/src/geometry/orthographic.rs
@@ -19,10 +19,7 @@ use crate::geometry::{Point3, Projective3};
/// A 3D orthographic projection stored as a homogeneous 4x4 matrix.
#[repr(C)]
-#[cfg_attr(
- all(not(target_os = "cuda"), feature = "cuda"),
- derive(cust::DeviceCopy)
-)]
+#[cfg_attr(feature = "cuda", derive(cust_core::DeviceCopy))]
#[derive(Copy, Clone)]
pub struct Orthographic3 {
matrix: Matrix4,
@@ -319,6 +316,7 @@ impl Orthographic3 {
/// The left offset of the view cuboid.
///
+ /// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::Orthographic3;
@@ -336,6 +334,7 @@ impl Orthographic3 {
/// The right offset of the view cuboid.
///
+ /// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::Orthographic3;
@@ -353,6 +352,7 @@ impl Orthographic3 {
/// The bottom offset of the view cuboid.
///
+ /// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::Orthographic3;
@@ -370,6 +370,7 @@ impl Orthographic3 {
/// The top offset of the view cuboid.
///
+ /// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::Orthographic3;
@@ -387,6 +388,7 @@ impl Orthographic3 {
/// The near plane offset of the view cuboid.
///
+ /// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::Orthographic3;
@@ -404,6 +406,7 @@ impl Orthographic3 {
/// The far plane offset of the view cuboid.
///
+ /// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::Orthographic3;
@@ -526,6 +529,7 @@ impl Orthographic3 {
/// Sets the left offset of the view cuboid.
///
+ /// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::Orthographic3;
@@ -545,6 +549,7 @@ impl Orthographic3 {
/// Sets the right offset of the view cuboid.
///
+ /// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::Orthographic3;
@@ -564,6 +569,7 @@ impl Orthographic3 {
/// Sets the bottom offset of the view cuboid.
///
+ /// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::Orthographic3;
@@ -583,6 +589,7 @@ impl Orthographic3 {
/// Sets the top offset of the view cuboid.
///
+ /// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::Orthographic3;
@@ -602,6 +609,7 @@ impl Orthographic3 {
/// Sets the near plane offset of the view cuboid.
///
+ /// # Example
/// ```
/// # #[macro_use] extern crate approx;
/// # use nalgebra::Orthographic3;
@@ -621,6 +629,7 @@ impl Orthographic3