diff --git a/.circleci/config.yml b/.circleci/config.yml
deleted file mode 100644
index 446e1139..00000000
--- a/.circleci/config.yml
+++ /dev/null
@@ -1,119 +0,0 @@
-version: 2.1
-
-executors:
-  rust-nightly-executor:
-    docker:
-      - image: rustlang/rust:nightly
-  rust-executor:
-    docker:
-      - image: rust:latest
-
-
-jobs:
-  check-fmt:
-    executor: rust-executor
-    steps:
-      - checkout
-      - run:
-          name: install rustfmt
-          command: rustup component add rustfmt
-      - run:
-          name: check formatting
-          command: cargo fmt -- --check
-  clippy:
-    executor: rust-executor
-    steps:
-      - checkout
-      - run:
-          name: install clippy
-          command: rustup component add clippy
-      - run:
-          name: clippy
-          command: cargo clippy
-  build-native:
-    executor: rust-executor
-    steps:
-      - checkout
-      - run: apt-get update
-      - run: apt-get install -y cmake gfortran libblas-dev liblapack-dev
-      - run:
-          name: build --no-default-feature
-          command: cargo build --no-default-features;
-      - run:
-          name: build (default features)
-          command: cargo build;
-      - run:
-          name: build --all-features
-          command: cargo build --all-features
-      - run:
-          name: build nalgebra-glm
-          command: cargo build -p nalgebra-glm --all-features
-      - run:
-          name: build nalgebra-lapack
-          command: cd nalgebra-lapack; cargo build
-  test-native:
-    executor: rust-executor
-    steps:
-      - checkout
-      - run:
-          name: test
-          command: cargo test --features arbitrary --features serde-serialize --features abomonation-serialize --features sparse --features debug --features io --features compare --features libm
-      - run:
-          name: test nalgebra-glm
-          command: cargo test -p nalgebra-glm --features arbitrary --features serde-serialize --features abomonation-serialize --features sparse --features debug --features io --features compare --features libm
-  build-wasm:
-    executor: rust-executor
-    steps:
-      - checkout
-      - run:
-          name: install cargo-web
-          command: cargo install -f cargo-web;
-      - run:
-          name: build --all-features
-          command: cargo web build --verbose --target wasm32-unknown-unknown;
-      - run:
-          name: build nalgebra-glm
-          command: cargo build -p nalgebra-glm --all-features
-  build-no-std:
-    executor: rust-nightly-executor
-    steps:
-      - checkout
-      - run:
-          name: install xargo
-          command: cp .circleci/Xargo.toml .; rustup component add rust-src; cargo install -f xargo;
-      - run:
-          name: build
-          command: xargo build --verbose --no-default-features --target=x86_64-unknown-linux-gnu;
-      - run:
-          name: build --features alloc
-          command: xargo build --verbose --no-default-features --features alloc --target=x86_64-unknown-linux-gnu;
-  build-nightly:
-    executor: rust-nightly-executor
-    steps:
-      - checkout
-      - run:
-          name: build --all-features
-          command: cargo build --all-features
-
-
-workflows:
-  version: 2
-  build:
-    jobs:
-      - check-fmt
-      - clippy
-      - build-native:
-          requires:
-            - check-fmt
-      - build-wasm:
-          requires:
-            - check-fmt
-      - build-no-std:
-          requires:
-            - check-fmt
-      - build-nightly:
-          requires:
-            - check-fmt
-      - test-native:
-          requires:
-            - build-native
diff --git a/.circleci/Xargo.toml b/.github/Xargo.toml
similarity index 100%
rename from .circleci/Xargo.toml
rename to .github/Xargo.toml
diff --git a/.github/workflows/nalgebra-ci-build.yml b/.github/workflows/nalgebra-ci-build.yml
new file mode 100644
index 00000000..181e092e
--- /dev/null
+++ b/.github/workflows/nalgebra-ci-build.yml
@@ -0,0 +1,96 @@
+name: nalgebra CI build
+
+on:
+  push:
+    branches: [ dev, master ]
+  pull_request:
+    branches: [ dev, master ]
+
+env:
+  CARGO_TERM_COLOR: always
+
+jobs:
+  check-fmt:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: Check formatting
+        run: cargo fmt -- --check
+  clippy:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: Install clippy
+        run: rustup component add clippy
+      - name: Run clippy
+        run: cargo clippy
+  build-nalgebra:
+    runs-on: ubuntu-latest
+#    env:
+#      RUSTFLAGS: -D warnings
+    steps:
+      - uses: actions/checkout@v2
+      - name: Build --no-default-features
+        run: cargo build --no-default-features;
+      - name: Build (default features)
+        run: cargo build;
+      - name: Build --all-features
+        run: cargo build --all-features;
+      - name: Build nalgebra-glm
+        run: cargo build -p nalgebra-glm --all-features;
+      - name: Build nalgebra-lapack
+        run: cd nalgebra-lapack; cargo build;
+      - name: Build nalgebra-sparse
+        run: cd nalgebra-sparse; cargo build;
+  test-nalgebra:
+    runs-on: ubuntu-latest
+#    env:
+#      RUSTFLAGS: -D warnings
+    steps:
+      - uses: actions/checkout@v2
+      - name: test
+        run: cargo test --features arbitrary --features serde-serialize,abomonation-serialize,sparse,debug,io,compare,libm,proptest-support,slow-tests;
+  test-nalgebra-glm:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: test nalgebra-glm
+        run: cargo test -p nalgebra-glm --features arbitrary,serde-serialize,abomonation-serialize,sparse,debug,io,compare,libm,proptest-support,slow-tests;
+  test-nalgebra-sparse:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: test nalgebra-sparse
+        # Manifest-path is necessary because cargo otherwise won't correctly forward features
+        # We increase the number of proptest cases to hopefully catch more potential bugs
+        run: PROPTEST_CASES=10000 cargo test --manifest-path=nalgebra-sparse/Cargo.toml --features compare,proptest-support
+      - name: test nalgebra-sparse (slow tests)
+        # Unfortunately, the "slow-tests" take so much time that we need to run them with --release
+        run: PROPTEST_CASES=10000 cargo test --release --manifest-path=nalgebra-sparse/Cargo.toml --features compare,proptest-support,slow-tests slow
+  build-wasm:
+    runs-on: ubuntu-latest
+#    env:
+#      RUSTFLAGS: -D warnings
+    steps:
+      - uses: actions/checkout@v2
+      - run: rustup target add wasm32-unknown-unknown
+      - name: build nalgebra
+        run: cargo build --verbose --target wasm32-unknown-unknown;
+      - name: build nalgebra-glm
+        run: cargo build -p nalgebra-glm --verbose --target wasm32-unknown-unknown;
+  build-no-std:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: Install latest nightly
+        uses: actions-rs/toolchain@v1
+        with:
+          toolchain: nightly
+          override: true
+          components: rustfmt
+      - name: install xargo
+        run: cp .github/Xargo.toml .; rustup component add rust-src; cargo install -f xargo;
+      - name: build
+        run: xargo build --verbose --no-default-features --target=x86_64-unknown-linux-gnu;
+      - name: build --features alloc
+        run: xargo build --verbose --no-default-features --features alloc --target=x86_64-unknown-linux-gnu;
diff --git a/.gitignore b/.gitignore
index 796e70b3..3ae51367 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,3 +10,4 @@ Cargo.lock
 site/
 .vscode/
 .idea/
+proptest-regressions
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index cfc0e080..23cdd62e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,27 @@ documented here.
 
 This project adheres to [Semantic Versioning](https://semver.org/).
 
+## [0.25.0]
+This updates all the dependencies of nalgebra to their latest version, including:
+- rand 0.8
+- proptest 1.0
+- simba 0.4
+
+### New crate!
+Alongside this release of `nalgebra`, we are releasing `nalgebra-sparse`: a crate dedicated to sparse matrix
+computation with `nalgebra`. The `sparse` module of `nalgebra` itself still exists for backward compatibility,
+but it will be deprecated soon in favor of the `nalgebra-sparse` crate.
+
+### Added
+* Add `UnitDualQuaternion`, a dual-quaternion with unit magnitude which can be used as an isometry transformation.
+* Add `UDU::new()` and `matrix.udu()` to compute the UDU factorization of a matrix.
+* Add `ColPivQR::new()` and `matrix.col_piv_qr()` to compute the QR decomposition with column pivoting of a matrix.
+* Add `from_basis_unchecked` to all the rotation types. This builds a rotation from a set of basis vectors (representing the columns of the corresponding rotation matrix).
+* Add `Matrix::cap_magnitude` to cap the magnitude of a vector.
+* Add `UnitQuaternion::append_axisangle_linearized` to approximately append a rotation represented as an axis-angle to a rotation represented as a unit quaternion.
+* Mark the iterators on matrix components as `DoubleEndedIterator`.
+* Re-export `simba::simd::SimdValue` at the root of the `nalgebra` crate.
+
 ## [0.24.0]
 
 ### Added
@@ -67,7 +88,7 @@ In this release, we are no longer relying on traits from the __alga__ crate for
 Instead, we use traits from the new [simba](https://crates.io/crates/simba) crate
 which are both simpler, and allow for significant optimizations like AoSoA SIMD.
 
-Refer to the [monthly Rustsim blogpost](https://www.rustsim.org/blog/2020/04/01/this-month-in-rustsim/)
+Refer to the [monthly dimforge blogpost](https://www.dimforge.org/blog/2020/04/01/this-month-in-dimforge/)
 for details about this switch and its benefits.
 
 ### Added
diff --git a/Cargo.toml b/Cargo.toml
index 615942a8..15f9effd 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,28 +1,29 @@
 [package]
 name = "nalgebra"
-version = "0.24.0"
+version = "0.25.0"
 authors = [ "Sébastien Crozet <developer@crozet.re>" ]
 
-description = "Linear algebra library with transformations and statically-sized or dynamically-sized matrices."
-documentation = "https://nalgebra.org/rustdoc/nalgebra/index.html"
+description = "General-purpose linear algebra library with transformations and statically-sized or dynamically-sized matrices."
+documentation = "https://www.nalgebra.org/docs"
 homepage = "https://nalgebra.org"
-repository = "https://github.com/rustsim/nalgebra"
+repository = "https://github.com/dimforge/nalgebra"
 readme = "README.md"
-categories = [ "science" ]
+categories = [ "science", "mathematics", "wasm", "no-std" ]
 keywords = [ "linear", "algebra", "matrix", "vector", "math" ]
-license = "Apache-2.0"
+license = "BSD-3-Clause"
 edition = "2018"
-
 exclude = ["/ci/*", "/.travis.yml", "/Makefile"]
 
+[badges]
+maintenance = { status = "actively-developed" }
+
 [lib]
 name = "nalgebra"
 path = "src/lib.rs"
 
 [features]
 default = [ "std" ]
-std = [ "matrixmultiply", "rand/std", "rand_distr", "simba/std" ]
-stdweb = [ "rand/stdweb" ]
+std = [ "matrixmultiply", "rand/std", "rand/std_rng", "rand_distr", "simba/std" ]
 arbitrary = [ "quickcheck" ]
 serde-serialize = [ "serde", "num-complex/serde" ]
 abomonation-serialize = [ "abomonation" ]
@@ -33,32 +34,39 @@ io = [ "pest", "pest_derive" ]
 compare = [ "matrixcompare-core" ]
 libm = [ "simba/libm" ]
 libm-force = [ "simba/libm_force" ]
+proptest-support = [ "proptest" ]
+no_unsound_assume_init = [ ]
+# This feature is only used for tests, and enables tests that require more time to run
+slow-tests = []
 
 [dependencies]
 typenum = "1.12"
 generic-array = "0.14"
-rand = { version = "0.7", default-features = false }
+rand = { version = "0.8", default-features = false }
+getrandom = { version = "0.2", default-features = false, features = [ "js" ] } # For wasm
 num-traits = { version = "0.2", default-features = false }
 num-complex = { version = "0.3", default-features = false }
 num-rational = { version = "0.3", default-features = false }
 approx = { version = "0.4", default-features = false }
-simba = { version = "0.3", default-features = false }
+simba = { version = "0.4", default-features = false }
 alga = { version = "0.9", default-features = false, optional = true }
-rand_distr = { version = "0.3", optional = true }
-matrixmultiply = { version = "0.2", optional = true }
+rand_distr = { version = "0.4", default-features = false, optional = true }
+matrixmultiply = { version = "0.3", optional = true }
 serde = { version = "1.0", default-features = false, features = [ "derive" ], optional = true }
 abomonation = { version = "0.7", optional = true }
 mint = { version = "0.5", optional = true }
-quickcheck = { version = "0.9", optional = true }
+quickcheck = { version = "1", optional = true }
 pest = { version = "2", optional = true }
 pest_derive = { version = "2", optional = true }
+bytemuck = { version = "1.5", optional = true }
 matrixcompare-core = { version = "0.1", optional = true }
+proptest = { version = "1", optional = true, default-features = false, features = ["std"] }
 
 [dev-dependencies]
 serde_json = "1.0"
-rand_xorshift = "0.2"
-rand_isaac = "0.2"
+rand_xorshift = "0.3"
+rand_isaac = "0.3"
 ### Uncomment this line before running benchmarks.
 ### We can't just leave this uncommented because that would break
 ### compilation for #[no-std] because of the terrible Cargo bug
@@ -66,10 +74,11 @@ rand_isaac = "0.2"
 #criterion = "0.2.10"
 
 # For matrix comparison macro
-matrixcompare = "0.1.3"
+matrixcompare = "0.2.0"
+itertools = "0.10"
 
 [workspace]
-members = [ "nalgebra-lapack", "nalgebra-glm" ]
+members = [ "nalgebra-lapack", "nalgebra-glm", "nalgebra-sparse" ]
 
 [[bench]]
 name = "nalgebra_bench"
@@ -78,3 +87,7 @@ path = "benches/lib.rs"
 
 [profile.bench]
 lto = true
+
+[package.metadata.docs.rs]
+# Enable certain features when building docs for docs.rs
+features = [ "proptest-support", "compare" ]
diff --git a/README.md b/README.md
index a885855b..08350907 100644
--- a/README.md
+++ b/README.md
@@ -17,7 +17,7 @@
 
-    Users guide | Documentation | Forum
+    Users guide | Documentation | Forum
 
@@ -36,4 +36,4 @@ Rapier is supported by:
 
-
\ No newline at end of file
+
 
diff --git a/benches/core/matrix.rs b/benches/core/matrix.rs
index 7b4f85bd..7f4432e3 100644
--- a/benches/core/matrix.rs
+++ b/benches/core/matrix.rs
@@ -136,6 +136,30 @@ fn mat500_mul_mat500(bench: &mut criterion::Criterion) {
     bench.bench_function("mat500_mul_mat500", move |bh| bh.iter(|| &a * &b));
 }
 
+fn iter(bench: &mut criterion::Criterion) {
+    let a = DMatrix::<f64>::new_random(1000, 1000);
+
+    bench.bench_function("iter", move |bh| {
+        bh.iter(|| {
+            for value in a.iter() {
+                criterion::black_box(value);
+            }
+        })
+    });
+}
+
+fn iter_rev(bench: &mut criterion::Criterion) {
+    let a = DMatrix::<f64>::new_random(1000, 1000);
+
+    bench.bench_function("iter_rev", move |bh| {
+        bh.iter(|| {
+            for value in a.iter().rev() {
+                criterion::black_box(value);
+            }
+        })
+    });
+}
+
 fn copy_from(bench: &mut criterion::Criterion) {
     let a = DMatrix::<f64>::new_random(1000, 1000);
     let mut b = DMatrix::<f64>::new_random(1000, 1000);
@@ -235,6 +259,8 @@ criterion_group!(
     mat10_mul_mat10_static,
     mat100_mul_mat100,
     mat500_mul_mat500,
+    iter,
+    iter_rev,
     copy_from,
     axpy,
     tr_mul_to,
diff --git a/nalgebra-glm/Cargo.toml b/nalgebra-glm/Cargo.toml
index 6cbf7ab7..1cfb164d 100644
--- a/nalgebra-glm/Cargo.toml
+++ b/nalgebra-glm/Cargo.toml
@@ -1,22 +1,24 @@
 [package]
 name = "nalgebra-glm"
-version = "0.10.0"
+version = "0.11.0"
 authors = ["sebcrozet <developer@crozet.re>"]
 
 description = "A computer-graphics oriented API for nalgebra, inspired by the C++ GLM library."
-documentation = "https://www.nalgebra.org/rustdoc_glm/nalgebra_glm/index.html"
+documentation = "https://www.nalgebra.org/docs"
 homepage = "https://nalgebra.org"
-repository = "https://github.com/rustsim/nalgebra"
+repository = "https://github.com/dimforge/nalgebra"
 readme = "../README.md"
-categories = [ "science" ]
+categories = [ "science", "mathematics", "wasm", "no-std" ]
 keywords = [ "linear", "algebra", "matrix", "vector", "math" ]
 license = "BSD-3-Clause"
 edition = "2018"
 
+[badges]
+maintenance = { status = "actively-developed" }
+
 [features]
 default = [ "std" ]
 std = [ "nalgebra/std", "simba/std" ]
-stdweb = [ "nalgebra/stdweb" ]
 arbitrary = [ "nalgebra/arbitrary" ]
 serde-serialize = [ "nalgebra/serde-serialize" ]
 abomonation-serialize = [ "nalgebra/abomonation-serialize" ]
@@ -24,5 +26,5 @@ abomonation-serialize = [ "nalgebra/abomonation-serialize" ]
 [dependencies]
 num-traits = { version = "0.2", default-features = false }
 approx = { version = "0.4", default-features = false }
-simba = { version = "0.3", default-features = false }
-nalgebra = { path = "..", version = "0.24", default-features = false }
+simba = { version = "0.4", default-features = false }
+nalgebra = { path = "..", version = "0.25", default-features = false }
diff --git a/nalgebra-lapack/Cargo.toml b/nalgebra-lapack/Cargo.toml
index 1ea68287..322dc108 100644
--- a/nalgebra-lapack/Cargo.toml
+++ b/nalgebra-lapack/Cargo.toml
@@ -1,40 +1,47 @@
 [package]
 name = "nalgebra-lapack"
-version = "0.15.0"
+version = "0.16.0"
 authors = [ "Sébastien Crozet <developer@crozet.re>", "Andrew Straw <strawman@astraw.com>" ]
 
-description = "Linear algebra library with transformations and satically-sized or dynamically-sized matrices."
-documentation = "https://nalgebra.org/doc/nalgebra/index.html"
-homepage = "https://nalgebra.org"
-repository = "https://github.com/rustsim/nalgebra"
-readme = "README.md"
-keywords = [ "linear", "algebra", "matrix", "vector" ]
-license = "BSD-3-Clause"
-edition = "2018"
+description = "Matrix decompositions using nalgebra matrices and Lapack bindings."
+documentation = "https://www.nalgebra.org/docs"
+homepage = "https://nalgebra.org"
+repository = "https://github.com/dimforge/nalgebra"
+readme = "../README.md"
+categories = [ "science", "mathematics" ]
+keywords = [ "linear", "algebra", "matrix", "vector", "math", "lapack" ]
+license = "BSD-3-Clause"
+edition = "2018"
+
+[badges]
+maintenance = { status = "actively-developed" }
 
 [features]
 serde-serialize = [ "serde", "serde_derive" ]
+proptest-support = [ "nalgebra/proptest-support" ]
+arbitrary = [ "nalgebra/arbitrary" ]
 
 # For BLAS/LAPACK
-default = ["openblas"]
+default = ["netlib"]
 openblas = ["lapack-src/openblas"]
 netlib = ["lapack-src/netlib"]
 accelerate = ["lapack-src/accelerate"]
 intel-mkl = ["lapack-src/intel-mkl"]
 
 [dependencies]
-nalgebra = { version = "0.24", path = ".." }
+nalgebra = { version = "0.25", path = ".." }
 num-traits = "0.2"
-num-complex = { version = "0.2", default-features = false }
-simba = "0.2"
+num-complex = { version = "0.3", default-features = false }
+simba = "0.4"
 serde = { version = "1.0", optional = true }
 serde_derive = { version = "1.0", optional = true }
-lapack = { version = "0.16", default-features = false }
-lapack-src = { version = "0.5", default-features = false }
+lapack = { version = "0.17", default-features = false }
+lapack-src = { version = "0.6", default-features = false }
 # clippy = "*"
 
 [dev-dependencies]
-nalgebra = { version = "0.24", features = [ "arbitrary" ], path = ".." }
-quickcheck = "0.9"
-approx = "0.3"
-rand = "0.7"
+nalgebra = { version = "0.25", features = [ "arbitrary" ], path = ".." }
+proptest = { version = "1", default-features = false, features = ["std"] }
+quickcheck = "1"
+approx = "0.4"
+rand = "0.8"
diff --git a/nalgebra-lapack/src/eigen.rs b/nalgebra-lapack/src/eigen.rs
index 1ccd6e3f..1aae842c 100644
--- a/nalgebra-lapack/src/eigen.rs
+++ b/nalgebra-lapack/src/eigen.rs
@@ -78,9 +78,9 @@ where
 
     let lda = n as i32;
 
-    let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1) };
+    let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() }; // TODO: Tap into the workspace.
-    let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1) };
+    let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() };
 
     let mut info = 0;
     let mut placeholder1 = [N::zero()];
@@ -107,8 +107,10 @@ where
 
         match (left_eigenvectors, eigenvectors) {
             (true, true) => {
-                let mut vl = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) };
-                let mut vr = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) };
+                let mut vl =
+                    unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() };
+                let mut vr =
+                    unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() };
 
                 N::xgeev(
                     ljob,
@@ -137,7 +139,8 @@ where
                 }
             }
             (true, false) => {
-                let mut vl = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) };
+                let mut vl =
+                    unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() };
 
                 N::xgeev(
                     ljob,
@@ -166,7 +169,8 @@ where
                 }
             }
             (false, true) => {
-                let mut vr = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) };
+                let mut vr =
+                    unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() };
 
                 N::xgeev(
                     ljob,
@@ -243,8 +247,8 @@ where
 
     let lda = n as i32;
 
-    let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1) };
-    let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1) };
+    let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() };
+    let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() };
 
     let mut info = 0;
     let mut placeholder1 = [N::zero()];
@@ -287,7 +291,7 @@ where
     );
     lapack_panic!(info);
 
-    let mut res = unsafe { Matrix::new_uninitialized_generic(nrows, U1) };
+    let mut res = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() };
 
     for i in 0..res.len() {
         res[i] = Complex::new(wr[i], wi[i]);
diff --git a/nalgebra-lapack/src/hessenberg.rs b/nalgebra-lapack/src/hessenberg.rs
index b20df55e..ed456ecb 100644
--- a/nalgebra-lapack/src/hessenberg.rs
+++ b/nalgebra-lapack/src/hessenberg.rs
@@ -60,7 +60,7 @@ where
         "Unable to compute the hessenberg decomposition of an empty matrix."
     );
 
-    let mut tau = unsafe { Matrix::new_uninitialized_generic(nrows.sub(U1), U1) };
+    let mut tau = unsafe { Matrix::new_uninitialized_generic(nrows.sub(U1), U1).assume_init() };
 
     let mut info = 0;
     let lwork =
diff --git a/nalgebra-lapack/src/qr.rs b/nalgebra-lapack/src/qr.rs
index ac8ad672..c9216135 100644
--- a/nalgebra-lapack/src/qr.rs
+++ b/nalgebra-lapack/src/qr.rs
@@ -57,7 +57,8 @@ where
     let (nrows, ncols) = m.data.shape();
 
     let mut info = 0;
-    let mut tau = unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), U1) };
+    let mut tau =
+        unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), U1).assume_init() };
 
     if nrows.value() == 0 || ncols.value() == 0 {
         return Self { qr: m, tau: tau };
diff --git a/nalgebra-lapack/src/schur.rs b/nalgebra-lapack/src/schur.rs
index 5079efbf..0480f73f 100644
--- a/nalgebra-lapack/src/schur.rs
+++ b/nalgebra-lapack/src/schur.rs
@@ -78,9 +78,9 @@ where
 
     let mut info = 0;
 
-    let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1) };
-    let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1) };
-    let mut q = unsafe { Matrix::new_uninitialized_generic(nrows, ncols) };
+    let mut wr = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() };
+    let mut wi = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() };
+    let mut q = unsafe { Matrix::new_uninitialized_generic(nrows, ncols).assume_init() };
     // Placeholders:
     let mut bwork = [0i32];
     let mut unused = 0;
@@ -151,7 +151,8 @@ where
     where
         DefaultAllocator: Allocator<Complex<N>, D>,
     {
-        let mut out = unsafe { VectorN::new_uninitialized_generic(self.t.data.shape().0, U1) };
+        let mut out =
+            unsafe { VectorN::new_uninitialized_generic(self.t.data.shape().0, U1).assume_init() };
 
         for i in 0..out.len() {
             out[i] = Complex::new(self.re[i], self.im[i])
diff --git a/nalgebra-lapack/src/svd.rs b/nalgebra-lapack/src/svd.rs
index 18b4957f..70c7fd18 100644
--- a/nalgebra-lapack/src/svd.rs
+++ b/nalgebra-lapack/src/svd.rs
@@ -99,9 +99,9 @@ macro_rules! svd_impl(
         let lda = nrows.value() as i32;
 
-        let mut u = unsafe { Matrix::new_uninitialized_generic(nrows, nrows) };
-        let mut s = unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), U1) };
-        let mut vt = unsafe { Matrix::new_uninitialized_generic(ncols, ncols) };
+        let mut u = unsafe { Matrix::new_uninitialized_generic(nrows, nrows).assume_init() };
+        let mut s = unsafe { Matrix::new_uninitialized_generic(nrows.min(ncols), U1).assume_init() };
+        let mut vt = unsafe { Matrix::new_uninitialized_generic(ncols, ncols).assume_init() };
 
         let ldu = nrows.value();
         let ldvt = ncols.value();
diff --git a/nalgebra-lapack/src/symmetric_eigen.rs b/nalgebra-lapack/src/symmetric_eigen.rs
index 93961328..c255058d 100644
--- a/nalgebra-lapack/src/symmetric_eigen.rs
+++ b/nalgebra-lapack/src/symmetric_eigen.rs
@@ -94,7 +94,7 @@ where
 
     let lda = n as i32;
 
-    let mut values = unsafe { Matrix::new_uninitialized_generic(nrows, U1) };
+    let mut values = unsafe { Matrix::new_uninitialized_generic(nrows, U1).assume_init() };
     let mut info = 0;
 
     let lwork = N::xsyev_work_size(jobz, b'L', n as i32, m.as_mut_slice(), lda, &mut info);
diff --git a/nalgebra-lapack/tests/lib.rs b/nalgebra-lapack/tests/lib.rs
index 37e0b903..973b6d17 100644
--- a/nalgebra-lapack/tests/lib.rs
+++ b/nalgebra-lapack/tests/lib.rs
@@ -1,8 +1,14 @@
 #[macro_use]
 extern crate approx;
+#[cfg(not(feature = "proptest-support"))]
+compile_error!("Tests must be run with `proptest-support`");
+
 extern crate nalgebra as na;
 extern crate nalgebra_lapack as nl;
-#[macro_use]
-extern crate quickcheck;
+
+extern crate lapack;
+extern crate lapack_src;
 
 mod linalg;
+#[path = "../../tests/proptest/mod.rs"]
+mod proptest;
diff --git a/nalgebra-lapack/tests/linalg/cholesky.rs b/nalgebra-lapack/tests/linalg/cholesky.rs
index f811726a..632347b8 100644
--- a/nalgebra-lapack/tests/linalg/cholesky.rs
+++ b/nalgebra-lapack/tests/linalg/cholesky.rs
@@ -1,101 +1,90 @@
 use std::cmp;
 
-use na::{DMatrix, DVector, Matrix3, Matrix4, Matrix4x3, Vector4};
+use na::{DMatrix, DVector, Matrix4x3, Vector4};
 use nl::Cholesky;
 
-quickcheck! {
-    fn cholesky(m: DMatrix<f64>) -> bool {
-        if m.len() != 0 {
-            let m = &m * m.transpose();
-            if let Some(chol) = Cholesky::new(m.clone()) {
-                let l = chol.unpack();
-                let reconstructed_m = &l * l.transpose();
+use crate::proptest::*;
+use proptest::{prop_assert, proptest};
 
-                return relative_eq!(reconstructed_m, m, epsilon = 1.0e-7)
-            }
+proptest! {
+    #[test]
+    fn cholesky(m in dmatrix()) {
+        let m = &m * m.transpose();
+        if let Some(chol) = Cholesky::new(m.clone()) {
+            let l = chol.unpack();
+            let reconstructed_m = &l * l.transpose();
+
+            prop_assert!(relative_eq!(reconstructed_m, m, epsilon = 1.0e-7));
        }
-        return true
    }
 
-    fn cholesky_static(m: Matrix3<f64>) -> bool {
+    #[test]
+    fn cholesky_static(m in matrix3()) {
         let m = &m * m.transpose();
         if let Some(chol) = Cholesky::new(m) {
             let l = chol.unpack();
             let reconstructed_m = &l * l.transpose();
 
-            relative_eq!(reconstructed_m, m, epsilon = 1.0e-7)
-        }
-        else {
-            false
+            prop_assert!(relative_eq!(reconstructed_m, m, epsilon = 1.0e-7))
         }
     }
 
-    fn cholesky_solve(n: usize, nb: usize) -> bool {
-        if n != 0 {
-            let n = cmp::min(n, 15); // To avoid slowing down the test too much.
-            let nb = cmp::min(nb, 15); // To avoid slowing down the test too much.
-            let m = DMatrix::<f64>::new_random(n, n);
-            let m = &m * m.transpose();
+    #[test]
+    fn cholesky_solve(n in PROPTEST_MATRIX_DIM, nb in PROPTEST_MATRIX_DIM) {
+        let n = cmp::min(n, 15); // To avoid slowing down the test too much.
+        let nb = cmp::min(nb, 15); // To avoid slowing down the test too much.
+        let m = DMatrix::<f64>::new_random(n, n);
+        let m = &m * m.transpose();
 
-            if let Some(chol) = Cholesky::new(m.clone()) {
-                let b1 = DVector::new_random(n);
-                let b2 = DMatrix::new_random(n, nb);
+        if let Some(chol) = Cholesky::new(m.clone()) {
+            let b1 = DVector::new_random(n);
+            let b2 = DMatrix::new_random(n, nb);
 
-                let sol1 = chol.solve(&b1).unwrap();
-                let sol2 = chol.solve(&b2).unwrap();
+            let sol1 = chol.solve(&b1).unwrap();
+            let sol2 = chol.solve(&b2).unwrap();
 
-                return relative_eq!(&m * sol1, b1, epsilon = 1.0e-6) &&
-                       relative_eq!(&m * sol2, b2, epsilon = 1.0e-6)
-            }
+            prop_assert!(relative_eq!(&m * sol1, b1, epsilon = 1.0e-6));
+            prop_assert!(relative_eq!(&m * sol2, b2, epsilon = 1.0e-6));
         }
-
-        return true;
     }
 
-    fn cholesky_solve_static(m: Matrix4<f64>) -> bool {
+    #[test]
+    fn cholesky_solve_static(m in matrix4()) {
         let m = &m * m.transpose();
-        match Cholesky::new(m) {
-            Some(chol) => {
-                let b1 = Vector4::new_random();
-                let b2 = Matrix4x3::new_random();
+        if let Some(chol) = Cholesky::new(m) {
+            let b1 = Vector4::new_random();
+            let b2 = Matrix4x3::new_random();
 
-                let sol1 = chol.solve(&b1).unwrap();
-                let sol2 = chol.solve(&b2).unwrap();
+            let sol1 = chol.solve(&b1).unwrap();
+            let sol2 = chol.solve(&b2).unwrap();
 
-                relative_eq!(m * sol1, b1, epsilon = 1.0e-7) &&
-                relative_eq!(m * sol2, b2, epsilon = 1.0e-7)
-            },
-            None => true
+            prop_assert!(relative_eq!(m * sol1, b1, epsilon = 1.0e-7));
+            prop_assert!(relative_eq!(m * sol2, b2, epsilon = 1.0e-7));
         }
     }
 
-    fn cholesky_inverse(n: usize) -> bool {
-        if n != 0 {
-            let n = cmp::min(n, 15); // To avoid slowing down the test too much.
-            let m = DMatrix::<f64>::new_random(n, n);
-            let m = &m * m.transpose();
+    #[test]
+    fn cholesky_inverse(n in PROPTEST_MATRIX_DIM) {
+        let n = cmp::min(n, 15); // To avoid slowing down the test too much.
+        let m = DMatrix::<f64>::new_random(n, n);
+        let m = &m * m.transpose();
 
-            if let Some(m1) = Cholesky::new(m.clone()).unwrap().inverse() {
-                let id1 = &m * &m1;
-                let id2 = &m1 * &m;
+        if let Some(m1) = Cholesky::new(m.clone()).unwrap().inverse() {
+            let id1 = &m * &m1;
+            let id2 = &m1 * &m;
 
-                return id1.is_identity(1.0e-6) && id2.is_identity(1.0e-6);
-            }
+            prop_assert!(id1.is_identity(1.0e-6) && id2.is_identity(1.0e-6));
         }
-
-        return true;
     }
 
-    fn cholesky_inverse_static(m: Matrix4<f64>) -> bool {
+    #[test]
+    fn cholesky_inverse_static(m in matrix4()) {
         let m = m * m.transpose();
-        match Cholesky::new(m.clone()).unwrap().inverse() {
-            Some(m1) => {
-                let id1 = &m * &m1;
-                let id2 = &m1 * &m;
+        if let Some(m1) = Cholesky::new(m.clone()).unwrap().inverse() {
+            let id1 = &m * &m1;
+            let id2 = &m1 * &m;
 
-                id1.is_identity(1.0e-5) && id2.is_identity(1.0e-5)
-            },
-            None => true
+            prop_assert!(id1.is_identity(1.0e-5) && id2.is_identity(1.0e-5))
         }
     }
 }
diff --git a/nalgebra-lapack/tests/linalg/hessenberg.rs b/nalgebra-lapack/tests/linalg/hessenberg.rs
index bb48633e..5292ab17 100644
--- a/nalgebra-lapack/tests/linalg/hessenberg.rs
+++ b/nalgebra-lapack/tests/linalg/hessenberg.rs
@@ -1,38 +1,32 @@
 use std::cmp;
 
-use nl::Hessenberg;
 use na::{DMatrix, Matrix4};
+use nl::Hessenberg;
 
-quickcheck!{
-    fn hessenberg(n: usize) -> bool {
-        if n != 0 {
-            let n = cmp::min(n, 25);
-            let m = DMatrix::<f64>::new_random(n, n);
+use crate::proptest::*;
+use proptest::{prop_assert, proptest};
 
-            match Hessenberg::new(m.clone()) {
-                Some(hess) => {
-                    let h = hess.h();
-                    let p = hess.p();
+proptest! {
+    #[test]
+    fn hessenberg(n in PROPTEST_MATRIX_DIM) {
+        let n = cmp::min(n, 25);
+        let m = DMatrix::<f64>::new_random(n, n);
 
-                    relative_eq!(m, &p * h * p.transpose(), epsilon = 1.0e-7)
-                },
-                None => true
-            }
-        }
-        else {
-            true
+        if let Some(hess) = Hessenberg::new(m.clone()) {
+            let h = hess.h();
+            let p = hess.p();
+
+            prop_assert!(relative_eq!(m, &p * h * p.transpose(), epsilon = 1.0e-7))
         }
     }
 
-    fn hessenberg_static(m: Matrix4<f64>) -> bool {
-        match Hessenberg::new(m) {
-            Some(hess) => {
-                let h = hess.h();
-                let p = hess.p();
+    #[test]
+    fn hessenberg_static(m in matrix4()) {
+        if let Some(hess) = Hessenberg::new(m) {
+            let h = hess.h();
+            let p = hess.p();
 
-                relative_eq!(m, p * h * p.transpose(), epsilon = 1.0e-7)
-            },
-            None => true
+            prop_assert!(relative_eq!(m, p * h * p.transpose(), epsilon = 1.0e-7))
         }
     }
 }
diff --git a/nalgebra-lapack/tests/linalg/lu.rs b/nalgebra-lapack/tests/linalg/lu.rs
index 71293436..9665964e 100644
--- a/nalgebra-lapack/tests/linalg/lu.rs
+++ b/nalgebra-lapack/tests/linalg/lu.rs
@@ -1,28 +1,28 @@
 use std::cmp;
 
-use na::{DMatrix, DVector, Matrix3x4, Matrix4, Matrix4x3, Vector4};
+use na::{DMatrix, DVector, Matrix4x3, Vector4};
 use nl::LU;
 
-quickcheck! {
-    fn lup(m: DMatrix<f64>) -> bool {
-        if m.len() != 0 {
-            let lup = LU::new(m.clone());
-            let l = lup.l();
-            let u = lup.u();
-            let mut computed1 = &l * &u;
-            lup.permute(&mut computed1);
+use crate::proptest::*;
+use proptest::{prop_assert, proptest};
 
-            let computed2 = lup.p() * l * u;
+proptest! {
+    #[test]
+    fn lup(m in dmatrix()) {
+        let lup = LU::new(m.clone());
+        let l = lup.l();
+        let u = lup.u();
+        let mut computed1 = &l * &u;
+        lup.permute(&mut computed1);
 
-            relative_eq!(computed1, m, epsilon = 1.0e-7) &&
-            relative_eq!(computed2, m, epsilon = 1.0e-7)
-        }
-        else {
-            true
-        }
+        let computed2 = lup.p() * l * u;
+
+        prop_assert!(relative_eq!(computed1, m, epsilon = 1.0e-7));
+        prop_assert!(relative_eq!(computed2, m, epsilon = 1.0e-7));
     }
 
-    fn lu_static(m: Matrix3x4<f64>) -> bool {
+    #[test]
+    fn lu_static(m in matrix3x5()) {
         let lup = LU::new(m);
         let l = lup.l();
         let u = lup.u();
@@ -31,37 +31,34 @@ quickcheck! {
 
         let computed2 = lup.p() * l * u;
 
-        relative_eq!(computed1, m, epsilon = 1.0e-7) &&
-        relative_eq!(computed2, m, epsilon = 1.0e-7)
+        prop_assert!(relative_eq!(computed1, m, epsilon = 1.0e-7));
+        prop_assert!(relative_eq!(computed2, m, epsilon = 1.0e-7));
     }
 
-    fn lu_solve(n: usize, nb: usize) -> bool {
-        if n != 0 {
-            let n = cmp::min(n, 25); // To avoid slowing down the test too much.
-            let nb = cmp::min(nb, 25); // To avoid slowing down the test too much.
+    #[test]
+    fn lu_solve(n in PROPTEST_MATRIX_DIM, nb in PROPTEST_MATRIX_DIM) {
+        let n = cmp::min(n, 25); // To avoid slowing down the test too much.
+        let nb = cmp::min(nb, 25); // To avoid slowing down the test too much.
+        let m = DMatrix::<f64>::new_random(n, n);
 
-            let lup = LU::new(m.clone());
-            let b1 = DVector::new_random(n);
-            let b2 = DMatrix::new_random(n, nb);
+        let lup = LU::new(m.clone());
+        let b1 = DVector::new_random(n);
+        let b2 = DMatrix::new_random(n, nb);
 
-            let sol1 = lup.solve(&b1).unwrap();
-            let sol2 = lup.solve(&b2).unwrap();
+        let sol1 = lup.solve(&b1).unwrap();
+        let sol2 = lup.solve(&b2).unwrap();
 
-            let tr_sol1 = lup.solve_transpose(&b1).unwrap();
-            let tr_sol2 = lup.solve_transpose(&b2).unwrap();
+        let tr_sol1 = lup.solve_transpose(&b1).unwrap();
+        let tr_sol2 = lup.solve_transpose(&b2).unwrap();
 
-            relative_eq!(&m * sol1, b1, epsilon = 1.0e-7) &&
-            relative_eq!(&m * sol2, b2, epsilon = 1.0e-7) &&
-            relative_eq!(m.transpose() * tr_sol1, b1, epsilon = 1.0e-7) &&
-            relative_eq!(m.transpose() * tr_sol2, b2, epsilon = 1.0e-7)
-        }
-        else {
-            true
-        }
+        prop_assert!(relative_eq!(&m * sol1, b1, epsilon = 1.0e-7));
+        prop_assert!(relative_eq!(&m * sol2, b2, epsilon = 1.0e-7));
+        prop_assert!(relative_eq!(m.transpose() * tr_sol1, b1, epsilon = 1.0e-7));
+        prop_assert!(relative_eq!(m.transpose() * tr_sol2, b2, epsilon = 1.0e-7));
     }
 
-    fn lu_solve_static(m: Matrix4<f64>) -> bool {
+    #[test]
+    fn lu_solve_static(m in matrix4()) {
         let lup = LU::new(m);
         let b1 = Vector4::new_random();
         let b2 = Matrix4x3::new_random();
@@ -71,37 +68,32 @@ quickcheck! {
         let tr_sol1 = lup.solve_transpose(&b1).unwrap();
         let tr_sol2 = lup.solve_transpose(&b2).unwrap();
 
-        relative_eq!(m * sol1, b1, epsilon = 1.0e-7) &&
-        relative_eq!(m * sol2, b2, epsilon = 1.0e-7) &&
-        relative_eq!(m.transpose() * tr_sol1, b1, epsilon = 1.0e-7) &&
-        relative_eq!(m.transpose() * tr_sol2, b2, epsilon = 1.0e-7)
+        prop_assert!(relative_eq!(m * sol1, b1, epsilon = 1.0e-7));
+        prop_assert!(relative_eq!(m * sol2, b2, epsilon = 1.0e-7));
+        prop_assert!(relative_eq!(m.transpose() * tr_sol1, b1, epsilon = 1.0e-7));
+        prop_assert!(relative_eq!(m.transpose() * tr_sol2, b2, epsilon = 1.0e-7));
     }
 
-    fn lu_inverse(n: usize) -> bool {
-        if n != 0 {
-            let n = cmp::min(n, 25); // To avoid slowing down the test too much.
-            let m = DMatrix::<f64>::new_random(n, n);
+    #[test]
+    fn lu_inverse(n in PROPTEST_MATRIX_DIM) {
+        let n = cmp::min(n, 25); // To avoid slowing down the test too much.
+        let m = DMatrix::<f64>::new_random(n, n);
 
-            if let Some(m1) = LU::new(m.clone()).inverse() {
-                let id1 = &m * &m1;
-                let id2 = &m1 * &m;
+        if let Some(m1) = LU::new(m.clone()).inverse() {
+            let id1 = &m * &m1;
+            let id2 = &m1 * &m;
 
-                return id1.is_identity(1.0e-7) && id2.is_identity(1.0e-7);
-            }
+            prop_assert!(id1.is_identity(1.0e-7) && id2.is_identity(1.0e-7));
         }
-
-        return true;
     }
 
-    fn lu_inverse_static(m: Matrix4<f64>) -> bool {
-        match LU::new(m.clone()).inverse() {
-            Some(m1) => {
-                let id1 = &m * &m1;
-                let id2 = &m1 * &m;
+    #[test]
+    fn lu_inverse_static(m in matrix4()) {
+        if let Some(m1) = LU::new(m.clone()).inverse() {
+            let id1 = &m * &m1;
+            let id2 = &m1 * &m;
 
-                id1.is_identity(1.0e-5) && id2.is_identity(1.0e-5)
-            },
-            None => true
+            prop_assert!(id1.is_identity(1.0e-5) && id2.is_identity(1.0e-5))
         }
     }
 }
diff --git a/nalgebra-lapack/tests/linalg/qr.rs b/nalgebra-lapack/tests/linalg/qr.rs
index 1d193a86..138d38e9 100644
--- a/nalgebra-lapack/tests/linalg/qr.rs
+++ b/nalgebra-lapack/tests/linalg/qr.rs
@@ -1,20 +1,24 @@
-use na::{DMatrix, Matrix4x3};
 use nl::QR;
 
-quickcheck! {
-    fn qr(m: DMatrix<f64>) -> bool {
+use crate::proptest::*;
+use proptest::{prop_assert, proptest};
+
+proptest! {
+    #[test]
+    fn qr(m in dmatrix()) {
         let qr = QR::new(m.clone());
         let q = qr.q();
         let r = qr.r();
 
-        relative_eq!(m, q * r, epsilon = 1.0e-7)
+        prop_assert!(relative_eq!(m, q * r, epsilon = 1.0e-7))
     }
 
-    fn qr_static(m: Matrix4x3<f64>) -> bool {
+    #[test]
+    fn qr_static(m in matrix5x3()) {
         let qr = QR::new(m);
         let q = qr.q();
         let r = qr.r();
 
-        relative_eq!(m, q * r, epsilon = 1.0e-7)
+        prop_assert!(relative_eq!(m, q * r, epsilon = 1.0e-7))
     }
 }
diff --git a/nalgebra-lapack/tests/linalg/real_eigensystem.rs b/nalgebra-lapack/tests/linalg/real_eigensystem.rs
index a711d882..3d1c91eb 100644
--- a/nalgebra-lapack/tests/linalg/real_eigensystem.rs
+++ b/nalgebra-lapack/tests/linalg/real_eigensystem.rs
@@ -3,46 +3,40 @@ use std::cmp;
 use na::{DMatrix, Matrix4};
 use nl::Eigen;
 
-quickcheck! {
-    fn eigensystem(n: usize) -> bool {
-        if n != 0 {
-            let n = cmp::min(n, 25);
-            let m = DMatrix::<f64>::new_random(n, n);
+use crate::proptest::*;
+use proptest::{prop_assert, proptest};
 
-            match Eigen::new(m.clone(), true, true) {
-                Some(eig) => {
-                    let eigvals = DMatrix::from_diagonal(&eig.eigenvalues);
-                    let transformed_eigvectors = &m * eig.eigenvectors.as_ref().unwrap();
-                    let scaled_eigvectors = eig.eigenvectors.as_ref().unwrap() * &eigvals;
+proptest! {
+    #[test]
+    fn eigensystem(n in PROPTEST_MATRIX_DIM) {
+        let n = cmp::min(n, 25);
+        let m = DMatrix::<f64>::new_random(n, n);
 
-                    let transformed_left_eigvectors = m.transpose() * eig.left_eigenvectors.as_ref().unwrap();
-                    let scaled_left_eigvectors = eig.left_eigenvectors.as_ref().unwrap() * &eigvals;
+        if let Some(eig) = Eigen::new(m.clone(), true, true) {
+            let eigvals = DMatrix::from_diagonal(&eig.eigenvalues);
+            let transformed_eigvectors = &m * eig.eigenvectors.as_ref().unwrap();
+            let scaled_eigvectors = eig.eigenvectors.as_ref().unwrap() * &eigvals;
 
-                    relative_eq!(transformed_eigvectors, scaled_eigvectors, epsilon = 1.0e-7) &&
-                    relative_eq!(transformed_left_eigvectors, scaled_left_eigvectors, epsilon = 1.0e-7)
-                },
-                None => true
-            }
-        }
-        else {
-            true
+            let transformed_left_eigvectors = m.transpose() * eig.left_eigenvectors.as_ref().unwrap();
+            let scaled_left_eigvectors = eig.left_eigenvectors.as_ref().unwrap() * &eigvals;
+
+            prop_assert!(relative_eq!(transformed_eigvectors, scaled_eigvectors, epsilon = 1.0e-7));
+            prop_assert!(relative_eq!(transformed_left_eigvectors, scaled_left_eigvectors, epsilon = 1.0e-7));
         }
     }
 
-    fn eigensystem_static(m: Matrix4<f64>) -> bool {
-        match Eigen::new(m, true, true) {
-            Some(eig) => {
-                let eigvals = Matrix4::from_diagonal(&eig.eigenvalues);
-                let transformed_eigvectors = m * eig.eigenvectors.unwrap();
-                let scaled_eigvectors = eig.eigenvectors.unwrap() * eigvals;
+    #[test]
+    fn eigensystem_static(m in matrix4()) {
+        if let Some(eig) = Eigen::new(m, true, true) {
+            let eigvals = Matrix4::from_diagonal(&eig.eigenvalues);
+            let transformed_eigvectors = m * eig.eigenvectors.unwrap();
+            let scaled_eigvectors = eig.eigenvectors.unwrap() * eigvals;
 
-                let transformed_left_eigvectors = m.transpose() * eig.left_eigenvectors.unwrap();
-                let scaled_left_eigvectors = eig.left_eigenvectors.unwrap() * eigvals;
+            let transformed_left_eigvectors = m.transpose() * eig.left_eigenvectors.unwrap();
+            let scaled_left_eigvectors = eig.left_eigenvectors.unwrap() * eigvals;
 
-                relative_eq!(transformed_eigvectors, scaled_eigvectors, epsilon = 1.0e-7) &&
-                relative_eq!(transformed_left_eigvectors, scaled_left_eigvectors, epsilon = 1.0e-7)
-            },
-            None => true
+            prop_assert!(relative_eq!(transformed_eigvectors, scaled_eigvectors, epsilon = 1.0e-7));
+            prop_assert!(relative_eq!(transformed_left_eigvectors, scaled_left_eigvectors, epsilon = 1.0e-7));
         }
     }
 }
diff --git a/nalgebra-lapack/tests/linalg/schur.rs b/nalgebra-lapack/tests/linalg/schur.rs
index ccdb0f0b..0fd1cc33 100644
--- a/nalgebra-lapack/tests/linalg/schur.rs
+++ b/nalgebra-lapack/tests/linalg/schur.rs
@@ -1,20 +1,24 @@
-use na::{DMatrix, Matrix4};
+use na::DMatrix;
 use nl::Schur;
 use std::cmp;
 
-quickcheck! {
-    fn schur(n: usize) -> bool {
+use crate::proptest::*;
+use proptest::{prop_assert, proptest};
+
+proptest! {
+    #[test]
+    fn schur(n in PROPTEST_MATRIX_DIM) {
         let n = cmp::max(1, cmp::min(n, 10));
         let m = DMatrix::<f64>::new_random(n, n);
 
         let (vecs, vals) = Schur::new(m.clone()).unpack();
 
-        relative_eq!(&vecs * vals * vecs.transpose(), m, epsilon = 1.0e-7)
+        prop_assert!(relative_eq!(&vecs * vals * vecs.transpose(), m, epsilon = 1.0e-7))
     }
 
-    fn schur_static(m: Matrix4<f64>) -> bool {
+    #[test]
+    fn schur_static(m in matrix4()) {
         let (vecs, vals) = Schur::new(m.clone()).unpack();
-
-        relative_eq!(vecs * vals * vecs.transpose(), m, epsilon = 1.0e-7)
+        prop_assert!(relative_eq!(vecs * vals * vecs.transpose(), m, epsilon = 1.0e-7))
     }
 }
diff --git a/nalgebra-lapack/tests/linalg/svd.rs b/nalgebra-lapack/tests/linalg/svd.rs
index 20ebd9d5..a9389260 100644
--- a/nalgebra-lapack/tests/linalg/svd.rs
+++ b/nalgebra-lapack/tests/linalg/svd.rs
@@ -1,57 +1,53 @@
-use na::{DMatrix, Matrix3x4};
+use na::{DMatrix, Matrix3x5};
 use nl::SVD;
 
-quickcheck! {
-    fn svd(m: DMatrix<f64>) -> bool {
-        if m.nrows() != 0 && m.ncols() != 0 {
-            let svd = SVD::new(m.clone()).unwrap();
-            let sm = DMatrix::from_partial_diagonal(m.nrows(), m.ncols(), svd.singular_values.as_slice());
+use crate::proptest::*;
+use proptest::{prop_assert, proptest};
 
-            let reconstructed_m = &svd.u * sm * &svd.vt;
-            let reconstructed_m2 = svd.recompose();
+proptest! {
+    #[test]
+    fn svd(m in dmatrix()) {
+        let svd = SVD::new(m.clone()).unwrap();
+        let sm = DMatrix::from_partial_diagonal(m.nrows(), m.ncols(), svd.singular_values.as_slice());
 
-            relative_eq!(reconstructed_m, m, epsilon = 1.0e-7) &&
-            relative_eq!(reconstructed_m2, reconstructed_m, epsilon = 1.0e-7)
-        }
-        else {
-            true
-        }
+        let reconstructed_m = &svd.u * sm * &svd.vt;
+        let reconstructed_m2 = svd.recompose();
+
+        prop_assert!(relative_eq!(reconstructed_m, m, epsilon = 1.0e-7));
+        prop_assert!(relative_eq!(reconstructed_m2, reconstructed_m, epsilon = 1.0e-7));
     }
 
-    fn svd_static(m: Matrix3x4<f64>) -> bool {
+    #[test]
+    fn svd_static(m in matrix3x5()) {
         let svd = SVD::new(m).unwrap();
-        let sm = Matrix3x4::from_partial_diagonal(svd.singular_values.as_slice());
+        let sm = Matrix3x5::from_partial_diagonal(svd.singular_values.as_slice());
 
         let reconstructed_m = &svd.u * &sm * &svd.vt;
         let reconstructed_m2 = svd.recompose();
 
-        relative_eq!(reconstructed_m, m, epsilon = 1.0e-7) &&
-        relative_eq!(reconstructed_m2, m, epsilon = 1.0e-7)
+        prop_assert!(relative_eq!(reconstructed_m, m, epsilon = 1.0e-7));
+        prop_assert!(relative_eq!(reconstructed_m2, m, epsilon = 1.0e-7));
     }
 
-    fn pseudo_inverse(m: DMatrix<f64>) -> bool {
-        if m.nrows() == 0 || m.ncols() == 0 {
-            return true;
-        }
-
+    #[test]
+    fn pseudo_inverse(m in dmatrix()) {
         let svd = SVD::new(m.clone()).unwrap();
         let im = svd.pseudo_inverse(1.0e-7);
 
         if m.nrows() <= m.ncols() {
-            return (&m * &im).is_identity(1.0e-7)
+            prop_assert!((&m * &im).is_identity(1.0e-7));
         }
 
         if m.nrows() >= m.ncols() {
-            return (im * m).is_identity(1.0e-7)
+            prop_assert!((im * m).is_identity(1.0e-7));
         }
-
-        return true;
     }
 
-    fn pseudo_inverse_static(m: Matrix3x4<f64>) -> bool {
+    #[test]
+    fn pseudo_inverse_static(m in matrix3x5()) {
         let svd = SVD::new(m).unwrap();
         let im = svd.pseudo_inverse(1.0e-7);
 
-        (m * im).is_identity(1.0e-7)
+        prop_assert!((m * im).is_identity(1.0e-7))
     }
 }
diff --git a/nalgebra-lapack/tests/linalg/symmetric_eigen.rs b/nalgebra-lapack/tests/linalg/symmetric_eigen.rs
index 1d47f982..d57f772e 100644
--- a/nalgebra-lapack/tests/linalg/symmetric_eigen.rs
+++ b/nalgebra-lapack/tests/linalg/symmetric_eigen.rs
@@ -1,20 +1,25 @@
 use std::cmp;
 
-use na::{DMatrix, Matrix4};
+use na::DMatrix;
 use nl::SymmetricEigen;
 
-quickcheck! {
-    fn symmetric_eigen(n: usize) -> bool {
+use crate::proptest::*;
+use proptest::{prop_assert, proptest};
+
+proptest! {
+    #[test]
+    fn symmetric_eigen(n in PROPTEST_MATRIX_DIM) {
         let n = cmp::max(1, cmp::min(n, 10));
         let m = DMatrix::<f64>::new_random(n, n);
         let eig = SymmetricEigen::new(m.clone());
         let recomp = eig.recompose();
-        relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5)
+        prop_assert!(relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5))
     }
 
-    fn symmetric_eigen_static(m: Matrix4<f64>) -> bool {
+    #[test]
+    fn symmetric_eigen_static(m in matrix4()) {
         let eig = SymmetricEigen::new(m);
         let recomp = eig.recompose();
-        relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5)
+        prop_assert!(relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5))
     }
 }
diff --git a/nalgebra-sparse/Cargo.toml b/nalgebra-sparse/Cargo.toml
new file mode 100644
index 00000000..cc8d5276
--- /dev/null
+++ b/nalgebra-sparse/Cargo.toml
@@ -0,0 +1,27 @@
+[package]
+name = "nalgebra-sparse"
+version = "0.1.0"
+authors = [ "Andreas Longva", "Sébastien Crozet <developer@crozet.re>" ]
+edition = "2018"
+
+[features]
+proptest-support = ["proptest", "nalgebra/proptest-support"]
+compare = [ "matrixcompare-core" ]
+
+# Enable to run some tests that take a lot of time
+slow-tests = []
+
+[dependencies]
+nalgebra = { version="0.25", path = "../" }
+num-traits = { version = "0.2", default-features = false }
+proptest = { version = "1.0", optional = true }
+matrixcompare-core = { version = "0.1.0", optional = true }
+
+[dev-dependencies]
+itertools = "0.10"
+matrixcompare = { version = "0.2.0", features = [ "proptest-support" ] }
+nalgebra = { version="0.25", path = "../", features = ["compare"] }
+
+[package.metadata.docs.rs]
+# Enable certain features when building docs for docs.rs
+features = [ "proptest-support", "compare" ]
\ No newline at end of file
diff --git a/nalgebra-sparse/src/convert/impl_std_ops.rs b/nalgebra-sparse/src/convert/impl_std_ops.rs
new file mode 100644
index 00000000..ba4c015b
--- /dev/null
+++ b/nalgebra-sparse/src/convert/impl_std_ops.rs
@@ -0,0 +1,124 @@
+use crate::convert::serial::*;
+use crate::coo::CooMatrix;
+use crate::csc::CscMatrix;
+use crate::csr::CsrMatrix;
+use nalgebra::storage::Storage;
+use nalgebra::{ClosedAdd, DMatrix, Dim, Matrix, Scalar};
+use num_traits::Zero;
+
+impl<'a, T, R, C, S> From<&'a Matrix<T, R, C, S>> for CooMatrix<T>
+where
+    T: Scalar + Zero,
+    R: Dim,
+    C: Dim,
+    S: Storage<T, R, C>,
+{
+    fn from(matrix: &'a Matrix<T, R, C, S>) -> Self {
+        convert_dense_coo(matrix)
+    }
+}
+
+impl<'a, T> From<&'a CooMatrix<T>> for DMatrix<T>
+where
+    T: Scalar + Zero + ClosedAdd,
+{
+    fn from(coo: &'a CooMatrix<T>) -> Self {
+        convert_coo_dense(coo)
+    }
+}
+
+impl<'a, T> From<&'a CooMatrix<T>> for CsrMatrix<T>
+where
+    T: Scalar + Zero + ClosedAdd,
+{
+    fn from(matrix: &'a CooMatrix<T>) -> Self {
+        convert_coo_csr(matrix)
+    }
+}
+
+impl<'a, T> From<&'a CsrMatrix<T>> for CooMatrix<T>
+where
+    T: Scalar + Zero + ClosedAdd,
+{
+    fn from(matrix: &'a CsrMatrix<T>) -> Self {
+        convert_csr_coo(matrix)
+    }
+}
+
+impl<'a, T, R, C, S> From<&'a Matrix<T, R, C, S>> for CsrMatrix<T>
+where
+    T: Scalar + Zero,
+    R: Dim,
+    C: Dim,
+    S: Storage<T, R, C>,
+{
+    fn from(matrix: &'a Matrix<T, R, C, S>) -> Self {
+        convert_dense_csr(matrix)
+    }
+}
+
+impl<'a, T> From<&'a CsrMatrix<T>> for DMatrix<T>
+where
+    T: Scalar + Zero + ClosedAdd,
+{
+    fn from(matrix: &'a CsrMatrix<T>) -> Self {
+        convert_csr_dense(matrix)
+    }
+}
+
+impl<'a, T> From<&'a CooMatrix<T>> for CscMatrix<T>
+where
+    T: Scalar + Zero + ClosedAdd,
+{
+    fn from(matrix: &'a CooMatrix<T>) -> Self {
+        convert_coo_csc(matrix)
+    }
+}
+
+impl<'a, T> From<&'a CscMatrix<T>> for CooMatrix<T>
+where
+    T: Scalar + Zero,
+{
+    fn from(matrix: &'a CscMatrix<T>) -> Self {
+        convert_csc_coo(matrix)
+    }
+}
+
+impl<'a, T, R, C, S> From<&'a Matrix<T, R, C, S>> for CscMatrix<T>
+where
+    T: Scalar + Zero,
+    R: Dim,
+    C: Dim,
+    S: Storage<T, R, C>,
+{
+    fn from(matrix: &'a Matrix<T, R, C, S>) -> Self {
+        convert_dense_csc(matrix)
+    }
+}
+
+impl<'a, T> From<&'a CscMatrix<T>> for DMatrix<T>
+where
+    T: Scalar + Zero + ClosedAdd,
+{
+    fn from(matrix: &'a CscMatrix<T>) -> Self {
+        convert_csc_dense(matrix)
+    }
+}
+
+impl<'a, T> From<&'a CscMatrix<T>> for CsrMatrix<T>
+where
+    T: Scalar,
+{
+    fn from(matrix: &'a CscMatrix<T>) -> Self {
+        convert_csc_csr(matrix)
+    }
+}
+
+impl<'a, T> From<&'a CsrMatrix<T>> for CscMatrix<T>
+where
+    T: Scalar,
+{
+    fn from(matrix: &'a CsrMatrix<T>) -> Self {
+        convert_csr_csc(matrix)
+    }
+}
diff --git a/nalgebra-sparse/src/convert/mod.rs b/nalgebra-sparse/src/convert/mod.rs
new file mode 100644
index 00000000..77388b22
--- /dev/null
+++ b/nalgebra-sparse/src/convert/mod.rs
@@ -0,0 +1,40 @@
+//! Routines for converting between sparse matrix formats.
+//!
+//! Most users should instead use the provided `From` implementations to convert between matrix
+//! formats. Note that `From` implementations may not be available between all combinations of
+//! sparse matrices.
+//!
+//! The following example illustrates how to convert between matrix formats with the `From`
+//! implementations.
+//!
+//! ```rust
+//! use nalgebra_sparse::{csr::CsrMatrix, csc::CscMatrix, coo::CooMatrix};
+//! use nalgebra::DMatrix;
+//!
+//! // Conversion from dense
+//! let dense = DMatrix::<f64>::identity(9, 8);
+//! let csr = CsrMatrix::from(&dense);
+//! let csc = CscMatrix::from(&dense);
+//! let coo = CooMatrix::from(&dense);
+//!
+//! // CSR <-> CSC
+//! let _ = CsrMatrix::from(&csc);
+//! let _ = CscMatrix::from(&csr);
+//!
+//! // CSR <-> COO
+//! let _ = CooMatrix::from(&csr);
+//! let _ = CsrMatrix::from(&coo);
+//!
+//! // CSC <-> COO
+//! let _ = CooMatrix::from(&csc);
+//! let _ = CscMatrix::from(&coo);
+//! ```
+//!
+//! The routines available here are able to provide more specialized APIs, giving
+//! more control over the conversion process. The routines are organized by backends.
+//! Currently, only the [`serial`] backend is available.
+//! In the future, backends that offer parallel routines may become available.
+
+pub mod serial;
+
+mod impl_std_ops;
diff --git a/nalgebra-sparse/src/convert/serial.rs b/nalgebra-sparse/src/convert/serial.rs
new file mode 100644
index 00000000..7e0da7bc
--- /dev/null
+++ b/nalgebra-sparse/src/convert/serial.rs
@@ -0,0 +1,427 @@
+//! Serial routines for converting between matrix formats.
+//!
+//! All routines in this module are single-threaded. At present these routines offer no
+//! advantage over using the [`From`] trait, but future changes to the API might offer more
+//! control to the user.
+use std::ops::Add;
+
+use num_traits::Zero;
+
+use nalgebra::storage::Storage;
+use nalgebra::{ClosedAdd, DMatrix, Dim, Matrix, Scalar};
+
+use crate::coo::CooMatrix;
+use crate::cs;
+use crate::csc::CscMatrix;
+use crate::csr::CsrMatrix;
+
+/// Converts a dense matrix to [`CooMatrix`].
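+///
+/// A minimal usage sketch (illustrative only; it relies on `triplet_iter`, which is also used
+/// by the conversion routines below):
+///
+/// ```rust
+/// use nalgebra::DMatrix;
+/// use nalgebra_sparse::convert::serial::convert_dense_coo;
+///
+/// // Explicit zeros are skipped, so only the two non-zero entries are stored.
+/// let dense = DMatrix::from_row_slice(2, 2, &[1.0, 0.0, 0.0, 2.0]);
+/// let coo = convert_dense_coo(&dense);
+/// assert_eq!(coo.triplet_iter().count(), 2);
+/// ```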
+pub fn convert_dense_coo(dense: &Matrix) -> CooMatrix +where + T: Scalar + Zero, + R: Dim, + C: Dim, + S: Storage, +{ + let mut coo = CooMatrix::new(dense.nrows(), dense.ncols()); + + for (index, v) in dense.iter().enumerate() { + if v != &T::zero() { + // We use the fact that matrix iteration is guaranteed to be column-major + let i = index % dense.nrows(); + let j = index / dense.nrows(); + coo.push(i, j, v.inlined_clone()); + } + } + + coo +} + +/// Converts a [`CooMatrix`] to a dense matrix. +pub fn convert_coo_dense(coo: &CooMatrix) -> DMatrix +where + T: Scalar + Zero + ClosedAdd, +{ + let mut output = DMatrix::repeat(coo.nrows(), coo.ncols(), T::zero()); + for (i, j, v) in coo.triplet_iter() { + output[(i, j)] += v.inlined_clone(); + } + output +} + +/// Converts a [`CooMatrix`] to a [`CsrMatrix`]. +pub fn convert_coo_csr(coo: &CooMatrix) -> CsrMatrix +where + T: Scalar + Zero, +{ + let (offsets, indices, values) = convert_coo_cs( + coo.nrows(), + coo.row_indices(), + coo.col_indices(), + coo.values(), + ); + + // TODO: Avoid "try_from" since it validates the data? (requires unsafe, should benchmark + // to see if it can be justified for performance reasons) + CsrMatrix::try_from_csr_data(coo.nrows(), coo.ncols(), offsets, indices, values) + .expect("Internal error: Invalid CSR data during COO->CSR conversion") +} + +/// Converts a [`CsrMatrix`] to a [`CooMatrix`]. +pub fn convert_csr_coo(csr: &CsrMatrix) -> CooMatrix { + let mut result = CooMatrix::new(csr.nrows(), csr.ncols()); + for (i, j, v) in csr.triplet_iter() { + result.push(i, j, v.inlined_clone()); + } + result +} + +/// Converts a [`CsrMatrix`] to a dense matrix. +pub fn convert_csr_dense(csr: &CsrMatrix) -> DMatrix +where + T: Scalar + ClosedAdd + Zero, +{ + let mut output = DMatrix::zeros(csr.nrows(), csr.ncols()); + + for (i, j, v) in csr.triplet_iter() { + output[(i, j)] += v.inlined_clone(); + } + + output +} + +/// Converts a dense matrix to a [`CsrMatrix`]. +pub fn convert_dense_csr(dense: &Matrix) -> CsrMatrix +where + T: Scalar + Zero, + R: Dim, + C: Dim, + S: Storage, +{ + let mut row_offsets = Vec::with_capacity(dense.nrows() + 1); + let mut col_idx = Vec::new(); + let mut values = Vec::new(); + + // We have to iterate row-by-row to build the CSR matrix, which is at odds with + // nalgebra's column-major storage. The alternative would be to perform an initial sweep + // to count number of non-zeros per row. + row_offsets.push(0); + for i in 0..dense.nrows() { + for j in 0..dense.ncols() { + let v = dense.index((i, j)); + if v != &T::zero() { + col_idx.push(j); + values.push(v.inlined_clone()); + } + } + row_offsets.push(col_idx.len()); + } + + // TODO: Consider circumventing the data validity check here + // (would require unsafe, should benchmark) + CsrMatrix::try_from_csr_data(dense.nrows(), dense.ncols(), row_offsets, col_idx, values) + .expect("Internal error: Invalid CsrMatrix format during dense-> CSR conversion") +} + +/// Converts a [`CooMatrix`] to a [`CscMatrix`]. +pub fn convert_coo_csc(coo: &CooMatrix) -> CscMatrix +where + T: Scalar + Zero, +{ + let (offsets, indices, values) = convert_coo_cs( + coo.ncols(), + coo.col_indices(), + coo.row_indices(), + coo.values(), + ); + + // TODO: Avoid "try_from" since it validates the data? 
(requires unsafe, should benchmark + // to see if it can be justified for performance reasons) + CscMatrix::try_from_csc_data(coo.nrows(), coo.ncols(), offsets, indices, values) + .expect("Internal error: Invalid CSC data during COO->CSC conversion") +} + +/// Converts a [`CscMatrix`] to a [`CooMatrix`]. +pub fn convert_csc_coo(csc: &CscMatrix) -> CooMatrix +where + T: Scalar, +{ + let mut coo = CooMatrix::new(csc.nrows(), csc.ncols()); + for (i, j, v) in csc.triplet_iter() { + coo.push(i, j, v.inlined_clone()); + } + coo +} + +/// Converts a [`CscMatrix`] to a dense matrix. +pub fn convert_csc_dense(csc: &CscMatrix) -> DMatrix +where + T: Scalar + ClosedAdd + Zero, +{ + let mut output = DMatrix::zeros(csc.nrows(), csc.ncols()); + + for (i, j, v) in csc.triplet_iter() { + output[(i, j)] += v.inlined_clone(); + } + + output +} + +/// Converts a dense matrix to a [`CscMatrix`]. +pub fn convert_dense_csc(dense: &Matrix) -> CscMatrix +where + T: Scalar + Zero, + R: Dim, + C: Dim, + S: Storage, +{ + let mut col_offsets = Vec::with_capacity(dense.ncols() + 1); + let mut row_idx = Vec::new(); + let mut values = Vec::new(); + + col_offsets.push(0); + for j in 0..dense.ncols() { + for i in 0..dense.nrows() { + let v = dense.index((i, j)); + if v != &T::zero() { + row_idx.push(i); + values.push(v.inlined_clone()); + } + } + col_offsets.push(row_idx.len()); + } + + // TODO: Consider circumventing the data validity check here + // (would require unsafe, should benchmark) + CscMatrix::try_from_csc_data(dense.nrows(), dense.ncols(), col_offsets, row_idx, values) + .expect("Internal error: Invalid CscMatrix format during dense-> CSC conversion") +} + +/// Converts a [`CsrMatrix`] to a [`CscMatrix`]. +pub fn convert_csr_csc(csr: &CsrMatrix) -> CscMatrix +where + T: Scalar, +{ + let (offsets, indices, values) = cs::transpose_cs( + csr.nrows(), + csr.ncols(), + csr.row_offsets(), + csr.col_indices(), + csr.values(), + ); + + // TODO: Avoid data validity check? + CscMatrix::try_from_csc_data(csr.nrows(), csr.ncols(), offsets, indices, values) + .expect("Internal error: Invalid CSC data during CSR->CSC conversion") +} + +/// Converts a [`CscMatrix`] to a [`CsrMatrix`]. +pub fn convert_csc_csr(csc: &CscMatrix) -> CsrMatrix +where + T: Scalar, +{ + let (offsets, indices, values) = cs::transpose_cs( + csc.ncols(), + csc.nrows(), + csc.col_offsets(), + csc.row_indices(), + csc.values(), + ); + + // TODO: Avoid data validity check? + CsrMatrix::try_from_csr_data(csc.nrows(), csc.ncols(), offsets, indices, values) + .expect("Internal error: Invalid CSR data during CSC->CSR conversion") +} + +fn convert_coo_cs( + major_dim: usize, + major_indices: &[usize], + minor_indices: &[usize], + values: &[T], +) -> (Vec, Vec, Vec) +where + T: Scalar + Zero, +{ + assert_eq!(major_indices.len(), minor_indices.len()); + assert_eq!(minor_indices.len(), values.len()); + let nnz = major_indices.len(); + + let (unsorted_major_offsets, unsorted_minor_idx, unsorted_vals) = { + let mut offsets = vec![0usize; major_dim + 1]; + let mut minor_idx = vec![0usize; nnz]; + let mut vals = vec![T::zero(); nnz]; + coo_to_unsorted_cs( + &mut offsets, + &mut minor_idx, + &mut vals, + major_dim, + major_indices, + minor_indices, + values, + ); + (offsets, minor_idx, vals) + }; + + // TODO: If input is sorted and/or without duplicates, we can avoid additional allocations + // and work. Might want to take advantage of this. + + // At this point, assembly is essentially complete. 
However, we must ensure + // that minor indices are sorted within each lane and without duplicates. + let mut sorted_major_offsets = Vec::new(); + let mut sorted_minor_idx = Vec::new(); + let mut sorted_vals = Vec::new(); + + sorted_major_offsets.push(0); + + // We need some temporary storage when working with each lane. Since lanes often have a + // very small number of non-zero entries, we try to amortize allocations across + // lanes by reusing workspace vectors + let mut idx_workspace = Vec::new(); + let mut perm_workspace = Vec::new(); + let mut values_workspace = Vec::new(); + + for lane in 0..major_dim { + let begin = unsorted_major_offsets[lane]; + let end = unsorted_major_offsets[lane + 1]; + let count = end - begin; + let range = begin..end; + + // Ensure that workspaces can hold enough data + perm_workspace.resize(count, 0); + idx_workspace.resize(count, 0); + values_workspace.resize(count, T::zero()); + sort_lane( + &mut idx_workspace[..count], + &mut values_workspace[..count], + &unsorted_minor_idx[range.clone()], + &unsorted_vals[range.clone()], + &mut perm_workspace[..count], + ); + + let sorted_ja_current_len = sorted_minor_idx.len(); + + combine_duplicates( + |idx| sorted_minor_idx.push(idx), + |val| sorted_vals.push(val), + &idx_workspace[..count], + &values_workspace[..count], + &Add::add, + ); + + let new_col_count = sorted_minor_idx.len() - sorted_ja_current_len; + sorted_major_offsets.push(sorted_major_offsets.last().unwrap() + new_col_count); + } + + (sorted_major_offsets, sorted_minor_idx, sorted_vals) +} + +/// Converts matrix data given in triplet format to unsorted CSR/CSC, retaining any duplicated +/// indices. +/// +/// Here `major/minor` is `row/col` for CSR and `col/row` for CSC. +fn coo_to_unsorted_cs( + major_offsets: &mut [usize], + cs_minor_idx: &mut [usize], + cs_values: &mut [T], + major_dim: usize, + major_indices: &[usize], + minor_indices: &[usize], + coo_values: &[T], +) { + assert_eq!(major_offsets.len(), major_dim + 1); + assert_eq!(cs_minor_idx.len(), cs_values.len()); + assert_eq!(cs_values.len(), major_indices.len()); + assert_eq!(major_indices.len(), minor_indices.len()); + assert_eq!(minor_indices.len(), coo_values.len()); + + // Count the number of occurrences of each row + for major_idx in major_indices { + major_offsets[*major_idx] += 1; + } + + cs::convert_counts_to_offsets(major_offsets); + + { + // TODO: Instead of allocating a whole new vector storing the current counts, + // I think it's possible to be a bit more clever by storing each count + // in the last of the column indices for each row + let mut current_counts = vec![0usize; major_dim + 1]; + let triplet_iter = major_indices.iter().zip(minor_indices).zip(coo_values); + for ((i, j), value) in triplet_iter { + let current_offset = major_offsets[*i] + current_counts[*i]; + cs_minor_idx[current_offset] = *j; + cs_values[current_offset] = value.clone(); + current_counts[*i] += 1; + } + } +} + +/// Sort the indices of the given lane. +/// +/// The indices and values in `minor_idx` and `values` are sorted according to the +/// minor indices and stored in `minor_idx_result` and `values_result` respectively. +/// +/// All input slices are expected to be of the same length. The contents of mutable slices +/// can be arbitrary, as they are anyway overwritten. 
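+///
+/// For illustration, a hypothetical input of `minor_idx = [3, 1, 2]` with
+/// `values = [30.0, 10.0, 20.0]` would produce `minor_idx_result = [1, 2, 3]`
+/// and `values_result = [10.0, 20.0, 30.0]`.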
+fn sort_lane<T: Clone>(
+    minor_idx_result: &mut [usize],
+    values_result: &mut [T],
+    minor_idx: &[usize],
+    values: &[T],
+    workspace: &mut [usize],
+) {
+    assert_eq!(minor_idx_result.len(), values_result.len());
+    assert_eq!(values_result.len(), minor_idx.len());
+    assert_eq!(minor_idx.len(), values.len());
+    assert_eq!(values.len(), workspace.len());
+
+    let permutation = workspace;
+    // Set permutation to identity
+    for (i, p) in permutation.iter_mut().enumerate() {
+        *p = i;
+    }
+
+    // Compute permutation needed to bring minor indices into sorted order
+    // Note: Using sort_unstable here avoids internal allocations, which is crucial since
+    // each lane might have a small number of elements
+    permutation.sort_unstable_by_key(|idx| minor_idx[*idx]);
+
+    apply_permutation(minor_idx_result, minor_idx, permutation);
+    apply_permutation(values_result, values, permutation);
+}
+
+// TODO: Move this into `utils` or something?
+fn apply_permutation<T: Clone>(out_slice: &mut [T], in_slice: &[T], permutation: &[usize]) {
+    assert_eq!(out_slice.len(), in_slice.len());
+    assert_eq!(out_slice.len(), permutation.len());
+    for (out_element, old_pos) in out_slice.iter_mut().zip(permutation) {
+        *out_element = in_slice[*old_pos].clone();
+    }
+}
+
+/// Given *sorted* indices and corresponding scalar values, combines duplicates with the given
+/// associative combiner and calls the provided produce methods with combined indices and values.
+fn combine_duplicates<T: Clone>(
+    mut produce_idx: impl FnMut(usize),
+    mut produce_value: impl FnMut(T),
+    idx_array: &[usize],
+    values: &[T],
+    combiner: impl Fn(T, T) -> T,
+) {
+    assert_eq!(idx_array.len(), values.len());
+
+    let mut i = 0;
+    while i < idx_array.len() {
+        let idx = idx_array[i];
+        let mut combined_value = values[i].clone();
+        let mut j = i + 1;
+        while j < idx_array.len() && idx_array[j] == idx {
+            let j_val = values[j].clone();
+            combined_value = combiner(combined_value, j_val);
+            j += 1;
+        }
+        produce_idx(idx);
+        produce_value(combined_value);
+        i = j;
+    }
+}
diff --git a/nalgebra-sparse/src/coo.rs b/nalgebra-sparse/src/coo.rs
new file mode 100644
index 00000000..50c68677
--- /dev/null
+++ b/nalgebra-sparse/src/coo.rs
@@ -0,0 +1,208 @@
+//! An implementation of the COO sparse matrix format.
+
+use crate::SparseFormatError;
+
+/// A COO representation of a sparse matrix.
+///
+/// A COO matrix stores entries in coordinate-form, that is triplets `(i, j, v)`, where `i` and `j`
+/// correspond to row and column indices of the entry, and `v` to the value of the entry.
+/// The format is of limited use for standard matrix operations. Its main purpose is to facilitate
+/// easy construction of other, more efficient matrix formats (such as CSR/CSC), and the
+/// conversion between different formats.
+///
+/// # Format
+///
+/// For given dimensions `nrows` and `ncols`, the matrix is represented by three same-length
+/// arrays `row_indices`, `col_indices` and `values` that constitute the coordinate triplets
+/// of the matrix. The indices must be in bounds, but *duplicate entries are explicitly allowed*.
+/// Upon conversion to other formats, the duplicate entries may be summed together. See the
+/// documentation for the respective conversion functions.
+///
+/// # Examples
+///
+/// ```rust
+/// use nalgebra_sparse::{coo::CooMatrix, csr::CsrMatrix, csc::CscMatrix};
+///
+/// // Initialize a matrix with all zeros (no explicitly stored entries).
+/// let mut coo = CooMatrix::new(4, 4); +/// // Or initialize it with a set of triplets +/// coo = CooMatrix::try_from_triplets(4, 4, vec![1, 2], vec![0, 1], vec![3.0, 4.0]).unwrap(); +/// +/// // Push a few triplets +/// coo.push(2, 0, 1.0); +/// coo.push(0, 1, 2.0); +/// +/// // Convert to other matrix formats +/// let csr = CsrMatrix::from(&coo); +/// let csc = CscMatrix::from(&coo); +/// ``` +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct CooMatrix { + nrows: usize, + ncols: usize, + row_indices: Vec, + col_indices: Vec, + values: Vec, +} + +impl CooMatrix { + /// Construct a zero COO matrix of the given dimensions. + /// + /// Specifically, the collection of triplets - corresponding to explicitly stored entries - + /// is empty, so that the matrix (implicitly) represented by the COO matrix consists of all + /// zero entries. + pub fn new(nrows: usize, ncols: usize) -> Self { + Self { + nrows, + ncols, + row_indices: Vec::new(), + col_indices: Vec::new(), + values: Vec::new(), + } + } + + /// Construct a zero COO matrix of the given dimensions. + /// + /// Specifically, the collection of triplets - corresponding to explicitly stored entries - + /// is empty, so that the matrix (implicitly) represented by the COO matrix consists of all + /// zero entries. + pub fn zeros(nrows: usize, ncols: usize) -> Self { + Self::new(nrows, ncols) + } + + /// Try to construct a COO matrix from the given dimensions and a collection of + /// (i, j, v) triplets. + /// + /// Returns an error if either row or column indices contain indices out of bounds, + /// or if the data arrays do not all have the same length. Note that the COO format + /// inherently supports duplicate entries. + pub fn try_from_triplets( + nrows: usize, + ncols: usize, + row_indices: Vec, + col_indices: Vec, + values: Vec, + ) -> Result { + use crate::SparseFormatErrorKind::*; + if row_indices.len() != col_indices.len() { + return Err(SparseFormatError::from_kind_and_msg( + InvalidStructure, + "Number of row and col indices must be the same.", + )); + } else if col_indices.len() != values.len() { + return Err(SparseFormatError::from_kind_and_msg( + InvalidStructure, + "Number of col indices and values must be the same.", + )); + } + + let row_indices_in_bounds = row_indices.iter().all(|i| *i < nrows); + let col_indices_in_bounds = col_indices.iter().all(|j| *j < ncols); + + if !row_indices_in_bounds { + Err(SparseFormatError::from_kind_and_msg( + IndexOutOfBounds, + "Row index out of bounds.", + )) + } else if !col_indices_in_bounds { + Err(SparseFormatError::from_kind_and_msg( + IndexOutOfBounds, + "Col index out of bounds.", + )) + } else { + Ok(Self { + nrows, + ncols, + row_indices, + col_indices, + values, + }) + } + } + + /// An iterator over triplets (i, j, v). + // TODO: Consider giving the iterator a concrete type instead of impl trait...? + pub fn triplet_iter(&self) -> impl Iterator { + self.row_indices + .iter() + .zip(&self.col_indices) + .zip(&self.values) + .map(|((i, j), v)| (*i, *j, v)) + } + + /// Push a single triplet to the matrix. + /// + /// This adds the value `v` to the `i`th row and `j`th column in the matrix. + /// + /// Panics + /// ------ + /// + /// Panics if `i` or `j` is out of bounds. + #[inline] + pub fn push(&mut self, i: usize, j: usize, v: T) { + assert!(i < self.nrows); + assert!(j < self.ncols); + self.row_indices.push(i); + self.col_indices.push(j); + self.values.push(v); + } + + /// The number of rows in the matrix. 
+ #[inline] + pub fn nrows(&self) -> usize { + self.nrows + } + + /// The number of columns in the matrix. + #[inline] + pub fn ncols(&self) -> usize { + self.ncols + } + + /// The number of explicitly stored entries in the matrix. + /// + /// This number *includes* duplicate entries. For example, if the `CooMatrix` contains duplicate + /// entries, then it may have a different number of non-zeros as reported by `nnz()` compared + /// to its CSR representation. + #[inline] + pub fn nnz(&self) -> usize { + self.values.len() + } + + /// The row indices of the explicitly stored entries. + pub fn row_indices(&self) -> &[usize] { + &self.row_indices + } + + /// The column indices of the explicitly stored entries. + pub fn col_indices(&self) -> &[usize] { + &self.col_indices + } + + /// The values of the explicitly stored entries. + pub fn values(&self) -> &[T] { + &self.values + } + + /// Disassembles the matrix into individual triplet arrays. + /// + /// Examples + /// -------- + /// + /// ``` + /// # use nalgebra_sparse::coo::CooMatrix; + /// let row_indices = vec![0, 1]; + /// let col_indices = vec![1, 2]; + /// let values = vec![1.0, 2.0]; + /// let coo = CooMatrix::try_from_triplets(2, 3, row_indices, col_indices, values) + /// .unwrap(); + /// + /// let (row_idx, col_idx, val) = coo.disassemble(); + /// assert_eq!(row_idx, vec![0, 1]); + /// assert_eq!(col_idx, vec![1, 2]); + /// assert_eq!(val, vec![1.0, 2.0]); + /// ``` + pub fn disassemble(self) -> (Vec, Vec, Vec) { + (self.row_indices, self.col_indices, self.values) + } +} diff --git a/nalgebra-sparse/src/cs.rs b/nalgebra-sparse/src/cs.rs new file mode 100644 index 00000000..d6f9b229 --- /dev/null +++ b/nalgebra-sparse/src/cs.rs @@ -0,0 +1,530 @@ +use std::mem::replace; +use std::ops::Range; + +use num_traits::One; + +use nalgebra::Scalar; + +use crate::pattern::SparsityPattern; +use crate::{SparseEntry, SparseEntryMut}; + +/// An abstract compressed matrix. +/// +/// For the time being, this is only used internally to share implementation between +/// CSR and CSC matrices. +/// +/// A CSR matrix is obtained by associating rows with the major dimension, while a CSC matrix +/// is obtained by associating columns with the major dimension. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct CsMatrix { + sparsity_pattern: SparsityPattern, + values: Vec, +} + +impl CsMatrix { + /// Create a zero matrix with no explicitly stored entries. + #[inline] + pub fn new(major_dim: usize, minor_dim: usize) -> Self { + Self { + sparsity_pattern: SparsityPattern::zeros(major_dim, minor_dim), + values: vec![], + } + } + + #[inline] + pub fn pattern(&self) -> &SparsityPattern { + &self.sparsity_pattern + } + + #[inline] + pub fn values(&self) -> &[T] { + &self.values + } + + #[inline] + pub fn values_mut(&mut self) -> &mut [T] { + &mut self.values + } + + /// Returns the raw data represented as a tuple `(major_offsets, minor_indices, values)`. + #[inline] + pub fn cs_data(&self) -> (&[usize], &[usize], &[T]) { + let pattern = self.pattern(); + ( + pattern.major_offsets(), + pattern.minor_indices(), + &self.values, + ) + } + + /// Returns the raw data represented as a tuple `(major_offsets, minor_indices, values)`. 
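+    ///
+    /// Note that only the values are returned mutably: the offsets and indices are always
+    /// borrowed immutably, so the sparsity pattern itself cannot be invalidated through
+    /// this method.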
+ #[inline] + pub fn cs_data_mut(&mut self) -> (&[usize], &[usize], &mut [T]) { + let pattern = &mut self.sparsity_pattern; + ( + pattern.major_offsets(), + pattern.minor_indices(), + &mut self.values, + ) + } + + #[inline] + pub fn pattern_and_values_mut(&mut self) -> (&SparsityPattern, &mut [T]) { + (&self.sparsity_pattern, &mut self.values) + } + + #[inline] + pub fn from_pattern_and_values(pattern: SparsityPattern, values: Vec) -> Self { + assert_eq!( + pattern.nnz(), + values.len(), + "Internal error: consumers should verify shape compatibility." + ); + Self { + sparsity_pattern: pattern, + values, + } + } + + /// Internal method for simplifying access to a lane's data + #[inline] + pub fn get_index_range(&self, row_index: usize) -> Option> { + let row_begin = *self.sparsity_pattern.major_offsets().get(row_index)?; + let row_end = *self.sparsity_pattern.major_offsets().get(row_index + 1)?; + Some(row_begin..row_end) + } + + pub fn take_pattern_and_values(self) -> (SparsityPattern, Vec) { + (self.sparsity_pattern, self.values) + } + + #[inline] + pub fn disassemble(self) -> (Vec, Vec, Vec) { + let (offsets, indices) = self.sparsity_pattern.disassemble(); + (offsets, indices, self.values) + } + + #[inline] + pub fn into_pattern_and_values(self) -> (SparsityPattern, Vec) { + (self.sparsity_pattern, self.values) + } + + /// Returns an entry for the given major/minor indices, or `None` if the indices are out + /// of bounds. + pub fn get_entry(&self, major_index: usize, minor_index: usize) -> Option> { + let row_range = self.get_index_range(major_index)?; + let (_, minor_indices, values) = self.cs_data(); + let minor_indices = &minor_indices[row_range.clone()]; + let values = &values[row_range]; + get_entry_from_slices( + self.pattern().minor_dim(), + minor_indices, + values, + minor_index, + ) + } + + /// Returns a mutable entry for the given major/minor indices, or `None` if the indices are out + /// of bounds. + pub fn get_entry_mut( + &mut self, + major_index: usize, + minor_index: usize, + ) -> Option> { + let row_range = self.get_index_range(major_index)?; + let minor_dim = self.pattern().minor_dim(); + let (_, minor_indices, values) = self.cs_data_mut(); + let minor_indices = &minor_indices[row_range.clone()]; + let values = &mut values[row_range]; + get_mut_entry_from_slices(minor_dim, minor_indices, values, minor_index) + } + + pub fn get_lane(&self, index: usize) -> Option> { + let range = self.get_index_range(index)?; + let (_, minor_indices, values) = self.cs_data(); + Some(CsLane { + minor_indices: &minor_indices[range.clone()], + values: &values[range], + minor_dim: self.pattern().minor_dim(), + }) + } + + #[inline] + pub fn get_lane_mut(&mut self, index: usize) -> Option> { + let range = self.get_index_range(index)?; + let minor_dim = self.pattern().minor_dim(); + let (_, minor_indices, values) = self.cs_data_mut(); + Some(CsLaneMut { + minor_dim, + minor_indices: &minor_indices[range.clone()], + values: &mut values[range], + }) + } + + #[inline] + pub fn lane_iter(&self) -> CsLaneIter { + CsLaneIter::new(self.pattern(), self.values()) + } + + #[inline] + pub fn lane_iter_mut(&mut self) -> CsLaneIterMut { + CsLaneIterMut::new(&self.sparsity_pattern, &mut self.values) + } + + #[inline] + pub fn filter
<P>
(&self, predicate: P) -> Self + where + T: Clone, + P: Fn(usize, usize, &T) -> bool, + { + let (major_dim, minor_dim) = (self.pattern().major_dim(), self.pattern().minor_dim()); + let mut new_offsets = Vec::with_capacity(self.pattern().major_dim() + 1); + let mut new_indices = Vec::new(); + let mut new_values = Vec::new(); + + new_offsets.push(0); + for (i, lane) in self.lane_iter().enumerate() { + for (&j, value) in lane.minor_indices().iter().zip(lane.values) { + if predicate(i, j, value) { + new_indices.push(j); + new_values.push(value.clone()); + } + } + + new_offsets.push(new_indices.len()); + } + + // TODO: Avoid checks here + let new_pattern = SparsityPattern::try_from_offsets_and_indices( + major_dim, + minor_dim, + new_offsets, + new_indices, + ) + .expect("Internal error: Sparsity pattern must always be valid."); + + Self::from_pattern_and_values(new_pattern, new_values) + } + + /// Returns the diagonal of the matrix as a sparse matrix. + pub fn diagonal_as_matrix(&self) -> Self + where + T: Clone, + { + // TODO: This might be faster with a binary search for each diagonal entry + self.filter(|i, j, _| i == j) + } +} + +impl CsMatrix { + #[inline] + pub fn identity(n: usize) -> Self { + let offsets: Vec<_> = (0..=n).collect(); + let indices: Vec<_> = (0..n).collect(); + let values = vec![T::one(); n]; + + // TODO: We should skip checks here + let pattern = + SparsityPattern::try_from_offsets_and_indices(n, n, offsets, indices).unwrap(); + Self::from_pattern_and_values(pattern, values) + } +} + +fn get_entry_from_slices<'a, T>( + minor_dim: usize, + minor_indices: &'a [usize], + values: &'a [T], + global_minor_index: usize, +) -> Option> { + let local_index = minor_indices.binary_search(&global_minor_index); + if let Ok(local_index) = local_index { + Some(SparseEntry::NonZero(&values[local_index])) + } else if global_minor_index < minor_dim { + Some(SparseEntry::Zero) + } else { + None + } +} + +fn get_mut_entry_from_slices<'a, T>( + minor_dim: usize, + minor_indices: &'a [usize], + values: &'a mut [T], + global_minor_indices: usize, +) -> Option> { + let local_index = minor_indices.binary_search(&global_minor_indices); + if let Ok(local_index) = local_index { + Some(SparseEntryMut::NonZero(&mut values[local_index])) + } else if global_minor_indices < minor_dim { + Some(SparseEntryMut::Zero) + } else { + None + } +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct CsLane<'a, T> { + minor_dim: usize, + minor_indices: &'a [usize], + values: &'a [T], +} + +#[derive(Debug, PartialEq, Eq)] +pub struct CsLaneMut<'a, T> { + minor_dim: usize, + minor_indices: &'a [usize], + values: &'a mut [T], +} + +pub struct CsLaneIter<'a, T> { + // The index of the lane that will be returned on the next iteration + current_lane_idx: usize, + pattern: &'a SparsityPattern, + remaining_values: &'a [T], +} + +impl<'a, T> CsLaneIter<'a, T> { + pub fn new(pattern: &'a SparsityPattern, values: &'a [T]) -> Self { + Self { + current_lane_idx: 0, + pattern, + remaining_values: values, + } + } +} + +impl<'a, T> Iterator for CsLaneIter<'a, T> +where + T: 'a, +{ + type Item = CsLane<'a, T>; + + fn next(&mut self) -> Option { + let lane = self.pattern.get_lane(self.current_lane_idx); + let minor_dim = self.pattern.minor_dim(); + + if let Some(minor_indices) = lane { + let count = minor_indices.len(); + let values_in_lane = &self.remaining_values[..count]; + self.remaining_values = &self.remaining_values[count..]; + self.current_lane_idx += 1; + + Some(CsLane { + minor_dim, + minor_indices, + values: 
values_in_lane, + }) + } else { + None + } + } +} + +pub struct CsLaneIterMut<'a, T> { + // The index of the lane that will be returned on the next iteration + current_lane_idx: usize, + pattern: &'a SparsityPattern, + remaining_values: &'a mut [T], +} + +impl<'a, T> CsLaneIterMut<'a, T> { + pub fn new(pattern: &'a SparsityPattern, values: &'a mut [T]) -> Self { + Self { + current_lane_idx: 0, + pattern, + remaining_values: values, + } + } +} + +impl<'a, T> Iterator for CsLaneIterMut<'a, T> +where + T: 'a, +{ + type Item = CsLaneMut<'a, T>; + + fn next(&mut self) -> Option { + let lane = self.pattern.get_lane(self.current_lane_idx); + let minor_dim = self.pattern.minor_dim(); + + if let Some(minor_indices) = lane { + let count = minor_indices.len(); + + let remaining = replace(&mut self.remaining_values, &mut []); + let (values_in_lane, remaining) = remaining.split_at_mut(count); + self.remaining_values = remaining; + self.current_lane_idx += 1; + + Some(CsLaneMut { + minor_dim, + minor_indices, + values: values_in_lane, + }) + } else { + None + } + } +} + +/// Implement the methods common to both CsLane and CsLaneMut. See the documentation for the +/// methods delegated here by CsrMatrix and CscMatrix members for more information. +macro_rules! impl_cs_lane_common_methods { + ($name:ty) => { + impl<'a, T> $name { + #[inline] + pub fn minor_dim(&self) -> usize { + self.minor_dim + } + + #[inline] + pub fn nnz(&self) -> usize { + self.minor_indices.len() + } + + #[inline] + pub fn minor_indices(&self) -> &[usize] { + self.minor_indices + } + + #[inline] + pub fn values(&self) -> &[T] { + self.values + } + + #[inline] + pub fn get_entry(&self, global_col_index: usize) -> Option> { + get_entry_from_slices( + self.minor_dim, + self.minor_indices, + self.values, + global_col_index, + ) + } + } + }; +} + +impl_cs_lane_common_methods!(CsLane<'a, T>); +impl_cs_lane_common_methods!(CsLaneMut<'a, T>); + +impl<'a, T> CsLaneMut<'a, T> { + pub fn values_mut(&mut self) -> &mut [T] { + self.values + } + + pub fn indices_and_values_mut(&mut self) -> (&[usize], &mut [T]) { + (self.minor_indices, self.values) + } + + pub fn get_entry_mut(&mut self, global_minor_index: usize) -> Option> { + get_mut_entry_from_slices( + self.minor_dim, + self.minor_indices, + self.values, + global_minor_index, + ) + } +} + +/// Helper struct for working with uninitialized data in vectors. +/// TODO: This doesn't belong here. +struct UninitVec { + vec: Vec, + len: usize, +} + +impl UninitVec { + pub fn from_len(len: usize) -> Self { + Self { + vec: Vec::with_capacity(len), + // We need to store len separately, because for zero-sized types, + // Vec::with_capacity(len) does not give vec.capacity() == len + len, + } + } + + /// Sets the element associated with the given index to the provided value. + /// + /// Must be called exactly once per index, otherwise results in undefined behavior. + pub unsafe fn set(&mut self, index: usize, value: T) { + self.vec.as_mut_ptr().add(index).write(value) + } + + /// Marks the vector data as initialized by returning a full vector. + /// + /// It is undefined behavior to call this function unless *all* elements have been written to + /// exactly once. + pub unsafe fn assume_init(mut self) -> Vec { + self.vec.set_len(self.len); + self.vec + } +} + +/// Transposes the compressed format. +/// +/// This means that major and minor roles are switched. This is used for converting between CSR +/// and CSC formats. 
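+///
+/// The implementation makes two passes over the input: a counting pass that tallies how many
+/// entries belong to each minor index (turned into offsets by `convert_counts_to_offsets`),
+/// and a scatter pass that writes each source entry directly into its final position in the
+/// transposed arrays.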
+pub fn transpose_cs<T>(
+    major_dim: usize,
+    minor_dim: usize,
+    source_major_offsets: &[usize],
+    source_minor_indices: &[usize],
+    values: &[T],
+) -> (Vec<usize>, Vec<usize>, Vec<T>)
+where
+    T: Scalar,
+{
+    assert_eq!(source_major_offsets.len(), major_dim + 1);
+    assert_eq!(source_minor_indices.len(), values.len());
+    let nnz = values.len();
+
+    // Count the number of occurrences of each minor index
+    let mut minor_counts = vec![0; minor_dim];
+    for minor_idx in source_minor_indices {
+        minor_counts[*minor_idx] += 1;
+    }
+    convert_counts_to_offsets(&mut minor_counts);
+    let mut target_offsets = minor_counts;
+    target_offsets.push(nnz);
+    let mut target_indices = vec![usize::MAX; nnz];
+
+    // We have to use uninitialized storage, because we don't have any kind of "default" value
+    // available for `T`. Unfortunately this necessitates some small amount of unsafe code
+    let mut target_values = UninitVec::from_len(nnz);
+
+    // Keep track of how many entries we have placed in each target major lane
+    let mut current_target_major_counts = vec![0; minor_dim];
+
+    for source_major_idx in 0..major_dim {
+        let source_lane_begin = source_major_offsets[source_major_idx];
+        let source_lane_end = source_major_offsets[source_major_idx + 1];
+        let source_lane_indices = &source_minor_indices[source_lane_begin..source_lane_end];
+        let source_lane_values = &values[source_lane_begin..source_lane_end];
+
+        for (&source_minor_idx, val) in source_lane_indices.iter().zip(source_lane_values) {
+            // Compute the offset in the target data for this particular source entry
+            let target_lane_count = &mut current_target_major_counts[source_minor_idx];
+            let entry_offset = target_offsets[source_minor_idx] + *target_lane_count;
+            target_indices[entry_offset] = source_major_idx;
+            unsafe {
+                target_values.set(entry_offset, val.inlined_clone());
+            }
+            *target_lane_count += 1;
+        }
+    }
+
+    // At this point, we should have written to each element in target_values exactly once,
+    // so initialization should be sound
+    let target_values = unsafe { target_values.assume_init() };
+    (target_offsets, target_indices, target_values)
+}
+
+pub fn convert_counts_to_offsets(counts: &mut [usize]) {
+    // Convert the counts to an offset
+    let mut offset = 0;
+    for i_offset in counts.iter_mut() {
+        let count = *i_offset;
+        *i_offset = offset;
+        offset += count;
+    }
+}
diff --git a/nalgebra-sparse/src/csc.rs b/nalgebra-sparse/src/csc.rs
new file mode 100644
index 00000000..3a4d3f6f
--- /dev/null
+++ b/nalgebra-sparse/src/csc.rs
@@ -0,0 +1,704 @@
+//! An implementation of the CSC sparse matrix format.
+//!
+//! This is the module-level documentation. See [`CscMatrix`] for the main documentation of the
+//! CSC implementation.
+
+use crate::cs::{CsLane, CsLaneIter, CsLaneIterMut, CsLaneMut, CsMatrix};
+use crate::csr::CsrMatrix;
+use crate::pattern::{SparsityPattern, SparsityPatternFormatError, SparsityPatternIter};
+use crate::{SparseEntry, SparseEntryMut, SparseFormatError, SparseFormatErrorKind};
+
+use nalgebra::Scalar;
+use num_traits::One;
+use std::slice::{Iter, IterMut};
+
+/// A CSC representation of a sparse matrix.
+///
+/// The Compressed Sparse Column (CSC) format is well-suited as a general-purpose storage format
+/// for many sparse matrix applications.
+///
+/// # Usage
+///
+/// ```rust
+/// use nalgebra_sparse::csc::CscMatrix;
+/// use nalgebra::{DMatrix, Matrix3x4};
+/// use matrixcompare::assert_matrix_eq;
+///
+/// // The sparsity patterns of CSC matrices are immutable. This means that you cannot dynamically
+/// // change the sparsity pattern of the matrix after it has been constructed. The easiest
+/// // way to construct a CSC matrix is to first incrementally construct a COO matrix,
+/// // and then convert it to CSC.
+/// # use nalgebra_sparse::coo::CooMatrix;
+/// # let coo = CooMatrix::<f64>::new(3, 3);
+/// let csc = CscMatrix::from(&coo);
+///
+/// // Alternatively, a CSC matrix can be constructed directly from raw CSC data.
+/// // Here, we construct a 3x4 matrix
+/// let col_offsets = vec![0, 1, 3, 4, 5];
+/// let row_indices = vec![0, 0, 2, 2, 0];
+/// let values = vec![1.0, 2.0, 3.0, 4.0, 5.0];
+///
+/// // The dense representation of the CSC data, for comparison
+/// let dense = Matrix3x4::new(1.0, 2.0, 0.0, 5.0,
+///                            0.0, 0.0, 0.0, 0.0,
+///                            0.0, 3.0, 4.0, 0.0);
+///
+/// // The constructor validates the raw CSC data and returns an error if it is invalid.
+/// let csc = CscMatrix::try_from_csc_data(3, 4, col_offsets, row_indices, values)
+///     .expect("CSC data must conform to format specifications");
+/// assert_matrix_eq!(csc, dense);
+///
+/// // A third approach is to construct a CSC matrix from a pattern and values. Sometimes this is
+/// // useful if the sparsity pattern is constructed separately from the values of the matrix.
+/// let (pattern, values) = csc.into_pattern_and_values();
+/// let csc = CscMatrix::try_from_pattern_and_values(pattern, values)
+///     .expect("The pattern and values must be compatible");
+///
+/// // Once we have constructed our matrix, we can use it for arithmetic operations together with
+/// // other CSC matrices and dense matrices/vectors.
+/// let x = csc;
+/// # #[allow(non_snake_case)]
+/// let xTx = x.transpose() * &x;
+/// let z = DMatrix::from_fn(4, 8, |i, j| (i as f64) * (j as f64));
+/// let w = 3.0 * xTx * z;
+///
+/// // Although the sparsity pattern of a CSC matrix cannot be changed, its values can.
+/// // Here are two different ways to scale all values by a constant:
+/// let mut x = x;
+/// x *= 5.0;
+/// x.values_mut().iter_mut().for_each(|x_i| *x_i *= 5.0);
+/// ```
+///
+/// # Format
+///
+/// An `m x n` sparse matrix with `nnz` non-zeros in CSC format is represented by the
+/// following three arrays:
+///
+/// - `col_offsets`, an array of integers with length `n + 1`.
+/// - `row_indices`, an array of integers with length `nnz`.
+/// - `values`, an array of values with length `nnz`.
+///
+/// The relationship between the arrays is described below.
+///
+/// - Each consecutive pair of entries `col_offsets[j] .. col_offsets[j + 1]` corresponds to an
+///   offset range in `row_indices` that holds the row indices in column `j`.
+/// - For an entry represented by the index `idx`, `row_indices[idx]` stores its row index and
+///   `values[idx]` stores its value.
+///
+/// The following invariants must be upheld and are enforced by the data structure:
+///
+/// - `col_offsets[0] == 0`
+/// - `col_offsets[n] == nnz`
+/// - `col_offsets` is monotonically increasing.
+/// - `0 <= row_indices[idx] < m` for all `idx < nnz`.
+/// - The row indices associated with each column are monotonically increasing (see below).
+///
+/// The CSC format is a standard sparse matrix format (see [Wikipedia article]). The format
+/// represents the matrix in a column-by-column fashion. The entries associated with column `j` are
The entries associated with column `j` are +/// determined as follows: +/// +/// ```rust +/// # let col_offsets: Vec = vec![0, 0]; +/// # let row_indices: Vec = vec![]; +/// # let values: Vec = vec![]; +/// # let j = 0; +/// let range = col_offsets[j] .. col_offsets[j + 1]; +/// let col_j_rows = &row_indices[range.clone()]; +/// let col_j_vals = &values[range]; +/// +/// // For each pair (i, v) in (col_j_rows, col_j_vals), we obtain a corresponding entry +/// // (i, j, v) in the matrix. +/// assert_eq!(col_j_rows.len(), col_j_vals.len()); +/// ``` +/// +/// In the above example, for each column `j`, the row indices `col_j_cols` must appear in +/// monotonically increasing order. In other words, they must be *sorted*. This criterion is not +/// standard among all sparse matrix libraries, but we enforce this property as it is a crucial +/// assumption for both correctness and performance for many algorithms. +/// +/// Note that the CSR and CSC formats are essentially identical, except that CSC stores the matrix +/// column-by-column instead of row-by-row like CSR. +/// +/// [Wikipedia article]: https://en.wikipedia.org/wiki/Sparse_matrix#Compressed_sparse_column_(CSC_or_CCS) +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct CscMatrix { + // Cols are major, rows are minor in the sparsity pattern + pub(crate) cs: CsMatrix, +} + +impl CscMatrix { + /// Constructs a CSC representation of the (square) `n x n` identity matrix. + #[inline] + pub fn identity(n: usize) -> Self + where + T: Scalar + One, + { + Self { + cs: CsMatrix::identity(n), + } + } + + /// Create a zero CSC matrix with no explicitly stored entries. + pub fn zeros(nrows: usize, ncols: usize) -> Self { + Self { + cs: CsMatrix::new(ncols, nrows), + } + } + + /// Try to construct a CSC matrix from raw CSC data. + /// + /// It is assumed that each column contains unique and sorted row indices that are in + /// bounds with respect to the number of rows in the matrix. If this is not the case, + /// an error is returned to indicate the failure. + /// + /// An error is returned if the data given does not conform to the CSC storage format. + /// See the documentation for [CscMatrix](struct.CscMatrix.html) for more information. + pub fn try_from_csc_data( + num_rows: usize, + num_cols: usize, + col_offsets: Vec, + row_indices: Vec, + values: Vec, + ) -> Result { + let pattern = SparsityPattern::try_from_offsets_and_indices( + num_cols, + num_rows, + col_offsets, + row_indices, + ) + .map_err(pattern_format_error_to_csc_error)?; + Self::try_from_pattern_and_values(pattern, values) + } + + /// Try to construct a CSC matrix from a sparsity pattern and associated non-zero values. + /// + /// Returns an error if the number of values does not match the number of minor indices + /// in the pattern. + pub fn try_from_pattern_and_values( + pattern: SparsityPattern, + values: Vec, + ) -> Result { + if pattern.nnz() == values.len() { + Ok(Self { + cs: CsMatrix::from_pattern_and_values(pattern, values), + }) + } else { + Err(SparseFormatError::from_kind_and_msg( + SparseFormatErrorKind::InvalidStructure, + "Number of values and row indices must be the same", + )) + } + } + + /// The number of rows in the matrix. + #[inline] + pub fn nrows(&self) -> usize { + self.cs.pattern().minor_dim() + } + + /// The number of columns in the matrix. + #[inline] + pub fn ncols(&self) -> usize { + self.cs.pattern().major_dim() + } + + /// The number of non-zeros in the matrix. 
+    ///
+    /// Note that this corresponds to the number of explicitly stored entries, *not* the actual
+    /// number of algebraically zero entries in the matrix. Explicitly stored entries can still
+    /// be zero. Corresponds to the number of entries in the sparsity pattern.
+    #[inline]
+    pub fn nnz(&self) -> usize {
+        self.pattern().nnz()
+    }
+
+    /// The column offsets defining part of the CSC format.
+    #[inline]
+    pub fn col_offsets(&self) -> &[usize] {
+        self.pattern().major_offsets()
+    }
+
+    /// The row indices defining part of the CSC format.
+    #[inline]
+    pub fn row_indices(&self) -> &[usize] {
+        self.pattern().minor_indices()
+    }
+
+    /// The non-zero values defining part of the CSC format.
+    #[inline]
+    pub fn values(&self) -> &[T] {
+        self.cs.values()
+    }
+
+    /// Mutable access to the non-zero values.
+    #[inline]
+    pub fn values_mut(&mut self) -> &mut [T] {
+        self.cs.values_mut()
+    }
+
+    /// An iterator over non-zero triplets (i, j, v).
+    ///
+    /// The iteration happens in column-major fashion, meaning that j increases monotonically,
+    /// and i increases monotonically within each column.
+    ///
+    /// Examples
+    /// --------
+    /// ```
+    /// # use nalgebra_sparse::csc::CscMatrix;
+    /// let col_offsets = vec![0, 2, 3, 4];
+    /// let row_indices = vec![0, 2, 1, 0];
+    /// let values = vec![1, 3, 2, 4];
+    /// let csc = CscMatrix::try_from_csc_data(4, 3, col_offsets, row_indices, values)
+    ///     .unwrap();
+    ///
+    /// let triplets: Vec<_> = csc.triplet_iter().map(|(i, j, v)| (i, j, *v)).collect();
+    /// assert_eq!(triplets, vec![(0, 0, 1), (2, 0, 3), (1, 1, 2), (0, 2, 4)]);
+    /// ```
+    pub fn triplet_iter(&self) -> CscTripletIter<T> {
+        CscTripletIter {
+            pattern_iter: self.pattern().entries(),
+            values_iter: self.values().iter(),
+        }
+    }
+
+    /// A mutable iterator over non-zero triplets (i, j, v).
+    ///
+    /// Iteration happens in the same order as for [triplet_iter](#method.triplet_iter).
+    ///
+    /// Examples
+    /// --------
+    /// ```
+    /// # use nalgebra_sparse::csc::CscMatrix;
+    /// let col_offsets = vec![0, 2, 3, 4];
+    /// let row_indices = vec![0, 2, 1, 0];
+    /// let values = vec![1, 3, 2, 4];
+    /// // Using the same data as in the `triplet_iter` example
+    /// let mut csc = CscMatrix::try_from_csc_data(4, 3, col_offsets, row_indices, values)
+    ///     .unwrap();
+    ///
+    /// // Zero out lower-triangular terms
+    /// csc.triplet_iter_mut()
+    ///    .filter(|(i, j, _)| j < i)
+    ///    .for_each(|(_, _, v)| *v = 0);
+    ///
+    /// let triplets: Vec<_> = csc.triplet_iter().map(|(i, j, v)| (i, j, *v)).collect();
+    /// assert_eq!(triplets, vec![(0, 0, 1), (2, 0, 0), (1, 1, 2), (0, 2, 4)]);
+    /// ```
+    pub fn triplet_iter_mut(&mut self) -> CscTripletIterMut<T> {
+        let (pattern, values) = self.cs.pattern_and_values_mut();
+        CscTripletIterMut {
+            pattern_iter: pattern.entries(),
+            values_mut_iter: values.iter_mut(),
+        }
+    }
+
+    /// Return the column at the given column index.
+    ///
+    /// Panics
+    /// ------
+    /// Panics if column index is out of bounds.
+    #[inline]
+    pub fn col(&self, index: usize) -> CscCol<T> {
+        self.get_col(index).expect("Col index must be in bounds")
+    }
+
+    /// Mutable column access for the given column index.
+    ///
+    /// Panics
+    /// ------
+    /// Panics if column index is out of bounds.
+    #[inline]
+    pub fn col_mut(&mut self, index: usize) -> CscColMut<T> {
+        self.get_col_mut(index)
+            .expect("Col index must be in bounds")
+    }
+
+    /// Return the column at the given column index, or `None` if out of bounds.
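+    ///
+    /// A minimal sketch (using a hypothetical empty matrix) of the bounds behavior:
+    ///
+    /// ```
+    /// # use nalgebra_sparse::csc::CscMatrix;
+    /// let csc = CscMatrix::<f64>::zeros(3, 2);
+    /// assert!(csc.get_col(1).is_some());
+    /// assert!(csc.get_col(2).is_none());
+    /// ```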
+ #[inline] + pub fn get_col(&self, index: usize) -> Option> { + self.cs.get_lane(index).map(|lane| CscCol { lane }) + } + + /// Mutable column access for the given column index, or `None` if out of bounds. + #[inline] + pub fn get_col_mut(&mut self, index: usize) -> Option> { + self.cs.get_lane_mut(index).map(|lane| CscColMut { lane }) + } + + /// An iterator over columns in the matrix. + pub fn col_iter(&self) -> CscColIter { + CscColIter { + lane_iter: CsLaneIter::new(self.pattern(), self.values()), + } + } + + /// A mutable iterator over columns in the matrix. + pub fn col_iter_mut(&mut self) -> CscColIterMut { + let (pattern, values) = self.cs.pattern_and_values_mut(); + CscColIterMut { + lane_iter: CsLaneIterMut::new(pattern, values), + } + } + + /// Disassembles the CSC matrix into its underlying offset, index and value arrays. + /// + /// If the matrix contains the sole reference to the sparsity pattern, + /// then the data is returned as-is. Otherwise, the sparsity pattern is cloned. + /// + /// Examples + /// -------- + /// + /// ``` + /// # use nalgebra_sparse::csc::CscMatrix; + /// let col_offsets = vec![0, 2, 3, 4]; + /// let row_indices = vec![0, 2, 1, 0]; + /// let values = vec![1, 3, 2, 4]; + /// let mut csc = CscMatrix::try_from_csc_data( + /// 4, + /// 3, + /// col_offsets.clone(), + /// row_indices.clone(), + /// values.clone()) + /// .unwrap(); + /// let (col_offsets2, row_indices2, values2) = csc.disassemble(); + /// assert_eq!(col_offsets2, col_offsets); + /// assert_eq!(row_indices2, row_indices); + /// assert_eq!(values2, values); + /// ``` + pub fn disassemble(self) -> (Vec, Vec, Vec) { + self.cs.disassemble() + } + + /// Returns the sparsity pattern and values associated with this matrix. + pub fn into_pattern_and_values(self) -> (SparsityPattern, Vec) { + self.cs.into_pattern_and_values() + } + + /// Returns a reference to the sparsity pattern and a mutable reference to the values. + #[inline] + pub fn pattern_and_values_mut(&mut self) -> (&SparsityPattern, &mut [T]) { + self.cs.pattern_and_values_mut() + } + + /// Returns a reference to the underlying sparsity pattern. + pub fn pattern(&self) -> &SparsityPattern { + self.cs.pattern() + } + + /// Reinterprets the CSC matrix as its transpose represented by a CSR matrix. + /// + /// This operation does not touch the CSC data, and is effectively a no-op. + pub fn transpose_as_csr(self) -> CsrMatrix { + let (pattern, values) = self.cs.take_pattern_and_values(); + CsrMatrix::try_from_pattern_and_values(pattern, values).unwrap() + } + + /// Returns an entry for the given row/col indices, or `None` if the indices are out of bounds. + /// + /// Each call to this function incurs the cost of a binary search among the explicitly + /// stored row entries for the given column. + pub fn get_entry(&self, row_index: usize, col_index: usize) -> Option> { + self.cs.get_entry(col_index, row_index) + } + + /// Returns a mutable entry for the given row/col indices, or `None` if the indices are out + /// of bounds. + /// + /// Each call to this function incurs the cost of a binary search among the explicitly + /// stored row entries for the given column. + pub fn get_entry_mut( + &mut self, + row_index: usize, + col_index: usize, + ) -> Option> { + self.cs.get_entry_mut(col_index, row_index) + } + + /// Returns an entry for the given row/col indices. + /// + /// Same as `get_entry`, except that it directly panics upon encountering row/col indices + /// out of bounds. 
+    ///
+    /// Panics
+    /// ------
+    /// Panics if `row_index` or `col_index` is out of bounds.
+    pub fn index_entry(&self, row_index: usize, col_index: usize) -> SparseEntry<T> {
+        self.get_entry(row_index, col_index)
+            .expect("Out of bounds matrix indices encountered")
+    }
+
+    /// Returns a mutable entry for the given row/col indices.
+    ///
+    /// Same as `get_entry_mut`, except that it directly panics upon encountering row/col indices
+    /// out of bounds.
+    ///
+    /// Panics
+    /// ------
+    /// Panics if `row_index` or `col_index` is out of bounds.
+    pub fn index_entry_mut(&mut self, row_index: usize, col_index: usize) -> SparseEntryMut<T> {
+        self.get_entry_mut(row_index, col_index)
+            .expect("Out of bounds matrix indices encountered")
+    }
+
+    /// Returns a triplet of slices `(col_offsets, row_indices, values)` that make up the CSC data.
+    pub fn csc_data(&self) -> (&[usize], &[usize], &[T]) {
+        self.cs.cs_data()
+    }
+
+    /// Returns a triplet of slices `(col_offsets, row_indices, values)` that make up the CSC data,
+    /// where the `values` array is mutable.
+    pub fn csc_data_mut(&mut self) -> (&[usize], &[usize], &mut [T]) {
+        self.cs.cs_data_mut()
+    }
+
+    /// Creates a sparse matrix that contains only the explicit entries decided by the
+    /// given predicate.
+    pub fn filter
<P>
(&self, predicate: P) -> Self + where + T: Clone, + P: Fn(usize, usize, &T) -> bool, + { + // Note: Predicate uses (row, col, value), so we have to switch around since + // cs uses (major, minor, value) + Self { + cs: self + .cs + .filter(|col_idx, row_idx, v| predicate(row_idx, col_idx, v)), + } + } + + /// Returns a new matrix representing the upper triangular part of this matrix. + /// + /// The result includes the diagonal of the matrix. + pub fn upper_triangle(&self) -> Self + where + T: Clone, + { + self.filter(|i, j, _| i <= j) + } + + /// Returns a new matrix representing the lower triangular part of this matrix. + /// + /// The result includes the diagonal of the matrix. + pub fn lower_triangle(&self) -> Self + where + T: Clone, + { + self.filter(|i, j, _| i >= j) + } + + /// Returns the diagonal of the matrix as a sparse matrix. + pub fn diagonal_as_csc(&self) -> Self + where + T: Clone, + { + Self { + cs: self.cs.diagonal_as_matrix(), + } + } + + /// Compute the transpose of the matrix. + pub fn transpose(&self) -> CscMatrix + where + T: Scalar, + { + CsrMatrix::from(self).transpose_as_csc() + } +} + +/// Convert pattern format errors into more meaningful CSC-specific errors. +/// +/// This ensures that the terminology is consistent: we are talking about rows and columns, +/// not lanes, major and minor dimensions. +fn pattern_format_error_to_csc_error(err: SparsityPatternFormatError) -> SparseFormatError { + use SparseFormatError as E; + use SparseFormatErrorKind as K; + use SparsityPatternFormatError::DuplicateEntry as PatternDuplicateEntry; + use SparsityPatternFormatError::*; + + match err { + InvalidOffsetArrayLength => E::from_kind_and_msg( + K::InvalidStructure, + "Length of col offset array is not equal to ncols + 1.", + ), + InvalidOffsetFirstLast => E::from_kind_and_msg( + K::InvalidStructure, + "First or last col offset is inconsistent with format specification.", + ), + NonmonotonicOffsets => E::from_kind_and_msg( + K::InvalidStructure, + "Col offsets are not monotonically increasing.", + ), + NonmonotonicMinorIndices => E::from_kind_and_msg( + K::InvalidStructure, + "Row indices are not monotonically increasing (sorted) within each column.", + ), + MinorIndexOutOfBounds => { + E::from_kind_and_msg(K::IndexOutOfBounds, "Row indices are out of bounds.") + } + PatternDuplicateEntry => { + E::from_kind_and_msg(K::DuplicateEntry, "Matrix data contains duplicate entries.") + } + } +} + +/// Iterator type for iterating over triplets in a CSC matrix. +#[derive(Debug)] +pub struct CscTripletIter<'a, T> { + pattern_iter: SparsityPatternIter<'a>, + values_iter: Iter<'a, T>, +} + +impl<'a, T: Clone> CscTripletIter<'a, T> { + /// Adapts the triplet iterator to return owned values. + /// + /// The triplet iterator returns references to the values. This method adapts the iterator + /// so that the values are cloned. + #[inline] + pub fn cloned_values(self) -> impl 'a + Iterator { + self.map(|(i, j, v)| (i, j, v.clone())) + } +} + +impl<'a, T> Iterator for CscTripletIter<'a, T> { + type Item = (usize, usize, &'a T); + + fn next(&mut self) -> Option { + let next_entry = self.pattern_iter.next(); + let next_value = self.values_iter.next(); + + match (next_entry, next_value) { + (Some((i, j)), Some(v)) => Some((j, i, v)), + _ => None, + } + } +} + +/// Iterator type for mutably iterating over triplets in a CSC matrix. 
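+///
+/// Only the values are yielded mutably; the row and column indices of each triplet are
+/// determined by the (immutable) sparsity pattern.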
+#[derive(Debug)] +pub struct CscTripletIterMut<'a, T> { + pattern_iter: SparsityPatternIter<'a>, + values_mut_iter: IterMut<'a, T>, +} + +impl<'a, T> Iterator for CscTripletIterMut<'a, T> { + type Item = (usize, usize, &'a mut T); + + #[inline] + fn next(&mut self) -> Option { + let next_entry = self.pattern_iter.next(); + let next_value = self.values_mut_iter.next(); + + match (next_entry, next_value) { + (Some((i, j)), Some(v)) => Some((j, i, v)), + _ => None, + } + } +} + +/// An immutable representation of a column in a CSC matrix. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct CscCol<'a, T> { + lane: CsLane<'a, T>, +} + +/// A mutable representation of a column in a CSC matrix. +/// +/// Note that only explicitly stored entries can be mutated. The sparsity pattern belonging +/// to the column cannot be modified. +#[derive(Debug, PartialEq, Eq)] +pub struct CscColMut<'a, T> { + lane: CsLaneMut<'a, T>, +} + +/// Implement the methods common to both CscCol and CscColMut +macro_rules! impl_csc_col_common_methods { + ($name:ty) => { + impl<'a, T> $name { + /// The number of global rows in the column. + #[inline] + pub fn nrows(&self) -> usize { + self.lane.minor_dim() + } + + /// The number of non-zeros in this column. + #[inline] + pub fn nnz(&self) -> usize { + self.lane.nnz() + } + + /// The row indices corresponding to explicitly stored entries in this column. + #[inline] + pub fn row_indices(&self) -> &[usize] { + self.lane.minor_indices() + } + + /// The values corresponding to explicitly stored entries in this column. + #[inline] + pub fn values(&self) -> &[T] { + self.lane.values() + } + + /// Returns an entry for the given global row index. + /// + /// Each call to this function incurs the cost of a binary search among the explicitly + /// stored row entries. + pub fn get_entry(&self, global_row_index: usize) -> Option> { + self.lane.get_entry(global_row_index) + } + } + }; +} + +impl_csc_col_common_methods!(CscCol<'a, T>); +impl_csc_col_common_methods!(CscColMut<'a, T>); + +impl<'a, T> CscColMut<'a, T> { + /// Mutable access to the values corresponding to explicitly stored entries in this column. + pub fn values_mut(&mut self) -> &mut [T] { + self.lane.values_mut() + } + + /// Provides simultaneous access to row indices and mutable values corresponding to the + /// explicitly stored entries in this column. + /// + /// This method primarily facilitates low-level access for methods that process data stored + /// in CSC format directly. + pub fn rows_and_values_mut(&mut self) -> (&[usize], &mut [T]) { + self.lane.indices_and_values_mut() + } + + /// Returns a mutable entry for the given global row index. + pub fn get_entry_mut(&mut self, global_row_index: usize) -> Option> { + self.lane.get_entry_mut(global_row_index) + } +} + +/// Column iterator for [CscMatrix](struct.CscMatrix.html). +pub struct CscColIter<'a, T> { + lane_iter: CsLaneIter<'a, T>, +} + +impl<'a, T> Iterator for CscColIter<'a, T> { + type Item = CscCol<'a, T>; + + fn next(&mut self) -> Option { + self.lane_iter.next().map(|lane| CscCol { lane }) + } +} + +/// Mutable column iterator for [CscMatrix](struct.CscMatrix.html). 
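+///
+/// Yields a [`CscColMut`] for each column. As a minimal sketch, iterating mutably over the
+/// columns of a hypothetical matrix to scale their values might look like this:
+///
+/// ```
+/// # use nalgebra_sparse::csc::CscMatrix;
+/// let mut csc = CscMatrix::<f64>::identity(3);
+/// for mut col in csc.col_iter_mut() {
+///     col.values_mut().iter_mut().for_each(|v| *v *= 2.0);
+/// }
+/// assert!(csc.values().iter().all(|&v| v == 2.0));
+/// ```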
+pub struct CscColIterMut<'a, T> {
+    lane_iter: CsLaneIterMut<'a, T>,
+}
+
+impl<'a, T> Iterator for CscColIterMut<'a, T>
+where
+    T: 'a,
+{
+    type Item = CscColMut<'a, T>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        self.lane_iter.next().map(|lane| CscColMut { lane })
+    }
+}
diff --git a/nalgebra-sparse/src/csr.rs b/nalgebra-sparse/src/csr.rs
new file mode 100644
index 00000000..ded189eb
--- /dev/null
+++ b/nalgebra-sparse/src/csr.rs
@@ -0,0 +1,708 @@
+//! An implementation of the CSR sparse matrix format.
+//!
+//! This is the module-level documentation. See [`CsrMatrix`] for the main documentation of the
+//! CSR implementation.
+use crate::cs::{CsLane, CsLaneIter, CsLaneIterMut, CsLaneMut, CsMatrix};
+use crate::csc::CscMatrix;
+use crate::pattern::{SparsityPattern, SparsityPatternFormatError, SparsityPatternIter};
+use crate::{SparseEntry, SparseEntryMut, SparseFormatError, SparseFormatErrorKind};
+
+use nalgebra::Scalar;
+use num_traits::One;
+
+use std::slice::{Iter, IterMut};
+
+/// A CSR representation of a sparse matrix.
+///
+/// The Compressed Sparse Row (CSR) format is well-suited as a general-purpose storage format
+/// for many sparse matrix applications.
+///
+/// # Usage
+///
+/// ```rust
+/// use nalgebra_sparse::csr::CsrMatrix;
+/// use nalgebra::{DMatrix, Matrix3x4};
+/// use matrixcompare::assert_matrix_eq;
+///
+/// // The sparsity patterns of CSR matrices are immutable. This means that you cannot dynamically
+/// // change the sparsity pattern of the matrix after it has been constructed. The easiest
+/// // way to construct a CSR matrix is to first incrementally construct a COO matrix,
+/// // and then convert it to CSR.
+/// # use nalgebra_sparse::coo::CooMatrix;
+/// # let coo = CooMatrix::<f64>::new(3, 3);
+/// let csr = CsrMatrix::from(&coo);
+///
+/// // Alternatively, a CSR matrix can be constructed directly from raw CSR data.
+/// // Here, we construct a 3x4 matrix
+/// let row_offsets = vec![0, 3, 3, 5];
+/// let col_indices = vec![0, 1, 3, 1, 2];
+/// let values = vec![1.0, 2.0, 3.0, 4.0, 5.0];
+///
+/// // The dense representation of the CSR data, for comparison
+/// let dense = Matrix3x4::new(1.0, 2.0, 0.0, 3.0,
+///                            0.0, 0.0, 0.0, 0.0,
+///                            0.0, 4.0, 5.0, 0.0);
+///
+/// // The constructor validates the raw CSR data and returns an error if it is invalid.
+/// let csr = CsrMatrix::try_from_csr_data(3, 4, row_offsets, col_indices, values)
+///     .expect("CSR data must conform to format specifications");
+/// assert_matrix_eq!(csr, dense);
+///
+/// // A third approach is to construct a CSR matrix from a pattern and values. Sometimes this is
+/// // useful if the sparsity pattern is constructed separately from the values of the matrix.
+/// let (pattern, values) = csr.into_pattern_and_values();
+/// let csr = CsrMatrix::try_from_pattern_and_values(pattern, values)
+///     .expect("The pattern and values must be compatible");
+///
+/// // Once we have constructed our matrix, we can use it for arithmetic operations together with
+/// // other CSR matrices and dense matrices/vectors.
+/// let x = csr;
+/// # #[allow(non_snake_case)]
+/// let xTx = x.transpose() * &x;
+/// let z = DMatrix::from_fn(4, 8, |i, j| (i as f64) * (j as f64));
+/// let w = 3.0 * xTx * z;
+///
+/// // Although the sparsity pattern of a CSR matrix cannot be changed, its values can.
+/// // Here are two different ways to scale all values by a constant: +/// let mut x = x; +/// x *= 5.0; +/// x.values_mut().iter_mut().for_each(|x_i| *x_i *= 5.0); +/// ``` +/// +/// # Format +/// +/// An `m x n` sparse matrix with `nnz` non-zeros in CSR format is represented by the +/// following three arrays: +/// +/// - `row_offsets`, an array of integers with length `m + 1`. +/// - `col_indices`, an array of integers with length `nnz`. +/// - `values`, an array of values with length `nnz`. +/// +/// The relationship between the arrays is described below. +/// +/// - Each consecutive pair of entries `row_offsets[i] .. row_offsets[i + 1]` corresponds to an +/// offset range in `col_indices` that holds the column indices in row `i`. +/// - For an entry represented by the index `idx`, `col_indices[idx]` stores its column index and +/// `values[idx]` stores its value. +/// +/// The following invariants must be upheld and are enforced by the data structure: +/// +/// - `row_offsets[0] == 0` +/// - `row_offsets[m] == nnz` +/// - `row_offsets` is monotonically increasing. +/// - `0 <= col_indices[idx] < n` for all `idx < nnz`. +/// - The column indices associated with each row are monotonically increasing (see below). +/// +/// The CSR format is a standard sparse matrix format (see [Wikipedia article]). The format +/// represents the matrix in a row-by-row fashion. The entries associated with row `i` are +/// determined as follows: +/// +/// ```rust +/// # let row_offsets: Vec = vec![0, 0]; +/// # let col_indices: Vec = vec![]; +/// # let values: Vec = vec![]; +/// # let i = 0; +/// let range = row_offsets[i] .. row_offsets[i + 1]; +/// let row_i_cols = &col_indices[range.clone()]; +/// let row_i_vals = &values[range]; +/// +/// // For each pair (j, v) in (row_i_cols, row_i_vals), we obtain a corresponding entry +/// // (i, j, v) in the matrix. +/// assert_eq!(row_i_cols.len(), row_i_vals.len()); +/// ``` +/// +/// In the above example, for each row `i`, the column indices `row_i_cols` must appear in +/// monotonically increasing order. In other words, they must be *sorted*. This criterion is not +/// standard among all sparse matrix libraries, but we enforce this property as it is a crucial +/// assumption for both correctness and performance for many algorithms. +/// +/// Note that the CSR and CSC formats are essentially identical, except that CSC stores the matrix +/// column-by-column instead of row-by-row like CSR. +/// +/// [Wikipedia article]: https://en.wikipedia.org/wiki/Sparse_matrix#Compressed_sparse_row_(CSR,_CRS_or_Yale_format) +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct CsrMatrix { + // Rows are major, cols are minor in the sparsity pattern + pub(crate) cs: CsMatrix, +} + +impl CsrMatrix { + /// Constructs a CSR representation of the (square) `n x n` identity matrix. + #[inline] + pub fn identity(n: usize) -> Self + where + T: Scalar + One, + { + Self { + cs: CsMatrix::identity(n), + } + } + + /// Create a zero CSR matrix with no explicitly stored entries. + pub fn zeros(nrows: usize, ncols: usize) -> Self { + Self { + cs: CsMatrix::new(nrows, ncols), + } + } + + /// Try to construct a CSR matrix from raw CSR data. + /// + /// It is assumed that each row contains unique and sorted column indices that are in + /// bounds with respect to the number of columns in the matrix. If this is not the case, + /// an error is returned to indicate the failure. + /// + /// An error is returned if the data given does not conform to the CSR storage format. 
+ /// See the documentation for [CsrMatrix](struct.CsrMatrix.html) for more information. + pub fn try_from_csr_data( + num_rows: usize, + num_cols: usize, + row_offsets: Vec, + col_indices: Vec, + values: Vec, + ) -> Result { + let pattern = SparsityPattern::try_from_offsets_and_indices( + num_rows, + num_cols, + row_offsets, + col_indices, + ) + .map_err(pattern_format_error_to_csr_error)?; + Self::try_from_pattern_and_values(pattern, values) + } + + /// Try to construct a CSR matrix from a sparsity pattern and associated non-zero values. + /// + /// Returns an error if the number of values does not match the number of minor indices + /// in the pattern. + pub fn try_from_pattern_and_values( + pattern: SparsityPattern, + values: Vec, + ) -> Result { + if pattern.nnz() == values.len() { + Ok(Self { + cs: CsMatrix::from_pattern_and_values(pattern, values), + }) + } else { + Err(SparseFormatError::from_kind_and_msg( + SparseFormatErrorKind::InvalidStructure, + "Number of values and column indices must be the same", + )) + } + } + + /// The number of rows in the matrix. + #[inline] + pub fn nrows(&self) -> usize { + self.cs.pattern().major_dim() + } + + /// The number of columns in the matrix. + #[inline] + pub fn ncols(&self) -> usize { + self.cs.pattern().minor_dim() + } + + /// The number of non-zeros in the matrix. + /// + /// Note that this corresponds to the number of explicitly stored entries, *not* the actual + /// number of algebraically zero entries in the matrix. Explicitly stored entries can still + /// be zero. Corresponds to the number of entries in the sparsity pattern. + #[inline] + pub fn nnz(&self) -> usize { + self.cs.pattern().nnz() + } + + /// The row offsets defining part of the CSR format. + #[inline] + pub fn row_offsets(&self) -> &[usize] { + let (offsets, _, _) = self.cs.cs_data(); + offsets + } + + /// The column indices defining part of the CSR format. + #[inline] + pub fn col_indices(&self) -> &[usize] { + let (_, indices, _) = self.cs.cs_data(); + indices + } + + /// The non-zero values defining part of the CSR format. + #[inline] + pub fn values(&self) -> &[T] { + self.cs.values() + } + + /// Mutable access to the non-zero values. + #[inline] + pub fn values_mut(&mut self) -> &mut [T] { + self.cs.values_mut() + } + + /// An iterator over non-zero triplets (i, j, v). + /// + /// The iteration happens in row-major fashion, meaning that i increases monotonically, + /// and j increases monotonically within each row. + /// + /// Examples + /// -------- + /// ``` + /// # use nalgebra_sparse::csr::CsrMatrix; + /// let row_offsets = vec![0, 2, 3, 4]; + /// let col_indices = vec![0, 2, 1, 0]; + /// let values = vec![1, 2, 3, 4]; + /// let mut csr = CsrMatrix::try_from_csr_data(3, 4, row_offsets, col_indices, values) + /// .unwrap(); + /// + /// let triplets: Vec<_> = csr.triplet_iter().map(|(i, j, v)| (i, j, *v)).collect(); + /// assert_eq!(triplets, vec![(0, 0, 1), (0, 2, 2), (1, 1, 3), (2, 0, 4)]); + /// ``` + pub fn triplet_iter(&self) -> CsrTripletIter { + CsrTripletIter { + pattern_iter: self.pattern().entries(), + values_iter: self.values().iter(), + } + } + + /// A mutable iterator over non-zero triplets (i, j, v). + /// + /// Iteration happens in the same order as for [triplet_iter](#method.triplet_iter). 
+ /// + /// Examples + /// -------- + /// ``` + /// # use nalgebra_sparse::csr::CsrMatrix; + /// # let row_offsets = vec![0, 2, 3, 4]; + /// # let col_indices = vec![0, 2, 1, 0]; + /// # let values = vec![1, 2, 3, 4]; + /// // Using the same data as in the `triplet_iter` example + /// let mut csr = CsrMatrix::try_from_csr_data(3, 4, row_offsets, col_indices, values) + /// .unwrap(); + /// + /// // Zero out lower-triangular terms + /// csr.triplet_iter_mut() + /// .filter(|(i, j, _)| j < i) + /// .for_each(|(_, _, v)| *v = 0); + /// + /// let triplets: Vec<_> = csr.triplet_iter().map(|(i, j, v)| (i, j, *v)).collect(); + /// assert_eq!(triplets, vec![(0, 0, 1), (0, 2, 2), (1, 1, 3), (2, 0, 0)]); + /// ``` + pub fn triplet_iter_mut(&mut self) -> CsrTripletIterMut { + let (pattern, values) = self.cs.pattern_and_values_mut(); + CsrTripletIterMut { + pattern_iter: pattern.entries(), + values_mut_iter: values.iter_mut(), + } + } + + /// Return the row at the given row index. + /// + /// Panics + /// ------ + /// Panics if row index is out of bounds. + #[inline] + pub fn row(&self, index: usize) -> CsrRow { + self.get_row(index).expect("Row index must be in bounds") + } + + /// Mutable row access for the given row index. + /// + /// Panics + /// ------ + /// Panics if row index is out of bounds. + #[inline] + pub fn row_mut(&mut self, index: usize) -> CsrRowMut { + self.get_row_mut(index) + .expect("Row index must be in bounds") + } + + /// Return the row at the given row index, or `None` if out of bounds. + #[inline] + pub fn get_row(&self, index: usize) -> Option> { + self.cs.get_lane(index).map(|lane| CsrRow { lane }) + } + + /// Mutable row access for the given row index, or `None` if out of bounds. + #[inline] + pub fn get_row_mut(&mut self, index: usize) -> Option> { + self.cs.get_lane_mut(index).map(|lane| CsrRowMut { lane }) + } + + /// An iterator over rows in the matrix. + pub fn row_iter(&self) -> CsrRowIter { + CsrRowIter { + lane_iter: CsLaneIter::new(self.pattern(), self.values()), + } + } + + /// A mutable iterator over rows in the matrix. + pub fn row_iter_mut(&mut self) -> CsrRowIterMut { + let (pattern, values) = self.cs.pattern_and_values_mut(); + CsrRowIterMut { + lane_iter: CsLaneIterMut::new(pattern, values), + } + } + + /// Disassembles the CSR matrix into its underlying offset, index and value arrays. + /// + /// If the matrix contains the sole reference to the sparsity pattern, + /// then the data is returned as-is. Otherwise, the sparsity pattern is cloned. + /// + /// Examples + /// -------- + /// + /// ``` + /// # use nalgebra_sparse::csr::CsrMatrix; + /// let row_offsets = vec![0, 2, 3, 4]; + /// let col_indices = vec![0, 2, 1, 0]; + /// let values = vec![1, 2, 3, 4]; + /// let mut csr = CsrMatrix::try_from_csr_data( + /// 3, + /// 4, + /// row_offsets.clone(), + /// col_indices.clone(), + /// values.clone()) + /// .unwrap(); + /// let (row_offsets2, col_indices2, values2) = csr.disassemble(); + /// assert_eq!(row_offsets2, row_offsets); + /// assert_eq!(col_indices2, col_indices); + /// assert_eq!(values2, values); + /// ``` + pub fn disassemble(self) -> (Vec, Vec, Vec) { + self.cs.disassemble() + } + + /// Returns the sparsity pattern and values associated with this matrix. + pub fn into_pattern_and_values(self) -> (SparsityPattern, Vec) { + self.cs.into_pattern_and_values() + } + + /// Returns a reference to the sparsity pattern and a mutable reference to the values. 
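+    ///
+    /// A minimal sketch of the intended usage, assuming the pattern must be read while the
+    /// values are mutated (the identity matrix here is just illustration data):
+    ///
+    /// ```
+    /// # use nalgebra_sparse::csr::CsrMatrix;
+    /// let mut csr = CsrMatrix::<f64>::identity(3);
+    /// let (pattern, values) = csr.pattern_and_values_mut();
+    /// // The pattern can be inspected while the values are modified in place.
+    /// assert_eq!(pattern.nnz(), values.len());
+    /// values.iter_mut().for_each(|v| *v *= 2.0);
+    /// ```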
+ #[inline] + pub fn pattern_and_values_mut(&mut self) -> (&SparsityPattern, &mut [T]) { + self.cs.pattern_and_values_mut() + } + + /// Returns a reference to the underlying sparsity pattern. + pub fn pattern(&self) -> &SparsityPattern { + self.cs.pattern() + } + + /// Reinterprets the CSR matrix as its transpose represented by a CSC matrix. + /// + /// This operation does not touch the CSR data, and is effectively a no-op. + pub fn transpose_as_csc(self) -> CscMatrix { + let (pattern, values) = self.cs.take_pattern_and_values(); + CscMatrix::try_from_pattern_and_values(pattern, values).unwrap() + } + + /// Returns an entry for the given row/col indices, or `None` if the indices are out of bounds. + /// + /// Each call to this function incurs the cost of a binary search among the explicitly + /// stored column entries for the given row. + pub fn get_entry(&self, row_index: usize, col_index: usize) -> Option> { + self.cs.get_entry(row_index, col_index) + } + + /// Returns a mutable entry for the given row/col indices, or `None` if the indices are out + /// of bounds. + /// + /// Each call to this function incurs the cost of a binary search among the explicitly + /// stored column entries for the given row. + pub fn get_entry_mut( + &mut self, + row_index: usize, + col_index: usize, + ) -> Option> { + self.cs.get_entry_mut(row_index, col_index) + } + + /// Returns an entry for the given row/col indices. + /// + /// Same as `get_entry`, except that it directly panics upon encountering row/col indices + /// out of bounds. + /// + /// Panics + /// ------ + /// Panics if `row_index` or `col_index` is out of bounds. + pub fn index_entry(&self, row_index: usize, col_index: usize) -> SparseEntry { + self.get_entry(row_index, col_index) + .expect("Out of bounds matrix indices encountered") + } + + /// Returns a mutable entry for the given row/col indices. + /// + /// Same as `get_entry_mut`, except that it directly panics upon encountering row/col indices + /// out of bounds. + /// + /// Panics + /// ------ + /// Panics if `row_index` or `col_index` is out of bounds. + pub fn index_entry_mut(&mut self, row_index: usize, col_index: usize) -> SparseEntryMut { + self.get_entry_mut(row_index, col_index) + .expect("Out of bounds matrix indices encountered") + } + + /// Returns a triplet of slices `(row_offsets, col_indices, values)` that make up the CSR data. + pub fn csr_data(&self) -> (&[usize], &[usize], &[T]) { + self.cs.cs_data() + } + + /// Returns a triplet of slices `(row_offsets, col_indices, values)` that make up the CSR data, + /// where the `values` array is mutable. + pub fn csr_data_mut(&mut self) -> (&[usize], &[usize], &mut [T]) { + self.cs.cs_data_mut() + } + + /// Creates a sparse matrix that contains only the explicit entries decided by the + /// given predicate. + pub fn filter
<P>
(&self, predicate: P) -> Self + where + T: Clone, + P: Fn(usize, usize, &T) -> bool, + { + Self { + cs: self + .cs + .filter(|row_idx, col_idx, v| predicate(row_idx, col_idx, v)), + } + } + + /// Returns a new matrix representing the upper triangular part of this matrix. + /// + /// The result includes the diagonal of the matrix. + pub fn upper_triangle(&self) -> Self + where + T: Clone, + { + self.filter(|i, j, _| i <= j) + } + + /// Returns a new matrix representing the lower triangular part of this matrix. + /// + /// The result includes the diagonal of the matrix. + pub fn lower_triangle(&self) -> Self + where + T: Clone, + { + self.filter(|i, j, _| i >= j) + } + + /// Returns the diagonal of the matrix as a sparse matrix. + pub fn diagonal_as_csr(&self) -> Self + where + T: Clone, + { + Self { + cs: self.cs.diagonal_as_matrix(), + } + } + + /// Compute the transpose of the matrix. + pub fn transpose(&self) -> CsrMatrix + where + T: Scalar, + { + CscMatrix::from(self).transpose_as_csr() + } +} + +/// Convert pattern format errors into more meaningful CSR-specific errors. +/// +/// This ensures that the terminology is consistent: we are talking about rows and columns, +/// not lanes, major and minor dimensions. +fn pattern_format_error_to_csr_error(err: SparsityPatternFormatError) -> SparseFormatError { + use SparseFormatError as E; + use SparseFormatErrorKind as K; + use SparsityPatternFormatError::DuplicateEntry as PatternDuplicateEntry; + use SparsityPatternFormatError::*; + + match err { + InvalidOffsetArrayLength => E::from_kind_and_msg( + K::InvalidStructure, + "Length of row offset array is not equal to nrows + 1.", + ), + InvalidOffsetFirstLast => E::from_kind_and_msg( + K::InvalidStructure, + "First or last row offset is inconsistent with format specification.", + ), + NonmonotonicOffsets => E::from_kind_and_msg( + K::InvalidStructure, + "Row offsets are not monotonically increasing.", + ), + NonmonotonicMinorIndices => E::from_kind_and_msg( + K::InvalidStructure, + "Column indices are not monotonically increasing (sorted) within each row.", + ), + MinorIndexOutOfBounds => { + E::from_kind_and_msg(K::IndexOutOfBounds, "Column indices are out of bounds.") + } + PatternDuplicateEntry => { + E::from_kind_and_msg(K::DuplicateEntry, "Matrix data contains duplicate entries.") + } + } +} + +/// Iterator type for iterating over triplets in a CSR matrix. +#[derive(Debug)] +pub struct CsrTripletIter<'a, T> { + pattern_iter: SparsityPatternIter<'a>, + values_iter: Iter<'a, T>, +} + +impl<'a, T: Clone> CsrTripletIter<'a, T> { + /// Adapts the triplet iterator to return owned values. + /// + /// The triplet iterator returns references to the values. This method adapts the iterator + /// so that the values are cloned. + #[inline] + pub fn cloned_values(self) -> impl 'a + Iterator { + self.map(|(i, j, v)| (i, j, v.clone())) + } +} + +impl<'a, T> Iterator for CsrTripletIter<'a, T> { + type Item = (usize, usize, &'a T); + + fn next(&mut self) -> Option { + let next_entry = self.pattern_iter.next(); + let next_value = self.values_iter.next(); + + match (next_entry, next_value) { + (Some((i, j)), Some(v)) => Some((i, j, v)), + _ => None, + } + } +} + +/// Iterator type for mutably iterating over triplets in a CSR matrix. 
+#[derive(Debug)] +pub struct CsrTripletIterMut<'a, T> { + pattern_iter: SparsityPatternIter<'a>, + values_mut_iter: IterMut<'a, T>, +} + +impl<'a, T> Iterator for CsrTripletIterMut<'a, T> { + type Item = (usize, usize, &'a mut T); + + #[inline] + fn next(&mut self) -> Option { + let next_entry = self.pattern_iter.next(); + let next_value = self.values_mut_iter.next(); + + match (next_entry, next_value) { + (Some((i, j)), Some(v)) => Some((i, j, v)), + _ => None, + } + } +} + +/// An immutable representation of a row in a CSR matrix. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct CsrRow<'a, T> { + lane: CsLane<'a, T>, +} + +/// A mutable representation of a row in a CSR matrix. +/// +/// Note that only explicitly stored entries can be mutated. The sparsity pattern belonging +/// to the row cannot be modified. +#[derive(Debug, PartialEq, Eq)] +pub struct CsrRowMut<'a, T> { + lane: CsLaneMut<'a, T>, +} + +/// Implement the methods common to both CsrRow and CsrRowMut +macro_rules! impl_csr_row_common_methods { + ($name:ty) => { + impl<'a, T> $name { + /// The number of global columns in the row. + #[inline] + pub fn ncols(&self) -> usize { + self.lane.minor_dim() + } + + /// The number of non-zeros in this row. + #[inline] + pub fn nnz(&self) -> usize { + self.lane.nnz() + } + + /// The column indices corresponding to explicitly stored entries in this row. + #[inline] + pub fn col_indices(&self) -> &[usize] { + self.lane.minor_indices() + } + + /// The values corresponding to explicitly stored entries in this row. + #[inline] + pub fn values(&self) -> &[T] { + self.lane.values() + } + + /// Returns an entry for the given global column index. + /// + /// Each call to this function incurs the cost of a binary search among the explicitly + /// stored column entries. + #[inline] + pub fn get_entry(&self, global_col_index: usize) -> Option> { + self.lane.get_entry(global_col_index) + } + } + }; +} + +impl_csr_row_common_methods!(CsrRow<'a, T>); +impl_csr_row_common_methods!(CsrRowMut<'a, T>); + +impl<'a, T> CsrRowMut<'a, T> { + /// Mutable access to the values corresponding to explicitly stored entries in this row. + #[inline] + pub fn values_mut(&mut self) -> &mut [T] { + self.lane.values_mut() + } + + /// Provides simultaneous access to column indices and mutable values corresponding to the + /// explicitly stored entries in this row. + /// + /// This method primarily facilitates low-level access for methods that process data stored + /// in CSR format directly. + #[inline] + pub fn cols_and_values_mut(&mut self) -> (&[usize], &mut [T]) { + self.lane.indices_and_values_mut() + } + + /// Returns a mutable entry for the given global column index. + #[inline] + pub fn get_entry_mut(&mut self, global_col_index: usize) -> Option> { + self.lane.get_entry_mut(global_col_index) + } +} + +/// Row iterator for [CsrMatrix](struct.CsrMatrix.html). +pub struct CsrRowIter<'a, T> { + lane_iter: CsLaneIter<'a, T>, +} + +impl<'a, T> Iterator for CsrRowIter<'a, T> { + type Item = CsrRow<'a, T>; + + fn next(&mut self) -> Option { + self.lane_iter.next().map(|lane| CsrRow { lane }) + } +} + +/// Mutable row iterator for [CsrMatrix](struct.CsrMatrix.html). 
+pub struct CsrRowIterMut<'a, T> { + lane_iter: CsLaneIterMut<'a, T>, +} + +impl<'a, T> Iterator for CsrRowIterMut<'a, T> +where + T: 'a, +{ + type Item = CsrRowMut<'a, T>; + + fn next(&mut self) -> Option { + self.lane_iter.next().map(|lane| CsrRowMut { lane }) + } +} diff --git a/nalgebra-sparse/src/factorization/cholesky.rs b/nalgebra-sparse/src/factorization/cholesky.rs new file mode 100644 index 00000000..a18761c9 --- /dev/null +++ b/nalgebra-sparse/src/factorization/cholesky.rs @@ -0,0 +1,373 @@ +use crate::csc::CscMatrix; +use crate::ops::serial::spsolve_csc_lower_triangular; +use crate::ops::Op; +use crate::pattern::SparsityPattern; +use core::{iter, mem}; +use nalgebra::{DMatrix, DMatrixSlice, DMatrixSliceMut, RealField, Scalar}; +use std::fmt::{Display, Formatter}; + +/// A symbolic sparse Cholesky factorization of a CSC matrix. +/// +/// The symbolic factorization computes the sparsity pattern of `L`, the Cholesky factor. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct CscSymbolicCholesky { + // Pattern of the original matrix that was decomposed + m_pattern: SparsityPattern, + l_pattern: SparsityPattern, + // u in this context is L^T, so that M = L L^T + u_pattern: SparsityPattern, +} + +impl CscSymbolicCholesky { + /// Compute the symbolic factorization for a sparsity pattern belonging to a CSC matrix. + /// + /// The sparsity pattern must be symmetric. However, this is not enforced, and it is the + /// responsibility of the user to ensure that this property holds. + /// + /// # Panics + /// + /// Panics if the sparsity pattern is not square. + pub fn factor(pattern: SparsityPattern) -> Self { + assert_eq!( + pattern.major_dim(), + pattern.minor_dim(), + "Major and minor dimensions must be the same (square matrix)." + ); + let (l_pattern, u_pattern) = nonzero_pattern(&pattern); + Self { + m_pattern: pattern, + l_pattern, + u_pattern, + } + } + + /// The pattern of the Cholesky factor `L`. + pub fn l_pattern(&self) -> &SparsityPattern { + &self.l_pattern + } +} + +/// A sparse Cholesky factorization `A = L L^T` of a [`CscMatrix`]. +/// +/// The factor `L` is a sparse, lower-triangular matrix. See the article on [Wikipedia] for +/// more information. +/// +/// The implementation is a port of the `CsCholesky` implementation in `nalgebra`. It is similar +/// to Tim Davis' [`CSparse`]. The current implementation performs no fill-in reduction, and can +/// therefore be expected to produce much too dense Cholesky factors for many matrices. +/// It is therefore not currently recommended to use this implementation for serious projects. +/// +/// [`CSparse`]: https://epubs.siam.org/doi/book/10.1137/1.9780898718881 +/// [Wikipedia]: https://en.wikipedia.org/wiki/Cholesky_decomposition +// TODO: We should probably implement PartialEq/Eq, but in that case we'd probably need a +// custom implementation, due to the need to exclude the workspace arrays +#[derive(Debug, Clone)] +pub struct CscCholesky { + // Pattern of the original matrix + m_pattern: SparsityPattern, + l_factor: CscMatrix, + u_pattern: SparsityPattern, + work_x: Vec, + work_c: Vec, +} + +#[derive(Debug, PartialEq, Eq, Clone)] +#[non_exhaustive] +/// Possible errors produced by the Cholesky factorization. +pub enum CholeskyError { + /// The matrix is not positive definite. 
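+    ///
+    /// This is detected during the numerical factorization when a diagonal entry of the
+    /// factor fails to be strictly positive.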
+ NotPositiveDefinite, +} + +impl Display for CholeskyError { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "Matrix is not positive definite") + } +} + +impl std::error::Error for CholeskyError {} + +impl CscCholesky { + /// Computes the numerical Cholesky factorization associated with the given + /// symbolic factorization and the provided values. + /// + /// The values correspond to the non-zero values of the CSC matrix for which the + /// symbolic factorization was computed. + /// + /// # Errors + /// + /// Returns an error if the numerical factorization fails. This can occur if the matrix is not + /// symmetric positive definite. + /// + /// # Panics + /// + /// Panics if the number of values differ from the number of non-zeros of the sparsity pattern + /// of the matrix that was symbolically factored. + pub fn factor_numerical( + symbolic: CscSymbolicCholesky, + values: &[T], + ) -> Result { + assert_eq!( + symbolic.l_pattern.nnz(), + symbolic.u_pattern.nnz(), + "u is just the transpose of l, so should have the same nnz" + ); + + let l_nnz = symbolic.l_pattern.nnz(); + let l_values = vec![T::zero(); l_nnz]; + let l_factor = + CscMatrix::try_from_pattern_and_values(symbolic.l_pattern, l_values).unwrap(); + + let (nrows, ncols) = (l_factor.nrows(), l_factor.ncols()); + + let mut factorization = CscCholesky { + m_pattern: symbolic.m_pattern, + l_factor, + u_pattern: symbolic.u_pattern, + work_x: vec![T::zero(); nrows], + // Fill with MAX so that things hopefully totally fail if values are not + // overwritten. Might be easier to debug this way + work_c: vec![usize::MAX, ncols], + }; + + factorization.refactor(values)?; + Ok(factorization) + } + + /// Computes the Cholesky factorization of the provided matrix. + /// + /// The matrix must be symmetric positive definite. Symmetry is not checked, and it is up + /// to the user to enforce this property. + /// + /// # Errors + /// + /// Returns an error if the numerical factorization fails. This can occur if the matrix is not + /// symmetric positive definite. + /// + /// # Panics + /// + /// Panics if the matrix is not square. + pub fn factor(matrix: &CscMatrix) -> Result { + let symbolic = CscSymbolicCholesky::factor(matrix.pattern().clone()); + Self::factor_numerical(symbolic, matrix.values()) + } + + /// Re-computes the factorization for a new set of non-zero values. + /// + /// This is useful when the values of a matrix changes, but the sparsity pattern remains + /// constant. + /// + /// # Errors + /// + /// Returns an error if the numerical factorization fails. This can occur if the matrix is not + /// symmetric positive definite. + /// + /// # Panics + /// + /// Panics if the number of values does not match the number of non-zeros in the sparsity + /// pattern. + pub fn refactor(&mut self, values: &[T]) -> Result<(), CholeskyError> { + self.decompose_left_looking(values) + } + + /// Returns a reference to the Cholesky factor `L`. + pub fn l(&self) -> &CscMatrix { + &self.l_factor + } + + /// Returns the Cholesky factor `L`. + pub fn take_l(self) -> CscMatrix { + self.l_factor + } + + /// Perform a numerical left-looking cholesky decomposition of a matrix with the same structure as the + /// one used to initialize `self`, but with different non-zero values provided by `values`. + fn decompose_left_looking(&mut self, values: &[T]) -> Result<(), CholeskyError> { + assert!( + values.len() >= self.m_pattern.nnz(), + // TODO: Improve error message + "The set of values is too small." 
+ ); + + let n = self.l_factor.nrows(); + + // Reset `work_c` to the column pointers of `l`. + self.work_c.clear(); + self.work_c.extend_from_slice(self.l_factor.col_offsets()); + + unsafe { + for k in 0..n { + // Scatter the k-th column of the original matrix with the values provided. + let range_begin = *self.m_pattern.major_offsets().get_unchecked(k); + let range_end = *self.m_pattern.major_offsets().get_unchecked(k + 1); + let range_k = range_begin..range_end; + + *self.work_x.get_unchecked_mut(k) = T::zero(); + for p in range_k.clone() { + let irow = *self.m_pattern.minor_indices().get_unchecked(p); + + if irow >= k { + *self.work_x.get_unchecked_mut(irow) = *values.get_unchecked(p); + } + } + + for &j in self.u_pattern.lane(k) { + let factor = -*self + .l_factor + .values() + .get_unchecked(*self.work_c.get_unchecked(j)); + *self.work_c.get_unchecked_mut(j) += 1; + + if j < k { + let col_j = self.l_factor.col(j); + let col_j_entries = col_j.row_indices().iter().zip(col_j.values()); + for (&z, val) in col_j_entries { + if z >= k { + *self.work_x.get_unchecked_mut(z) += val.inlined_clone() * factor; + } + } + } + } + + let diag = *self.work_x.get_unchecked(k); + + if diag > T::zero() { + let denom = diag.sqrt(); + + { + let (offsets, _, values) = self.l_factor.csc_data_mut(); + *values.get_unchecked_mut(*offsets.get_unchecked(k)) = denom; + } + + let mut col_k = self.l_factor.col_mut(k); + let (col_k_rows, col_k_values) = col_k.rows_and_values_mut(); + let col_k_entries = col_k_rows.iter().zip(col_k_values); + for (&p, val) in col_k_entries { + *val = *self.work_x.get_unchecked(p) / denom; + *self.work_x.get_unchecked_mut(p) = T::zero(); + } + } else { + return Err(CholeskyError::NotPositiveDefinite); + } + } + } + + Ok(()) + } + + /// Solves the system `A X = B`, where `X` and `B` are dense matrices. + /// + /// # Panics + /// + /// Panics if `B` is not square. + pub fn solve<'a>(&'a self, b: impl Into>) -> DMatrix { + let b = b.into(); + let mut output = b.clone_owned(); + self.solve_mut(&mut output); + output + } + + /// Solves the system `AX = B`, where `X` and `B` are dense matrices. + /// + /// The result is stored in-place in `b`. + /// + /// # Panics + /// + /// Panics if `b` is not square. + pub fn solve_mut<'a>(&'a self, b: impl Into>) { + let expect_msg = "If the Cholesky factorization succeeded,\ + then the triangular solve should never fail"; + // Solve LY = B + let mut y = b.into(); + spsolve_csc_lower_triangular(Op::NoOp(self.l()), &mut y).expect(expect_msg); + + // Solve L^T X = Y + let mut x = y; + spsolve_csc_lower_triangular(Op::Transpose(self.l()), &mut x).expect(expect_msg); + } +} + +fn reach( + pattern: &SparsityPattern, + j: usize, + max_j: usize, + tree: &[usize], + marks: &mut Vec, + out: &mut Vec, +) { + marks.clear(); + marks.resize(tree.len(), false); + + // TODO: avoid all those allocations. 
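+    // Starting from each structural non-zero in column `j`, walk up the elimination tree,
+    // marking and collecting every newly visited node with index at most `max_j`. The union
+    // of these paths is the set of row indices that become structurally non-zero in the
+    // corresponding column of the factor (the "reach" of `j`).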
+ let mut tmp = Vec::new(); + let mut res = Vec::new(); + + for &irow in pattern.lane(j) { + let mut curr = irow; + while curr != usize::max_value() && curr <= max_j && !marks[curr] { + marks[curr] = true; + tmp.push(curr); + curr = tree[curr]; + } + + tmp.append(&mut res); + mem::swap(&mut tmp, &mut res); + } + + res.sort_unstable(); + + out.append(&mut res); +} + +fn nonzero_pattern(m: &SparsityPattern) -> (SparsityPattern, SparsityPattern) { + let etree = elimination_tree(m); + // Note: We assume CSC, therefore rows == minor and cols == major + let (nrows, ncols) = (m.minor_dim(), m.major_dim()); + let mut rows = Vec::with_capacity(m.nnz()); + let mut col_offsets = Vec::with_capacity(ncols + 1); + let mut marks = Vec::new(); + + // NOTE: the following will actually compute the non-zero pattern of + // the transpose of l. + col_offsets.push(0); + for i in 0..nrows { + reach(m, i, i, &etree, &mut marks, &mut rows); + col_offsets.push(rows.len()); + } + + let u_pattern = + SparsityPattern::try_from_offsets_and_indices(nrows, ncols, col_offsets, rows).unwrap(); + + // TODO: Avoid this transpose? + let l_pattern = u_pattern.transpose(); + + (l_pattern, u_pattern) +} + +fn elimination_tree(pattern: &SparsityPattern) -> Vec { + // Note: The pattern is assumed to of a CSC matrix, so the number of rows is + // given by the minor dimension + let nrows = pattern.minor_dim(); + let mut forest: Vec<_> = iter::repeat(usize::max_value()).take(nrows).collect(); + let mut ancestor: Vec<_> = iter::repeat(usize::max_value()).take(nrows).collect(); + + for k in 0..nrows { + for &irow in pattern.lane(k) { + let mut i = irow; + + while i < k { + let i_ancestor = ancestor[i]; + ancestor[i] = k; + + if i_ancestor == usize::max_value() { + forest[i] = k; + break; + } + + i = i_ancestor; + } + } + } + + forest +} diff --git a/nalgebra-sparse/src/factorization/mod.rs b/nalgebra-sparse/src/factorization/mod.rs new file mode 100644 index 00000000..b77a857c --- /dev/null +++ b/nalgebra-sparse/src/factorization/mod.rs @@ -0,0 +1,6 @@ +//! Matrix factorization for sparse matrices. +//! +//! Currently, the only factorization provided here is the [`CscCholesky`] factorization. +mod cholesky; + +pub use cholesky::*; diff --git a/nalgebra-sparse/src/lib.rs b/nalgebra-sparse/src/lib.rs new file mode 100644 index 00000000..4b96717c --- /dev/null +++ b/nalgebra-sparse/src/lib.rs @@ -0,0 +1,267 @@ +//! Sparse matrices and algorithms for [nalgebra](https://www.nalgebra.org). +//! +//! This crate extends `nalgebra` with sparse matrix formats and operations on sparse matrices. +//! +//! ## Goals +//! The long-term goals for this crate are listed below. +//! +//! - Provide proven sparse matrix formats in an easy-to-use and idiomatic Rust API that +//! naturally integrates with `nalgebra`. +//! - Provide additional expert-level APIs for fine-grained control over operations. +//! - Integrate well with external sparse matrix libraries. +//! - Provide native Rust high-performance routines, including parallel matrix operations. +//! +//! ## Highlighted current features +//! +//! - [CSR](csr::CsrMatrix), [CSC](csc::CscMatrix) and [COO](coo::CooMatrix) formats, and +//! [conversions](`convert`) between them. +//! - Common arithmetic operations are implemented. See the [`ops`] module. +//! - Sparsity patterns in CSR and CSC matrices are explicitly represented by the +//! [SparsityPattern](pattern::SparsityPattern) type, which encodes the invariants of the +//! associated index data structures. +//! 
- [proptest strategies](`proptest`) for sparse matrices when the feature +//! `proptest-support` is enabled. +//! - [matrixcompare support](https://crates.io/crates/matrixcompare) for effortless +//! (approximate) comparison of matrices in test code (requires the `compare` feature). +//! +//! ## Current state +//! +//! The library is in an early, but usable state. The API has been designed to be extensible, +//! but breaking changes will be necessary to implement several planned features. While it is +//! backed by an extensive test suite, it has yet to be thoroughly battle-tested in real +//! applications. Moreover, the focus so far has been on correctness and API design, with little +//! focus on performance. Future improvements will include incremental performance enhancements. +//! +//! Current limitations: +//! +//! - Limited or no availability of sparse system solvers. +//! - Limited support for complex numbers. Currently only arithmetic operations that do not +//! rely on particular properties of complex numbers, such as e.g. conjugation, are +//! supported. +//! - No integration with external libraries. +//! +//! # Usage +//! +//! Add the following to your `Cargo.toml` file: +//! +//! ```toml +//! [dependencies] +//! nalgebra_sparse = "0.1" +//! ``` +//! +//! # Supported matrix formats +//! +//! | Format | Notes | +//! | ------------------------|--------------------------------------------- | +//! | [COO](`coo::CooMatrix`) | Well-suited for matrix construction.
<br /> Ill-suited for algebraic operations. |
+//! | [CSR](`csr::CsrMatrix`) | Immutable sparsity pattern, suitable for algebraic operations. <br /> Fast row access. |
+//! | [CSC](`csc::CscMatrix`) | Immutable sparsity pattern, suitable for algebraic operations. <br />
Fast column access. | +//! +//! What format is best to use depends on the application. The most common use case for sparse +//! matrices in science is the solution of sparse linear systems. Here we can differentiate between +//! two common cases: +//! +//! - Direct solvers. Typically, direct solvers take their input in CSR or CSC format. +//! - Iterative solvers. Many iterative solvers require only matrix-vector products, +//! for which the CSR or CSC formats are suitable. +//! +//! The [COO](coo::CooMatrix) format is primarily intended for matrix construction. +//! A common pattern is to use COO for construction, before converting to CSR or CSC for use +//! in a direct solver or for computing matrix-vector products in an iterative solver. +//! Some high-performance applications might also directly manipulate the CSR and/or CSC +//! formats. +//! +//! # Example: COO -> CSR -> matrix-vector product +//! +//! ```rust +//! use nalgebra_sparse::{coo::CooMatrix, csr::CsrMatrix}; +//! use nalgebra::{DMatrix, DVector}; +//! use matrixcompare::assert_matrix_eq; +//! +//! // The dense representation of the matrix +//! let dense = DMatrix::from_row_slice(3, 3, +//! &[1.0, 0.0, 3.0, +//! 2.0, 0.0, 1.3, +//! 0.0, 0.0, 4.1]); +//! +//! // Build the equivalent COO representation. We only add the non-zero values +//! let mut coo = CooMatrix::new(3, 3); +//! // We can add elements in any order. For clarity, we do so in row-major order here. +//! coo.push(0, 0, 1.0); +//! coo.push(0, 2, 3.0); +//! coo.push(1, 0, 2.0); +//! coo.push(1, 2, 1.3); +//! coo.push(2, 2, 4.1); +//! +//! // The simplest way to construct a CSR matrix is to first construct a COO matrix, and +//! // then convert it to CSR. The `From` trait is implemented for conversions between different +//! // sparse matrix types. +//! // Alternatively, we can construct a matrix directly from the CSR data. +//! // See the docs for CsrMatrix for how to do that. +//! let csr = CsrMatrix::from(&coo); +//! +//! // Let's check that the CSR matrix and the dense matrix represent the same matrix. +//! // We can use macros from the `matrixcompare` crate to easily do this, despite the fact that +//! // we're comparing across two different matrix formats. Note that these macros are only really +//! // appropriate for writing tests, however. +//! assert_matrix_eq!(csr, dense); +//! +//! let x = DVector::from_column_slice(&[1.3, -4.0, 3.5]); +//! +//! // Compute the matrix-vector product y = A * x. We don't need to specify the type here, +//! // but let's just do it to make sure we get what we expect +//! let y: DVector<_> = &csr * &x; +//! +//! // Verify the result with a small element-wise absolute tolerance +//! let y_expected = DVector::from_column_slice(&[11.8, 7.15, 14.35]); +//! assert_matrix_eq!(y, y_expected, comp = abs, tol = 1e-9); +//! +//! // The above expression is simple, and gives easy to read code, but if we're doing this in a +//! // loop, we'll have to keep allocating new vectors. If we determine that this is a bottleneck, +//! // then we can resort to the lower level APIs for more control over the operations +//! { +//! use nalgebra_sparse::ops::{Op, serial::spmm_csr_dense}; +//! let mut y = y; +//! // Compute y <- 0.0 * y + 1.0 * csr * dense. We store the result directly in `y`, without +//! // any intermediate allocations +//! spmm_csr_dense(0.0, &mut y, 1.0, Op::NoOp(&csr), Op::NoOp(&x)); +//! assert_matrix_eq!(y, y_expected, comp = abs, tol = 1e-9); +//! } +//! 
``` +#![deny(non_camel_case_types)] +#![deny(unused_parens)] +#![deny(non_upper_case_globals)] +#![deny(unused_qualifications)] +#![deny(unused_results)] +#![deny(missing_docs)] + +pub extern crate nalgebra as na; +pub mod convert; +pub mod coo; +pub mod csc; +pub mod csr; +pub mod factorization; +pub mod ops; +pub mod pattern; + +pub(crate) mod cs; + +#[cfg(feature = "proptest-support")] +pub mod proptest; + +#[cfg(feature = "compare")] +mod matrixcompare; + +use num_traits::Zero; +use std::error::Error; +use std::fmt; + +pub use self::coo::CooMatrix; +pub use self::csc::CscMatrix; +pub use self::csr::CsrMatrix; + +/// Errors produced by functions that expect well-formed sparse format data. +#[derive(Debug)] +pub struct SparseFormatError { + kind: SparseFormatErrorKind, + // Currently we only use an underlying error for generating the `Display` impl + error: Box, +} + +impl SparseFormatError { + /// The type of error. + pub fn kind(&self) -> &SparseFormatErrorKind { + &self.kind + } + + pub(crate) fn from_kind_and_error(kind: SparseFormatErrorKind, error: Box) -> Self { + Self { kind, error } + } + + /// Helper functionality for more conveniently creating errors. + pub(crate) fn from_kind_and_msg(kind: SparseFormatErrorKind, msg: &'static str) -> Self { + Self::from_kind_and_error(kind, Box::::from(msg)) + } +} + +/// The type of format error described by a [SparseFormatError](struct.SparseFormatError.html). +#[non_exhaustive] +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum SparseFormatErrorKind { + /// Indicates that the index data associated with the format contains at least one index + /// out of bounds. + IndexOutOfBounds, + + /// Indicates that the provided data contains at least one duplicate entry, and the + /// current format does not support duplicate entries. + DuplicateEntry, + + /// Indicates that the provided data for the format does not conform to the high-level + /// structure of the format. + /// + /// For example, the arrays defining the format data might have incompatible sizes. + InvalidStructure, +} + +impl fmt::Display for SparseFormatError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.error) + } +} + +impl Error for SparseFormatError {} + +/// An entry in a sparse matrix. +/// +/// Sparse matrices do not store all their entries explicitly. Therefore, entry (i, j) in the matrix +/// can either be a reference to an explicitly stored element, or it is implicitly zero. +#[derive(Debug, PartialEq, Eq)] +pub enum SparseEntry<'a, T> { + /// The entry is a reference to an explicitly stored element. + /// + /// Note that the naming here is a misnomer: The element can still be zero, even though it + /// is explicitly stored (a so-called "explicit zero"). + NonZero(&'a T), + /// The entry is implicitly zero, i.e. it is not explicitly stored. + Zero, +} + +impl<'a, T: Clone + Zero> SparseEntry<'a, T> { + /// Returns the value represented by this entry. + /// + /// Either clones the underlying reference or returns zero if the entry is not explicitly + /// stored. + pub fn into_value(self) -> T { + match self { + SparseEntry::NonZero(value) => value.clone(), + SparseEntry::Zero => T::zero(), + } + } +} + +/// A mutable entry in a sparse matrix. +/// +/// See also `SparseEntry`. +#[derive(Debug, PartialEq, Eq)] +pub enum SparseEntryMut<'a, T> { + /// The entry is a mutable reference to an explicitly stored element. 
+ /// + /// Note that the naming here is a misnomer: The element can still be zero, even though it + /// is explicitly stored (a so-called "explicit zero"). + NonZero(&'a mut T), + /// The entry is implicitly zero i.e. it is not explicitly stored. + Zero, +} + +impl<'a, T: Clone + Zero> SparseEntryMut<'a, T> { + /// Returns the value represented by this entry. + /// + /// Either clones the underlying reference or returns zero if the entry is not explicitly + /// stored. + pub fn into_value(self) -> T { + match self { + SparseEntryMut::NonZero(value) => value.clone(), + SparseEntryMut::Zero => T::zero(), + } + } +} diff --git a/nalgebra-sparse/src/matrixcompare.rs b/nalgebra-sparse/src/matrixcompare.rs new file mode 100644 index 00000000..9c48ae40 --- /dev/null +++ b/nalgebra-sparse/src/matrixcompare.rs @@ -0,0 +1,65 @@ +//! Implements core traits for use with `matrixcompare`. +use crate::coo::CooMatrix; +use crate::csc::CscMatrix; +use crate::csr::CsrMatrix; +use matrixcompare_core; +use matrixcompare_core::{Access, SparseAccess}; + +macro_rules! impl_matrix_for_csr_csc { + ($MatrixType:ident) => { + impl SparseAccess for $MatrixType { + fn nnz(&self) -> usize { + $MatrixType::nnz(self) + } + + fn fetch_triplets(&self) -> Vec<(usize, usize, T)> { + self.triplet_iter() + .map(|(i, j, v)| (i, j, v.clone())) + .collect() + } + } + + impl matrixcompare_core::Matrix for $MatrixType { + fn rows(&self) -> usize { + self.nrows() + } + + fn cols(&self) -> usize { + self.ncols() + } + + fn access(&self) -> Access { + Access::Sparse(self) + } + } + }; +} + +impl_matrix_for_csr_csc!(CsrMatrix); +impl_matrix_for_csr_csc!(CscMatrix); + +impl SparseAccess for CooMatrix { + fn nnz(&self) -> usize { + CooMatrix::nnz(self) + } + + fn fetch_triplets(&self) -> Vec<(usize, usize, T)> { + self.triplet_iter() + .map(|(i, j, v)| (i, j, v.clone())) + .collect() + } +} + +impl matrixcompare_core::Matrix for CooMatrix { + fn rows(&self) -> usize { + self.nrows() + } + + fn cols(&self) -> usize { + self.ncols() + } + + fn access(&self) -> Access { + Access::Sparse(self) + } +} diff --git a/nalgebra-sparse/src/ops/impl_std_ops.rs b/nalgebra-sparse/src/ops/impl_std_ops.rs new file mode 100644 index 00000000..75645bed --- /dev/null +++ b/nalgebra-sparse/src/ops/impl_std_ops.rs @@ -0,0 +1,331 @@ +use crate::csc::CscMatrix; +use crate::csr::CsrMatrix; + +use crate::ops::serial::{ + spadd_csc_prealloc, spadd_csr_prealloc, spadd_pattern, spmm_csc_dense, spmm_csc_pattern, + spmm_csc_prealloc, spmm_csr_dense, spmm_csr_pattern, spmm_csr_prealloc, +}; +use crate::ops::Op; +use nalgebra::allocator::Allocator; +use nalgebra::base::storage::Storage; +use nalgebra::constraint::{DimEq, ShapeConstraint}; +use nalgebra::{ + ClosedAdd, ClosedDiv, ClosedMul, ClosedSub, DefaultAllocator, Dim, Dynamic, Matrix, MatrixMN, + Scalar, U1, +}; +use num_traits::{One, Zero}; +use std::ops::{Add, Div, DivAssign, Mul, MulAssign, Neg, Sub}; + +/// Helper macro for implementing binary operators for different matrix types +/// See below for usage. +macro_rules! impl_bin_op { + ($trait:ident, $method:ident, + <$($life:lifetime),* $(,)? $($scalar_type:ident $(: $bounds:path)?)?>($a:ident : $a_type:ty, $b:ident : $b_type:ty) -> $ret:ty $body:block) + => + { + impl<$($life,)* $($scalar_type)?> $trait<$b_type> for $a_type + where + // Note: The Neg bound is currently required because we delegate e.g. + // Sub to SpAdd with negative coefficients. This is not well-defined for + // unsigned data types. + $($scalar_type: $($bounds + )? 
Scalar + ClosedAdd + ClosedSub + ClosedMul + Zero + One + Neg)? + { + type Output = $ret; + fn $method(self, $b: $b_type) -> Self::Output { + let $a = self; + $body + } + } + }; +} + +/// Implements a +/- b for all combinations of reference and owned matrices, for +/// CsrMatrix or CscMatrix. +macro_rules! impl_sp_plus_minus { + // We first match on some special-case syntax, and forward to the actual implementation + ($matrix_type:ident, $spadd_fn:ident, +) => { + impl_sp_plus_minus!(Add, add, $matrix_type, $spadd_fn, +, T::one()); + }; + ($matrix_type:ident, $spadd_fn:ident, -) => { + impl_sp_plus_minus!(Sub, sub, $matrix_type, $spadd_fn, -, -T::one()); + }; + ($trait:ident, $method:ident, $matrix_type:ident, $spadd_fn:ident, $sign:tt, $factor:expr) => { + impl_bin_op!($trait, $method, + <'a, T>(a: &'a $matrix_type, b: &'a $matrix_type) -> $matrix_type { + // If both matrices have the same pattern, then we can immediately re-use it + let pattern = spadd_pattern(a.pattern(), b.pattern()); + let values = vec![T::zero(); pattern.nnz()]; + // We are giving data that is valid by definition, so it is safe to unwrap below + let mut result = $matrix_type::try_from_pattern_and_values(pattern, values) + .unwrap(); + $spadd_fn(T::zero(), &mut result, T::one(), Op::NoOp(&a)).unwrap(); + $spadd_fn(T::one(), &mut result, $factor * T::one(), Op::NoOp(&b)).unwrap(); + result + }); + + impl_bin_op!($trait, $method, + <'a, T>(a: $matrix_type, b: &'a $matrix_type) -> $matrix_type { + &a $sign b + }); + + impl_bin_op!($trait, $method, + <'a, T>(a: &'a $matrix_type, b: $matrix_type) -> $matrix_type { + a $sign &b + }); + impl_bin_op!($trait, $method, (a: $matrix_type, b: $matrix_type) -> $matrix_type { + a $sign &b + }); + } +} + +impl_sp_plus_minus!(CsrMatrix, spadd_csr_prealloc, +); +impl_sp_plus_minus!(CsrMatrix, spadd_csr_prealloc, -); +impl_sp_plus_minus!(CscMatrix, spadd_csc_prealloc, +); +impl_sp_plus_minus!(CscMatrix, spadd_csc_prealloc, -); + +macro_rules! impl_mul { + ($($args:tt)*) => { + impl_bin_op!(Mul, mul, $($args)*); + } +} + +/// Implements a + b for all combinations of reference and owned matrices, for +/// CsrMatrix or CscMatrix. +macro_rules! impl_spmm { + ($matrix_type:ident, $pattern_fn:expr, $spmm_fn:expr) => { + impl_mul!(<'a, T>(a: &'a $matrix_type, b: &'a $matrix_type) -> $matrix_type { + let pattern = $pattern_fn(a.pattern(), b.pattern()); + let values = vec![T::zero(); pattern.nnz()]; + let mut result = $matrix_type::try_from_pattern_and_values(pattern, values) + .unwrap(); + $spmm_fn(T::zero(), + &mut result, + T::one(), + Op::NoOp(a), + Op::NoOp(b)) + .expect("Internal error: spmm failed (please debug)."); + result + }); + impl_mul!(<'a, T>(a: &'a $matrix_type, b: $matrix_type) -> $matrix_type { a * &b}); + impl_mul!(<'a, T>(a: $matrix_type, b: &'a $matrix_type) -> $matrix_type { &a * b}); + impl_mul!((a: $matrix_type, b: $matrix_type) -> $matrix_type { &a * &b}); + } +} + +impl_spmm!(CsrMatrix, spmm_csr_pattern, spmm_csr_prealloc); +// Need to switch order of operations for CSC pattern +impl_spmm!(CscMatrix, spmm_csc_pattern, spmm_csc_prealloc); + +/// Implements Scalar * Matrix operations for *concrete* scalar types. The reason this is necessary +/// is that we are not able to implement Mul> for all T generically due to orphan rules. +macro_rules! 
impl_concrete_scalar_matrix_mul { + ($matrix_type:ident, $($scalar_type:ty),*) => { + // For each concrete scalar type, forward the implementation of scalar * matrix + // to matrix * scalar, which we have already implemented through generics + $( + impl_mul!(<>(a: $scalar_type, b: $matrix_type<$scalar_type>) + -> $matrix_type<$scalar_type> { b * a }); + impl_mul!(<'a>(a: $scalar_type, b: &'a $matrix_type<$scalar_type>) + -> $matrix_type<$scalar_type> { b * a }); + impl_mul!(<'a>(a: &'a $scalar_type, b: $matrix_type<$scalar_type>) + -> $matrix_type<$scalar_type> { b * (*a) }); + impl_mul!(<'a>(a: &'a $scalar_type, b: &'a $matrix_type<$scalar_type>) + -> $matrix_type<$scalar_type> { b * *a }); + )* + } +} + +/// Implements multiplication between matrix and scalar for various matrix types +macro_rules! impl_scalar_mul { + ($matrix_type: ident) => { + impl_mul!(<'a, T>(a: &'a $matrix_type, b: &'a T) -> $matrix_type { + let values: Vec<_> = a.values() + .iter() + .map(|v_i| v_i.inlined_clone() * b.inlined_clone()) + .collect(); + $matrix_type::try_from_pattern_and_values(a.pattern().clone(), values).unwrap() + }); + impl_mul!(<'a, T>(a: &'a $matrix_type, b: T) -> $matrix_type { + a * &b + }); + impl_mul!(<'a, T>(a: $matrix_type, b: &'a T) -> $matrix_type { + let mut a = a; + for value in a.values_mut() { + *value = b.inlined_clone() * value.inlined_clone(); + } + a + }); + impl_mul!((a: $matrix_type, b: T) -> $matrix_type { + a * &b + }); + impl_concrete_scalar_matrix_mul!( + $matrix_type, + i8, i16, i32, i64, isize, f32, f64); + + impl MulAssign for $matrix_type + where + T: Scalar + ClosedAdd + ClosedMul + Zero + One + { + fn mul_assign(&mut self, scalar: T) { + for val in self.values_mut() { + *val *= scalar.inlined_clone(); + } + } + } + + impl<'a, T> MulAssign<&'a T> for $matrix_type + where + T: Scalar + ClosedAdd + ClosedMul + Zero + One + { + fn mul_assign(&mut self, scalar: &'a T) { + for val in self.values_mut() { + *val *= scalar.inlined_clone(); + } + } + } + } +} + +impl_scalar_mul!(CsrMatrix); +impl_scalar_mul!(CscMatrix); + +macro_rules! impl_neg { + ($matrix_type:ident) => { + impl Neg for $matrix_type + where + T: Scalar + Neg, + { + type Output = $matrix_type; + + fn neg(mut self) -> Self::Output { + for v_i in self.values_mut() { + *v_i = -v_i.inlined_clone(); + } + self + } + } + + impl<'a, T> Neg for &'a $matrix_type + where + T: Scalar + Neg, + { + type Output = $matrix_type; + + fn neg(self) -> Self::Output { + // TODO: This is inefficient. Ideally we'd have a method that would let us + // obtain both the sparsity pattern and values from the matrix, + // and then modify the values before creating a new matrix from the pattern + // and negated values. + -self.clone() + } + } + }; +} + +impl_neg!(CsrMatrix); +impl_neg!(CscMatrix); + +macro_rules! 
impl_div { + ($matrix_type:ident) => { + impl_bin_op!(Div, div, (matrix: $matrix_type, scalar: T) -> $matrix_type { + let mut matrix = matrix; + matrix /= scalar; + matrix + }); + impl_bin_op!(Div, div, <'a, T: ClosedDiv>(matrix: $matrix_type, scalar: &T) -> $matrix_type { + matrix / scalar.inlined_clone() + }); + impl_bin_op!(Div, div, <'a, T: ClosedDiv>(matrix: &'a $matrix_type, scalar: T) -> $matrix_type { + let new_values = matrix.values() + .iter() + .map(|v_i| v_i.inlined_clone() / scalar.inlined_clone()) + .collect(); + $matrix_type::try_from_pattern_and_values(matrix.pattern().clone(), new_values) + .unwrap() + }); + impl_bin_op!(Div, div, <'a, T: ClosedDiv>(matrix: &'a $matrix_type, scalar: &'a T) -> $matrix_type { + matrix / scalar.inlined_clone() + }); + + impl DivAssign for $matrix_type + where T : Scalar + ClosedAdd + ClosedMul + ClosedDiv + Zero + One + { + fn div_assign(&mut self, scalar: T) { + self.values_mut().iter_mut().for_each(|v_i| *v_i /= scalar.inlined_clone()); + } + } + + impl<'a, T> DivAssign<&'a T> for $matrix_type + where T : Scalar + ClosedAdd + ClosedMul + ClosedDiv + Zero + One + { + fn div_assign(&mut self, scalar: &'a T) { + *self /= scalar.inlined_clone(); + } + } + } +} + +impl_div!(CsrMatrix); +impl_div!(CscMatrix); + +macro_rules! impl_spmm_cs_dense { + ($matrix_type_name:ident, $spmm_fn:ident) => { + // Implement ref-ref + impl_spmm_cs_dense!(&'a $matrix_type_name, &'a Matrix, $spmm_fn, |lhs, rhs| { + let (_, ncols) = rhs.data.shape(); + let nrows = Dynamic::new(lhs.nrows()); + let mut result = MatrixMN::::zeros_generic(nrows, ncols); + $spmm_fn(T::zero(), &mut result, T::one(), Op::NoOp(lhs), Op::NoOp(rhs)); + result + }); + + // Implement the other combinations by deferring to ref-ref + impl_spmm_cs_dense!(&'a $matrix_type_name, Matrix, $spmm_fn, |lhs, rhs| { + lhs * &rhs + }); + impl_spmm_cs_dense!($matrix_type_name, &'a Matrix, $spmm_fn, |lhs, rhs| { + &lhs * rhs + }); + impl_spmm_cs_dense!($matrix_type_name, Matrix, $spmm_fn, |lhs, rhs| { + &lhs * &rhs + }); + }; + + // Main body of the macro. The first pattern just forwards to this pattern but with + // different arguments + ($sparse_matrix_type:ty, $dense_matrix_type:ty, $spmm_fn:ident, + |$lhs:ident, $rhs:ident| $body:tt) => + { + impl<'a, T, R, C, S> Mul<$dense_matrix_type> for $sparse_matrix_type + where + T: Scalar + ClosedMul + ClosedAdd + ClosedSub + ClosedDiv + Neg + Zero + One, + R: Dim, + C: Dim, + S: Storage, + DefaultAllocator: Allocator, + // TODO: Is it possible to simplify these bounds? + ShapeConstraint: + // Bounds so that we can turn MatrixMN into a DMatrixSliceMut + DimEq>::Buffer as Storage>::RStride> + + DimEq + + DimEq>::Buffer as Storage>::CStride> + // Bounds so that we can turn &Matrix into a DMatrixSlice + + DimEq + + DimEq + + DimEq + { + // We need the column dimension to be generic, so that if RHS is a vector, then + // we also get a vector (and not a matrix) + type Output = MatrixMN; + + fn mul(self, rhs: $dense_matrix_type) -> Self::Output { + let $lhs = self; + let $rhs = rhs; + $body + } + } + } +} + +impl_spmm_cs_dense!(CsrMatrix, spmm_csr_dense); +impl_spmm_cs_dense!(CscMatrix, spmm_csc_dense); diff --git a/nalgebra-sparse/src/ops/mod.rs b/nalgebra-sparse/src/ops/mod.rs new file mode 100644 index 00000000..a6a21fbc --- /dev/null +++ b/nalgebra-sparse/src/ops/mod.rs @@ -0,0 +1,194 @@ +//! Sparse matrix arithmetic operations. +//! +//! This module contains a number of routines for sparse matrix arithmetic. These routines are +//! 
primarily intended for "expert usage". Most users should prefer to use standard
+//! `std::ops` operations for simple and readable code when possible. The routines provided here
+//! offer more control over allocation, and allow fusing some low-level operations for higher
+//! performance.
+//!
+//! The available operations are organized by backend. Currently, only the [`serial`] backend
+//! is available. In the future, backends that expose parallel operations may become available.
+//! All `std::ops` implementations will remain single-threaded and powered by the
+//! `serial` backend.
+//!
+//! Many routines are able to implicitly transpose matrices involved in the operation.
+//! For example, the routine [`spadd_csr_prealloc`](serial::spadd_csr_prealloc) performs the
+//! operation `C <- beta * C + alpha * op(A)`. Here `op(A)` indicates that the matrix `A` can
+//! either be used as-is or transposed. The notation `op(A)` is represented in code by the
+//! [`Op`] enum.
+//!
+//! # Available `std::ops` implementations
+//!
+//! ## Binary operators
+//!
+//! The below table summarizes the currently supported binary operators between matrices.
+//! In general, binary operators between sparse matrices are only supported if both matrices
+//! are stored in the same format. All supported binary operators are implemented for
+//! all four combinations of values and references.
+//!
+//! | LHS (down) \ RHS (right) | COO | CSR   | CSC   | Dense |
+//! |--------------------------|-----|-------|-------|-------|
+//! | COO                      |     |       |       |       |
+//! | CSR                      |     | + - * |       | *     |
+//! | CSC                      |     |       | + - * | *     |
+//! | Dense                    |     |       |       | + - * |
+//!
+//! As can be seen from the table, only `CSR * Dense` and `CSC * Dense` are supported.
+//! The other way around, i.e. `Dense * CSR` and `Dense * CSC`, are not implemented.
+//!
+//! Additionally, [CsrMatrix](`crate::csr::CsrMatrix`) and [CscMatrix](`crate::csc::CscMatrix`)
+//! support multiplication with scalars, in addition to division by a scalar.
+//! Note that only `Matrix * Scalar` works in a generic context, although `Scalar * Matrix`
+//! has been implemented for many of the built-in arithmetic types. This is due to a fundamental
+//! restriction of the Rust type system. Therefore, in generic code you will need to always place
+//! the matrix on the left-hand side of the multiplication.
+//!
+//! ## Unary operators
+//!
+//! The following table lists currently supported unary operators.
+//!
+//! | Format | AddAssign\<Matrix\> | MulAssign\<Matrix\> | MulAssign\<Scalar\> | Neg |
+//! | ------ | ------------------- | ------------------- | ------------------- | --- |
+//! | COO    |                     |                     |                     |     |
+//! | CSR    |                     |                     | x                   | x   |
+//! | CSC    |                     |                     | x                   | x   |
+//!
+//! # Example usage
+//!
+//! For example, consider the case where you want to compute the expression
+//! `C <- 3.0 * C + 2.0 * A^T * B`, where `A`, `B`, `C` are matrices and `A^T` is the transpose
+//! of `A`. The simplest way to write this is:
+//!
+//! ```rust
+//! # use nalgebra_sparse::csr::CsrMatrix;
+//! # let a = CsrMatrix::identity(10); let b = CsrMatrix::identity(10);
+//! # let mut c = CsrMatrix::identity(10);
+//! c = 3.0 * c + 2.0 * a.transpose() * b;
+//! ```
+//! This is simple and straightforward to read, and therefore the recommended way to implement
+//! it. However, if you have determined that this is a performance bottleneck of your application,
+//! it may be possible to speed things up. First, let's see what's going on here. The `std`
+//! operations are evaluated eagerly. This means that the following steps take place:
+//!
+//! 1. Evaluate `let c_temp = 3.0 * c`. This requires scaling all values of the matrix.
+//! 2. Evaluate `let a_t = a.transpose()` into a new temporary matrix.
+//! 3. Evaluate `let a_t_b = a_t * b` into a new temporary matrix.
+//! 4. Evaluate `let a_t_b_scaled = 2.0 * a_t_b`. This requires scaling all values of the matrix.
+//! 5. Evaluate `c = c_temp + a_t_b_scaled`.
+//!
+//! An alternative way to implement this expression (here using CSR matrices) is:
+//!
+//! ```rust
+//! # use nalgebra_sparse::csr::CsrMatrix;
+//! # let a = CsrMatrix::identity(10); let b = CsrMatrix::identity(10);
+//! # let mut c = CsrMatrix::identity(10);
+//! use nalgebra_sparse::ops::{Op, serial::spmm_csr_prealloc};
+//!
+//! // Evaluate the expression `c <- 3.0 * c + 2.0 * a^T * b`
+//! spmm_csr_prealloc(3.0, &mut c, 2.0, Op::Transpose(&a), Op::NoOp(&b))
+//!     .expect("We assume that the pattern of C is able to accommodate the result.");
+//! ```
+//! Compared to the simpler example, this snippet is harder to read, but it calls a single
+//! computational kernel that avoids many of the intermediate steps listed out before. Therefore
+//! directly calling kernels may sometimes lead to better performance. However, this should
+//! always be verified by performance profiling!
+
+mod impl_std_ops;
+pub mod serial;
+
+/// Determines whether a matrix should be transposed in a given operation.
+///
+/// See the [module-level documentation](crate::ops) for the purpose of this enum.
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub enum Op<T> {
+    /// Indicates that the matrix should be used as-is.
+ NoOp(T), + /// Indicates that the matrix should be transposed. + Transpose(T), +} + +impl Op { + /// Returns a reference to the inner value that the operation applies to. + pub fn inner_ref(&self) -> &T { + self.as_ref().into_inner() + } + + /// Returns an `Op` applied to a reference of the inner value. + pub fn as_ref(&self) -> Op<&T> { + match self { + Op::NoOp(obj) => Op::NoOp(&obj), + Op::Transpose(obj) => Op::Transpose(&obj), + } + } + + /// Converts the underlying data type. + pub fn convert(self) -> Op + where + T: Into, + { + self.map_same_op(T::into) + } + + /// Transforms the inner value with the provided function, but preserves the operation. + pub fn map_same_op U>(self, f: F) -> Op { + match self { + Op::NoOp(obj) => Op::NoOp(f(obj)), + Op::Transpose(obj) => Op::Transpose(f(obj)), + } + } + + /// Consumes the `Op` and returns the inner value. + pub fn into_inner(self) -> T { + match self { + Op::NoOp(obj) | Op::Transpose(obj) => obj, + } + } + + /// Applies the transpose operation. + /// + /// This operation follows the usual semantics of transposition. In particular, double + /// transposition is equivalent to no transposition. + pub fn transposed(self) -> Self { + match self { + Op::NoOp(obj) => Op::Transpose(obj), + Op::Transpose(obj) => Op::NoOp(obj), + } + } +} + +impl From for Op { + fn from(obj: T) -> Self { + Self::NoOp(obj) + } +} diff --git a/nalgebra-sparse/src/ops/serial/cs.rs b/nalgebra-sparse/src/ops/serial/cs.rs new file mode 100644 index 00000000..66b0ad76 --- /dev/null +++ b/nalgebra-sparse/src/ops/serial/cs.rs @@ -0,0 +1,186 @@ +use crate::cs::CsMatrix; +use crate::ops::serial::{OperationError, OperationErrorKind}; +use crate::ops::Op; +use crate::SparseEntryMut; +use nalgebra::{ClosedAdd, ClosedMul, DMatrixSlice, DMatrixSliceMut, Scalar}; +use num_traits::{One, Zero}; + +fn spmm_cs_unexpected_entry() -> OperationError { + OperationError::from_kind_and_message( + OperationErrorKind::InvalidPattern, + String::from("Found unexpected entry that is not present in `c`."), + ) +} + +/// Helper functionality for implementing CSR/CSC SPMM. +/// +/// Since CSR/CSC matrices are basically transpositions of each other, which lets us use the same +/// algorithm for the SPMM implementation. The implementation here is written in a CSR-centric +/// manner. This means that when using it for CSC, the order of the matrices needs to be +/// reversed (since transpose(AB) = transpose(B) * transpose(A) and CSC(A) = transpose(CSR(A)). +/// +/// We assume here that the matrices have already been verified to be dimensionally compatible. 
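+///
+/// Note also that the pattern of `c` must already be able to hold every entry produced by the
+/// product: if `a * b` produces an entry with no corresponding slot in `c`, an error is
+/// returned.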
+pub fn spmm_cs_prealloc( + beta: T, + c: &mut CsMatrix, + alpha: T, + a: &CsMatrix, + b: &CsMatrix, +) -> Result<(), OperationError> +where + T: Scalar + ClosedAdd + ClosedMul + Zero + One, +{ + for i in 0..c.pattern().major_dim() { + let a_lane_i = a.get_lane(i).unwrap(); + let mut c_lane_i = c.get_lane_mut(i).unwrap(); + for c_ij in c_lane_i.values_mut() { + *c_ij = beta.inlined_clone() * c_ij.inlined_clone(); + } + + for (&k, a_ik) in a_lane_i.minor_indices().iter().zip(a_lane_i.values()) { + let b_lane_k = b.get_lane(k).unwrap(); + let (mut c_lane_i_cols, mut c_lane_i_values) = c_lane_i.indices_and_values_mut(); + let alpha_aik = alpha.inlined_clone() * a_ik.inlined_clone(); + for (j, b_kj) in b_lane_k.minor_indices().iter().zip(b_lane_k.values()) { + // Determine the location in C to append the value + let (c_local_idx, _) = c_lane_i_cols + .iter() + .enumerate() + .find(|(_, c_col)| *c_col == j) + .ok_or_else(spmm_cs_unexpected_entry)?; + + c_lane_i_values[c_local_idx] += alpha_aik.inlined_clone() * b_kj.inlined_clone(); + c_lane_i_cols = &c_lane_i_cols[c_local_idx..]; + c_lane_i_values = &mut c_lane_i_values[c_local_idx..]; + } + } + } + + Ok(()) +} + +fn spadd_cs_unexpected_entry() -> OperationError { + OperationError::from_kind_and_message( + OperationErrorKind::InvalidPattern, + String::from("Found entry in `op(a)` that is not present in `c`."), + ) +} + +/// Helper functionality for implementing CSR/CSC SPADD. +pub fn spadd_cs_prealloc( + beta: T, + c: &mut CsMatrix, + alpha: T, + a: Op<&CsMatrix>, +) -> Result<(), OperationError> +where + T: Scalar + ClosedAdd + ClosedMul + Zero + One, +{ + match a { + Op::NoOp(a) => { + for (mut c_lane_i, a_lane_i) in c.lane_iter_mut().zip(a.lane_iter()) { + if beta != T::one() { + for c_ij in c_lane_i.values_mut() { + *c_ij *= beta.inlined_clone(); + } + } + + let (mut c_minors, mut c_vals) = c_lane_i.indices_and_values_mut(); + let (a_minors, a_vals) = (a_lane_i.minor_indices(), a_lane_i.values()); + + for (a_col, a_val) in a_minors.iter().zip(a_vals) { + // TODO: Use exponential search instead of linear search. + // If C has substantially more entries in the row than A, then a line search + // will needlessly visit many entries in C. + let (c_idx, _) = c_minors + .iter() + .enumerate() + .find(|(_, c_col)| *c_col == a_col) + .ok_or_else(spadd_cs_unexpected_entry)?; + c_vals[c_idx] += alpha.inlined_clone() * a_val.inlined_clone(); + c_minors = &c_minors[c_idx..]; + c_vals = &mut c_vals[c_idx..]; + } + } + } + Op::Transpose(a) => { + if beta != T::one() { + for c_ij in c.values_mut() { + *c_ij *= beta.inlined_clone(); + } + } + + for (i, a_lane_i) in a.lane_iter().enumerate() { + for (&j, a_val) in a_lane_i.minor_indices().iter().zip(a_lane_i.values()) { + let a_val = a_val.inlined_clone(); + let alpha = alpha.inlined_clone(); + match c.get_entry_mut(j, i).unwrap() { + SparseEntryMut::NonZero(c_ji) => *c_ji += alpha * a_val, + SparseEntryMut::Zero => return Err(spadd_cs_unexpected_entry()), + } + } + } + } + } + Ok(()) +} + +/// Helper functionality for implementing CSR/CSC SPMM. +/// +/// The implementation essentially assumes that `a` is a CSR matrix. To use it with CSC matrices, +/// the transposed operation must be specified for the CSC matrix. 
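+///
+/// (This works because the CSC storage of a matrix coincides with the CSR storage of its
+/// transpose, so a CSC matrix can be handed to this routine as the transposed version of a
+/// CSR-like structure.)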
+pub fn spmm_cs_dense<T>(
+    beta: T,
+    mut c: DMatrixSliceMut<T>,
+    alpha: T,
+    a: Op<&CsMatrix<T>>,
+    b: Op<DMatrixSlice<T>>,
+) where
+    T: Scalar + ClosedAdd + ClosedMul + Zero + One,
+{
+    match a {
+        Op::NoOp(a) => {
+            for j in 0..c.ncols() {
+                let mut c_col_j = c.column_mut(j);
+                for (c_ij, a_row_i) in c_col_j.iter_mut().zip(a.lane_iter()) {
+                    let mut dot_ij = T::zero();
+                    for (&k, a_ik) in a_row_i.minor_indices().iter().zip(a_row_i.values()) {
+                        let b_contrib = match b {
+                            Op::NoOp(ref b) => b.index((k, j)),
+                            Op::Transpose(ref b) => b.index((j, k)),
+                        };
+                        dot_ij += a_ik.inlined_clone() * b_contrib.inlined_clone();
+                    }
+                    *c_ij = beta.inlined_clone() * c_ij.inlined_clone()
+                        + alpha.inlined_clone() * dot_ij;
+                }
+            }
+        }
+        Op::Transpose(a) => {
+            // In this case, we have to pre-multiply C by beta
+            c *= beta;
+
+            for k in 0..a.pattern().major_dim() {
+                let a_row_k = a.get_lane(k).unwrap();
+                for (&i, a_ki) in a_row_k.minor_indices().iter().zip(a_row_k.values()) {
+                    let gamma_ki = alpha.inlined_clone() * a_ki.inlined_clone();
+                    let mut c_row_i = c.row_mut(i);
+                    match b {
+                        Op::NoOp(ref b) => {
+                            let b_row_k = b.row(k);
+                            for (c_ij, b_kj) in c_row_i.iter_mut().zip(b_row_k.iter()) {
+                                *c_ij += gamma_ki.inlined_clone() * b_kj.inlined_clone();
+                            }
+                        }
+                        Op::Transpose(ref b) => {
+                            let b_col_k = b.column(k);
+                            for (c_ij, b_jk) in c_row_i.iter_mut().zip(b_col_k.iter()) {
+                                *c_ij += gamma_ki.inlined_clone() * b_jk.inlined_clone();
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
diff --git a/nalgebra-sparse/src/ops/serial/csc.rs b/nalgebra-sparse/src/ops/serial/csc.rs
new file mode 100644
index 00000000..95350d91
--- /dev/null
+++ b/nalgebra-sparse/src/ops/serial/csc.rs
@@ -0,0 +1,255 @@
+use crate::csc::CscMatrix;
+use crate::ops::serial::cs::{spadd_cs_prealloc, spmm_cs_dense, spmm_cs_prealloc};
+use crate::ops::serial::{OperationError, OperationErrorKind};
+use crate::ops::Op;
+use nalgebra::{ClosedAdd, ClosedMul, DMatrixSlice, DMatrixSliceMut, RealField, Scalar};
+use num_traits::{One, Zero};
+
+use std::borrow::Cow;
+
+/// Sparse-dense matrix-matrix multiplication `C <- beta * C + alpha * op(A) * op(B)`.
+///
+/// # Panics
+///
+/// Panics if the dimensions of the matrices involved are not compatible with the expression.
+pub fn spmm_csc_dense<'a, T>(
+    beta: T,
+    c: impl Into<DMatrixSliceMut<'a, T>>,
+    alpha: T,
+    a: Op<&CscMatrix<T>>,
+    b: Op<impl Into<DMatrixSlice<'a, T>>>,
+) where
+    T: Scalar + ClosedAdd + ClosedMul + Zero + One,
+{
+    let b = b.convert();
+    spmm_csc_dense_(beta, c.into(), alpha, a, b)
+}
+
+fn spmm_csc_dense_<T>(
+    beta: T,
+    c: DMatrixSliceMut<T>,
+    alpha: T,
+    a: Op<&CscMatrix<T>>,
+    b: Op<DMatrixSlice<T>>,
+) where
+    T: Scalar + ClosedAdd + ClosedMul + Zero + One,
+{
+    assert_compatible_spmm_dims!(c, a, b);
+    // Need to interpret matrix as transposed since the spmm_cs_dense function assumes CSR layout
+    let a = a.transposed().map_same_op(|a| &a.cs);
+    spmm_cs_dense(beta, c, alpha, a, b)
+}
+
+/// Sparse matrix addition `C <- beta * C + alpha * op(A)`.
+///
+/// If the pattern of `c` does not accommodate all the non-zero entries in `a`, an error is
+/// returned.
+///
+/// # Panics
+///
+/// Panics if the dimensions of the matrices involved are not compatible with the expression.
+pub fn spadd_csc_prealloc<T>(
+    beta: T,
+    c: &mut CscMatrix<T>,
+    alpha: T,
+    a: Op<&CscMatrix<T>>,
+) -> Result<(), OperationError>
+where
+    T: Scalar + ClosedAdd + ClosedMul + Zero + One,
+{
+    assert_compatible_spadd_dims!(c, a);
+    spadd_cs_prealloc(beta, &mut c.cs, alpha, a.map_same_op(|a| &a.cs))
+}
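For reviewers, here is a small sketch of the `prealloc` workflow (illustrative only, not part of the patch). It assumes the dense↔sparse `From` conversions and the `pattern()` accessor added elsewhere in this PR, and uses `spadd_pattern` from the pattern module further down to pre-allocate the output:

```rust
use nalgebra::DMatrix;
use nalgebra_sparse::csc::CscMatrix;
use nalgebra_sparse::ops::serial::{spadd_csc_prealloc, spadd_pattern};
use nalgebra_sparse::ops::Op;

fn main() {
    let a = CscMatrix::from(&DMatrix::from_row_slice(2, 2, &[1.0, 0.0, 0.0, 2.0]));
    let b = CscMatrix::from(&DMatrix::from_row_slice(2, 2, &[0.0, 3.0, 0.0, 4.0]));

    // Pre-allocate C with a pattern that can hold A + B, with all values zero.
    let pattern = spadd_pattern(a.pattern(), b.pattern());
    let values = vec![0.0; pattern.nnz()];
    let mut c = CscMatrix::try_from_pattern_and_values(pattern, values).unwrap();

    // C <- 1.0 * C + 1.0 * A followed by C <- 1.0 * C + 1.0 * B, i.e. C = A + B.
    spadd_csc_prealloc(1.0, &mut c, 1.0, Op::NoOp(&a)).unwrap();
    spadd_csc_prealloc(1.0, &mut c, 1.0, Op::NoOp(&b)).unwrap();

    let expected = DMatrix::from_row_slice(2, 2, &[1.0, 3.0, 0.0, 6.0]);
    assert_eq!(DMatrix::from(&c), expected);
}
```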
+/// Sparse-sparse matrix multiplication, `C <- beta * C + alpha * op(A) * op(B)`.
+///
+/// # Errors
+///
+/// If the sparsity pattern of `C` is not able to store the result of the operation,
+/// an error is returned.
+///
+/// # Panics
+///
+/// Panics if the dimensions of the matrices involved are not compatible with the expression.
+pub fn spmm_csc_prealloc<T>(
+    beta: T,
+    c: &mut CscMatrix<T>,
+    alpha: T,
+    a: Op<&CscMatrix<T>>,
+    b: Op<&CscMatrix<T>>,
+) -> Result<(), OperationError>
+where
+    T: Scalar + ClosedAdd + ClosedMul + Zero + One,
+{
+    assert_compatible_spmm_dims!(c, a, b);
+
+    use Op::{NoOp, Transpose};
+
+    match (&a, &b) {
+        (NoOp(ref a), NoOp(ref b)) => {
+            // Note: We have to reverse the order for CSC matrices
+            spmm_cs_prealloc(beta, &mut c.cs, alpha, &b.cs, &a.cs)
+        }
+        _ => {
+            // Currently we handle transposition by explicitly precomputing transposed matrices
+            // and calling the operation again without transposition
+            let a_ref: &CscMatrix<T> = a.inner_ref();
+            let b_ref: &CscMatrix<T> = b.inner_ref();
+            let (a, b) = {
+                use Cow::*;
+                match (&a, &b) {
+                    (NoOp(_), NoOp(_)) => unreachable!(),
+                    (Transpose(ref a), NoOp(_)) => (Owned(a.transpose()), Borrowed(b_ref)),
+                    (NoOp(_), Transpose(ref b)) => (Borrowed(a_ref), Owned(b.transpose())),
+                    (Transpose(ref a), Transpose(ref b)) => {
+                        (Owned(a.transpose()), Owned(b.transpose()))
+                    }
+                }
+            };
+
+            spmm_csc_prealloc(beta, c, alpha, NoOp(a.as_ref()), NoOp(b.as_ref()))
+        }
+    }
+}
+
+/// Solve the lower triangular system `op(L) X = B`.
+///
+/// Only the lower triangular part of L is read, and the result is stored in B.
+///
+/// # Errors
+///
+/// An error is returned if the system can not be solved due to the matrix being singular.
+///
+/// # Panics
+///
+/// Panics if `L` is not square, or if `L` and `B` are not dimensionally compatible.
+pub fn spsolve_csc_lower_triangular<'a, T: RealField>(
+    l: Op<&CscMatrix<T>>,
+    b: impl Into<DMatrixSliceMut<'a, T>>,
+) -> Result<(), OperationError> {
+    let b = b.into();
+    let l_matrix = l.into_inner();
+    assert_eq!(
+        l_matrix.nrows(),
+        l_matrix.ncols(),
+        "Matrix must be square for triangular solve."
+    );
+    assert_eq!(
+        l_matrix.nrows(),
+        b.nrows(),
+        "Dimension mismatch in sparse lower triangular solver."
+    );
+    match l {
+        Op::NoOp(a) => spsolve_csc_lower_triangular_no_transpose(a, b),
+        Op::Transpose(a) => spsolve_csc_lower_triangular_transpose(a, b),
+    }
+}
+
+fn spsolve_csc_lower_triangular_no_transpose<T: RealField>(
+    l: &CscMatrix<T>,
+    b: DMatrixSliceMut<T>,
+) -> Result<(), OperationError> {
+    let mut x = b;
+
+    // Solve column-by-column
+    for j in 0..x.ncols() {
+        let mut x_col_j = x.column_mut(j);
+
+        for k in 0..l.ncols() {
+            let l_col_k = l.col(k);
+
+            // Skip entries above the diagonal
+            // TODO: Can use exponential search here to quickly skip entries
+            // (we'd like to avoid using binary search as it's very cache unfriendly
+            // and the matrix might actually *be* lower triangular, which would induce
+            // a severe penalty)
+            let diag_csc_index = l_col_k.row_indices().iter().position(|&i| i == k);
+            if let Some(diag_csc_index) = diag_csc_index {
+                let l_kk = l_col_k.values()[diag_csc_index];
+
+                if l_kk != T::zero() {
+                    // Update entry associated with diagonal
+                    x_col_j[k] /= l_kk;
+                    // Copy value after updating (so we don't run into the borrow checker)
+                    let x_kj = x_col_j[k];
+
+                    let row_indices = &l_col_k.row_indices()[(diag_csc_index + 1)..];
+                    let l_values = &l_col_k.values()[(diag_csc_index + 1)..];
+
+                    // Note: The remaining entries are below the diagonal
+                    for (&i, l_ik) in row_indices.iter().zip(l_values) {
+                        let x_ij = &mut x_col_j[i];
+                        *x_ij -= l_ik.inlined_clone() * x_kj;
+                    }
+
+                    x_col_j[k] = x_kj;
+                } else {
+                    return spsolve_encountered_zero_diagonal();
+                }
+            } else {
+                return spsolve_encountered_zero_diagonal();
+            }
+        }
+    }
+
+    Ok(())
+}
+
+fn spsolve_encountered_zero_diagonal() -> Result<(), OperationError> {
+    let message = "Matrix contains at least one diagonal entry that is zero.";
+    Err(OperationError::from_kind_and_message(
+        OperationErrorKind::Singular,
+        String::from(message),
+    ))
+}
+
+fn spsolve_csc_lower_triangular_transpose<T: RealField>(
+    l: &CscMatrix<T>,
+    b: DMatrixSliceMut<T>,
+) -> Result<(), OperationError> {
+    let mut x = b;
+
+    // Solve column-by-column
+    for j in 0..x.ncols() {
+        let mut x_col_j = x.column_mut(j);
+
+        // Due to the transposition, we're essentially solving an upper triangular system,
+        // and the columns in our matrix become rows
+
+        for i in (0..l.ncols()).rev() {
+            let l_col_i = l.col(i);
+
+            // Skip entries above the diagonal
+            // TODO: Can use exponential search here to quickly skip entries
+            let diag_csc_index = l_col_i.row_indices().iter().position(|&k| i == k);
+            if let Some(diag_csc_index) = diag_csc_index {
+                let l_ii = l_col_i.values()[diag_csc_index];
+
+                if l_ii != T::zero() {
+                    // Note: the division by the diagonal entry is deferred until the
+                    // remaining contributions have been subtracted (see below)
+
+                    // Copy the value so that we don't run into the borrow checker
+                    let mut x_ii = x_col_j[i];
+
+                    let row_indices = &l_col_i.row_indices()[(diag_csc_index + 1)..];
+                    let a_values = &l_col_i.values()[(diag_csc_index + 1)..];
+
+                    // Note: The remaining entries are below the diagonal
+                    for (&k, &l_ki) in row_indices.iter().zip(a_values) {
+                        let x_kj = x_col_j[k];
+                        x_ii -= l_ki * x_kj;
+                    }
+
+                    x_col_j[i] = x_ii / l_ii;
+                } else {
+                    return spsolve_encountered_zero_diagonal();
+                }
+            } else {
+                return spsolve_encountered_zero_diagonal();
+            }
+        }
+    }
+
+    Ok(())
+}
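A quick usage sketch of the triangular solver for review (not part of the patch); it assumes the dense→`CscMatrix` `From` conversion added elsewhere in this PR, and relies on explicit zeros in the dense input simply not being stored:

```rust
use nalgebra::DMatrix;
use nalgebra_sparse::csc::CscMatrix;
use nalgebra_sparse::ops::serial::spsolve_csc_lower_triangular;
use nalgebra_sparse::ops::Op;

fn main() {
    // L = [2 0; 1 4] is lower triangular with a non-zero diagonal.
    let l = CscMatrix::from(&DMatrix::from_row_slice(2, 2, &[2.0, 0.0, 1.0, 4.0]));

    // Solve L x = b in place: b is overwritten with the solution x.
    let mut b = DMatrix::from_row_slice(2, 1, &[2.0, 9.0]);
    spsolve_csc_lower_triangular(Op::NoOp(&l), &mut b).unwrap();

    // x1 = 2 / 2 = 1, then x2 = (9 - 1 * x1) / 4 = 2.
    assert_eq!(b, DMatrix::from_row_slice(2, 1, &[1.0, 2.0]));
}
```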
diff --git a/nalgebra-sparse/src/ops/serial/csr.rs b/nalgebra-sparse/src/ops/serial/csr.rs
new file mode 100644
index 00000000..f6fcc62a
--- /dev/null
+++ b/nalgebra-sparse/src/ops/serial/csr.rs
@@ -0,0 +1,106 @@
+use crate::csr::CsrMatrix;
+use crate::ops::serial::cs::{spadd_cs_prealloc, spmm_cs_dense, spmm_cs_prealloc};
+use crate::ops::serial::OperationError;
+use crate::ops::Op;
+use nalgebra::{ClosedAdd, ClosedMul, DMatrixSlice, DMatrixSliceMut, Scalar};
+use num_traits::{One, Zero};
+use std::borrow::Cow;
+
+/// Sparse-dense matrix-matrix multiplication `C <- beta * C + alpha * op(A) * op(B)`.
+pub fn spmm_csr_dense<'a, T>(
+    beta: T,
+    c: impl Into<DMatrixSliceMut<'a, T>>,
+    alpha: T,
+    a: Op<&CsrMatrix<T>>,
+    b: Op<impl Into<DMatrixSlice<'a, T>>>,
+) where
+    T: Scalar + ClosedAdd + ClosedMul + Zero + One,
+{
+    let b = b.convert();
+    spmm_csr_dense_(beta, c.into(), alpha, a, b)
+}
+
+fn spmm_csr_dense_<T>(
+    beta: T,
+    c: DMatrixSliceMut<T>,
+    alpha: T,
+    a: Op<&CsrMatrix<T>>,
+    b: Op<DMatrixSlice<T>>,
+) where
+    T: Scalar + ClosedAdd + ClosedMul + Zero + One,
+{
+    assert_compatible_spmm_dims!(c, a, b);
+    spmm_cs_dense(beta, c, alpha, a.map_same_op(|a| &a.cs), b)
+}
+
+/// Sparse matrix addition `C <- beta * C + alpha * op(A)`.
+///
+/// # Errors
+///
+/// If the pattern of `c` does not accommodate all the non-zero entries in `a`, an error is
+/// returned.
+///
+/// # Panics
+///
+/// Panics if the dimensions of the matrices involved are not compatible with the expression.
+pub fn spadd_csr_prealloc<T>(
+    beta: T,
+    c: &mut CsrMatrix<T>,
+    alpha: T,
+    a: Op<&CsrMatrix<T>>,
+) -> Result<(), OperationError>
+where
+    T: Scalar + ClosedAdd + ClosedMul + Zero + One,
+{
+    assert_compatible_spadd_dims!(c, a);
+    spadd_cs_prealloc(beta, &mut c.cs, alpha, a.map_same_op(|a| &a.cs))
+}
+
+/// Sparse-sparse matrix multiplication, `C <- beta * C + alpha * op(A) * op(B)`.
+///
+/// # Errors
+///
+/// If the pattern of `C` is not able to hold the result of the operation, an error is returned.
+///
+/// # Panics
+///
+/// Panics if the dimensions of the matrices involved are not compatible with the expression.
+pub fn spmm_csr_prealloc<T>(
+    beta: T,
+    c: &mut CsrMatrix<T>,
+    alpha: T,
+    a: Op<&CsrMatrix<T>>,
+    b: Op<&CsrMatrix<T>>,
+) -> Result<(), OperationError>
+where
+    T: Scalar + ClosedAdd + ClosedMul + Zero + One,
+{
+    assert_compatible_spmm_dims!(c, a, b);
+
+    use Op::{NoOp, Transpose};
+
+    match (&a, &b) {
+        (NoOp(ref a), NoOp(ref b)) => spmm_cs_prealloc(beta, &mut c.cs, alpha, &a.cs, &b.cs),
+        _ => {
+            // Currently we handle transposition by explicitly precomputing transposed matrices
+            // and calling the operation again without transposition
+            // TODO: At least use workspaces to allow control of allocations. Maybe
+            // consider implementing certain patterns (like A^T * B) explicitly
+            let a_ref: &CsrMatrix<T> = a.inner_ref();
+            let b_ref: &CsrMatrix<T> = b.inner_ref();
+            let (a, b) = {
+                use Cow::*;
+                match (&a, &b) {
+                    (NoOp(_), NoOp(_)) => unreachable!(),
+                    (Transpose(ref a), NoOp(_)) => (Owned(a.transpose()), Borrowed(b_ref)),
+                    (NoOp(_), Transpose(ref b)) => (Borrowed(a_ref), Owned(b.transpose())),
+                    (Transpose(ref a), Transpose(ref b)) => {
+                        (Owned(a.transpose()), Owned(b.transpose()))
+                    }
+                }
+            };
+
+            spmm_csr_prealloc(beta, c, alpha, NoOp(a.as_ref()), NoOp(b.as_ref()))
+        }
+    }
+}
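A matching sketch for the CSR SPMM prealloc workflow (illustrative only, not part of the patch), pairing `spmm_csr_prealloc` with `spmm_csr_pattern` from the pattern module below; the dense↔sparse `From` conversions are assumed:

```rust
use nalgebra::DMatrix;
use nalgebra_sparse::csr::CsrMatrix;
use nalgebra_sparse::ops::serial::{spmm_csr_pattern, spmm_csr_prealloc};
use nalgebra_sparse::ops::Op;

fn main() {
    let a = CsrMatrix::from(&DMatrix::from_row_slice(2, 3, &[
        1.0, 0.0, 2.0,
        0.0, 3.0, 0.0,
    ]));
    let b = CsrMatrix::from(&DMatrix::from_row_slice(3, 2, &[
        4.0, 0.0,
        0.0, 5.0,
        6.0, 0.0,
    ]));

    // Compute the pattern of C = A * B up front and fill it with zeros.
    let pattern = spmm_csr_pattern(a.pattern(), b.pattern());
    let values = vec![0.0; pattern.nnz()];
    let mut c = CsrMatrix::try_from_pattern_and_values(pattern, values).unwrap();

    // C <- 0.0 * C + 1.0 * A * B
    spmm_csr_prealloc(0.0, &mut c, 1.0, Op::NoOp(&a), Op::NoOp(&b)).unwrap();

    let expected = DMatrix::from_row_slice(2, 2, &[16.0, 0.0, 0.0, 15.0]);
    assert_eq!(DMatrix::from(&c), expected);
}
```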
diff --git a/nalgebra-sparse/src/ops/serial/mod.rs b/nalgebra-sparse/src/ops/serial/mod.rs
new file mode 100644
index 00000000..82285e86
--- /dev/null
+++ b/nalgebra-sparse/src/ops/serial/mod.rs
@@ -0,0 +1,124 @@
+//! Serial sparse matrix arithmetic routines.
+//!
+//! All routines are single-threaded.
+//!
+//! Some operations have the `prealloc` suffix. This means that they expect that the sparsity
+//! pattern of the output matrix has already been pre-allocated: that is, the pattern of the result
+//! of the operation fits entirely in the output pattern. In the future, there will also be
+//! some operations which will be able to dynamically adapt the output pattern to fit the
+//! result, but these have yet to be implemented.
+
+#[macro_use]
+macro_rules! assert_compatible_spmm_dims {
+    ($c:expr, $a:expr, $b:expr) => {{
+        use crate::ops::Op::{NoOp, Transpose};
+        match (&$a, &$b) {
+            (NoOp(ref a), NoOp(ref b)) => {
+                assert_eq!($c.nrows(), a.nrows(), "C.nrows() != A.nrows()");
+                assert_eq!($c.ncols(), b.ncols(), "C.ncols() != B.ncols()");
+                assert_eq!(a.ncols(), b.nrows(), "A.ncols() != B.nrows()");
+            }
+            (Transpose(ref a), NoOp(ref b)) => {
+                assert_eq!($c.nrows(), a.ncols(), "C.nrows() != A.ncols()");
+                assert_eq!($c.ncols(), b.ncols(), "C.ncols() != B.ncols()");
+                assert_eq!(a.nrows(), b.nrows(), "A.nrows() != B.nrows()");
+            }
+            (NoOp(ref a), Transpose(ref b)) => {
+                assert_eq!($c.nrows(), a.nrows(), "C.nrows() != A.nrows()");
+                assert_eq!($c.ncols(), b.nrows(), "C.ncols() != B.nrows()");
+                assert_eq!(a.ncols(), b.ncols(), "A.ncols() != B.ncols()");
+            }
+            (Transpose(ref a), Transpose(ref b)) => {
+                assert_eq!($c.nrows(), a.ncols(), "C.nrows() != A.ncols()");
+                assert_eq!($c.ncols(), b.nrows(), "C.ncols() != B.nrows()");
+                assert_eq!(a.nrows(), b.ncols(), "A.nrows() != B.ncols()");
+            }
+        }
+    }};
+}
+
+#[macro_use]
+macro_rules! assert_compatible_spadd_dims {
+    ($c:expr, $a:expr) => {
+        use crate::ops::Op;
+        match $a {
+            Op::NoOp(a) => {
+                assert_eq!($c.nrows(), a.nrows(), "C.nrows() != A.nrows()");
+                assert_eq!($c.ncols(), a.ncols(), "C.ncols() != A.ncols()");
+            }
+            Op::Transpose(a) => {
+                assert_eq!($c.nrows(), a.ncols(), "C.nrows() != A.ncols()");
+                assert_eq!($c.ncols(), a.nrows(), "C.ncols() != A.nrows()");
+            }
+        }
+    };
+}
+
+mod cs;
+mod csc;
+mod csr;
+mod pattern;
+
+pub use csc::*;
+pub use csr::*;
+pub use pattern::*;
+use std::fmt;
+use std::fmt::Formatter;
+
+/// A description of the error that occurred during an arithmetic operation.
+#[derive(Clone, Debug)]
+pub struct OperationError {
+    error_kind: OperationErrorKind,
+    message: String,
+}
+
+/// The different kinds of operation errors that may occur.
+#[non_exhaustive]
+#[derive(Clone, Debug)]
+pub enum OperationErrorKind {
+    /// Indicates that one or more sparsity patterns involved in the operation violate the
+    /// expectations of the routine.
+    ///
+    /// For example, this could indicate that the sparsity pattern of the output is not able to
+    /// contain the result of the operation.
+    InvalidPattern,
+
+    /// Indicates that a matrix is singular when it is expected to be invertible.
+    Singular,
+}
+
+impl OperationError {
+    fn from_kind_and_message(error_type: OperationErrorKind, message: String) -> Self {
+        Self {
+            error_kind: error_type,
+            message,
+        }
+    }
+
+    /// The operation error kind.
+    pub fn kind(&self) -> &OperationErrorKind {
+        &self.error_kind
+    }
+
+    /// The underlying error message.
+    pub fn message(&self) -> &str {
+        self.message.as_str()
+    }
+}
+
+impl fmt::Display for OperationError {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        write!(f, "Sparse matrix operation error: ")?;
+        match self.kind() {
+            OperationErrorKind::InvalidPattern => {
+                write!(f, "InvalidPattern")?;
+            }
+            OperationErrorKind::Singular => {
+                write!(f, "Singular")?;
+            }
+        }
+        write!(f, " Message: {}", self.message)
+    }
+}
+
+impl std::error::Error for OperationError {}
diff --git a/nalgebra-sparse/src/ops/serial/pattern.rs b/nalgebra-sparse/src/ops/serial/pattern.rs
new file mode 100644
index 00000000..b73f3375
--- /dev/null
+++ b/nalgebra-sparse/src/ops/serial/pattern.rs
@@ -0,0 +1,152 @@
+use crate::pattern::SparsityPattern;
+
+use std::iter;
+
+/// Sparse matrix addition pattern construction, `C <- A + B`.
+///
+/// Builds the pattern for `C`, which is able to hold the result of the sum `A + B`.
+/// The patterns are assumed to have the same major and minor dimensions. In other words,
+/// both patterns `A` and `B` must both stem from the same kind of compressed matrix:
+/// CSR or CSC.
+///
+/// # Panics
+///
+/// Panics if the patterns do not have the same major and minor dimensions.
+pub fn spadd_pattern(a: &SparsityPattern, b: &SparsityPattern) -> SparsityPattern {
+    assert_eq!(
+        a.major_dim(),
+        b.major_dim(),
+        "Patterns must have identical major dimensions."
+    );
+    assert_eq!(
+        a.minor_dim(),
+        b.minor_dim(),
+        "Patterns must have identical minor dimensions."
+    );
+
+    let mut offsets = Vec::new();
+    let mut indices = Vec::new();
+    offsets.reserve(a.major_dim() + 1);
+    indices.clear();
+
+    offsets.push(0);
+
+    for lane_idx in 0..a.major_dim() {
+        let lane_a = a.lane(lane_idx);
+        let lane_b = b.lane(lane_idx);
+        indices.extend(iterate_union(lane_a, lane_b));
+        offsets.push(indices.len());
+    }
+
+    // TODO: Consider circumventing format checks? (requires unsafe, should benchmark first)
+    SparsityPattern::try_from_offsets_and_indices(a.major_dim(), a.minor_dim(), offsets, indices)
+        .expect("Internal error: Pattern must be valid by definition")
+}
+
+/// Sparse matrix multiplication pattern construction, `C <- A * B`.
+///
+/// Assumes that the sparsity patterns both represent CSC matrices, and the result is also
+/// represented as the sparsity pattern of a CSC matrix.
+///
+/// # Panics
+///
+/// Panics if the patterns, when interpreted as CSC patterns, are not compatible for
+/// matrix multiplication.
+pub fn spmm_csc_pattern(a: &SparsityPattern, b: &SparsityPattern) -> SparsityPattern {
+    // Let C = A * B in CSC format. We note that
+    //  C^T = B^T * A^T.
+    // Since the interpretation of a CSC matrix in CSR format represents the transpose of the
+    // matrix in CSR, we can compute C^T in *CSR format* by switching the order of a and b,
+    // which lets us obtain C^T in CSR format. Re-interpreting this as CSC gives us C in CSC format
+    spmm_csr_pattern(b, a)
+}
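A concrete illustration of the per-lane union semantics (sketch, not part of the patch):

```rust
use nalgebra_sparse::ops::serial::spadd_pattern;
use nalgebra_sparse::pattern::SparsityPattern;

fn main() {
    // Two patterns with 2 major lanes and minor dimension 3:
    // a has lanes [0, 2] and [1]; b has lanes [1] and [1, 2].
    let a = SparsityPattern::try_from_offsets_and_indices(2, 3, vec![0, 2, 3], vec![0, 2, 1])
        .unwrap();
    let b = SparsityPattern::try_from_offsets_and_indices(2, 3, vec![0, 1, 3], vec![1, 1, 2])
        .unwrap();

    // Each lane of the result is the sorted union of the corresponding lanes.
    let c = spadd_pattern(&a, &b);
    assert_eq!(c.major_offsets(), &[0, 3, 5]);
    assert_eq!(c.minor_indices(), &[0, 1, 2, 1, 2]);
}
```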
+/// Sparse matrix multiplication pattern construction, `C <- A * B`.
+///
+/// Assumes that the sparsity patterns both represent CSR matrices, and the result is also
+/// represented as the sparsity pattern of a CSR matrix.
+///
+/// # Panics
+///
+/// Panics if the patterns, when interpreted as CSR patterns, are not compatible for
+/// matrix multiplication.
+pub fn spmm_csr_pattern(a: &SparsityPattern, b: &SparsityPattern) -> SparsityPattern {
+    assert_eq!(
+        a.minor_dim(),
+        b.major_dim(),
+        "a and b must have compatible dimensions"
+    );
+
+    let mut offsets = Vec::new();
+    let mut indices = Vec::new();
+    offsets.push(0);
+
+    // Keep a vector of whether we have visited a particular minor index when working
+    // on a major lane
+    // TODO: Consider using a bitvec or similar here to reduce pressure on memory
+    // (would cut memory use to 1/8, which might help reduce cache misses)
+    let mut visited = vec![false; b.minor_dim()];
+
+    for i in 0..a.major_dim() {
+        let a_lane_i = a.lane(i);
+        let c_lane_i_offset = *offsets.last().unwrap();
+        for &k in a_lane_i {
+            let b_lane_k = b.lane(k);
+
+            for &j in b_lane_k {
+                let have_visited_j = &mut visited[j];
+                if !*have_visited_j {
+                    indices.push(j);
+                    *have_visited_j = true;
+                }
+            }
+        }
+
+        let c_lane_i = &mut indices[c_lane_i_offset..];
+        c_lane_i.sort_unstable();
+
+        // Reset visits so that visited[j] == false for all j for the next major lane
+        for j in c_lane_i {
+            visited[*j] = false;
+        }
+
+        offsets.push(indices.len());
+    }
+
+    SparsityPattern::try_from_offsets_and_indices(a.major_dim(), b.minor_dim(), offsets, indices)
+        .expect("Internal error: Invalid pattern during matrix multiplication pattern construction")
+}
+
+/// Iterate over the union of the two sets represented by sorted slices
+/// (with unique elements)
+fn iterate_union<'a>(
+    mut sorted_a: &'a [usize],
+    mut sorted_b: &'a [usize],
+) -> impl Iterator<Item = usize> + 'a {
+    iter::from_fn(move || {
+        if let (Some(a_item), Some(b_item)) = (sorted_a.first(), sorted_b.first()) {
+            let item = if a_item < b_item {
+                sorted_a = &sorted_a[1..];
+                a_item
+            } else if b_item < a_item {
+                sorted_b = &sorted_b[1..];
+                b_item
+            } else {
+                // Both lists contain the same element, advance both slices to avoid
+                // duplicate entries in the result
+                sorted_a = &sorted_a[1..];
+                sorted_b = &sorted_b[1..];
+                a_item
+            };
+            Some(*item)
+        } else if let Some(a_item) = sorted_a.first() {
+            sorted_a = &sorted_a[1..];
+            Some(*a_item)
+        } else if let Some(b_item) = sorted_b.first() {
+            sorted_b = &sorted_b[1..];
+            Some(*b_item)
+        } else {
+            None
+        }
+    })
+}
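Since `iterate_union` is private, its behavior could be pinned down with a unit test placed in this module, along these lines (sketch, not part of the patch):

```rust
#[cfg(test)]
mod tests {
    use super::iterate_union;

    #[test]
    fn iterate_union_merges_sorted_sets() {
        let a = [0, 2, 5];
        let b = [1, 2, 6];
        // The shared element 2 appears only once in the union.
        let union: Vec<usize> = iterate_union(&a, &b).collect();
        assert_eq!(union, vec![0, 1, 2, 5, 6]);
    }
}
```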
diff --git a/nalgebra-sparse/src/pattern.rs b/nalgebra-sparse/src/pattern.rs
new file mode 100644
index 00000000..08552d6a
--- /dev/null
+++ b/nalgebra-sparse/src/pattern.rs
@@ -0,0 +1,393 @@
+//! Sparsity patterns for CSR and CSC matrices.
+use crate::cs::transpose_cs;
+use crate::SparseFormatError;
+use std::error::Error;
+use std::fmt;
+
+/// A representation of the sparsity pattern of a CSR or CSC matrix.
+///
+/// CSR and CSC matrices store matrices in a very similar fashion. In fact, in a certain sense,
+/// they are transposed. More precisely, when reinterpreting the three data arrays of a CSR
+/// matrix as a CSC matrix, we obtain the CSC representation of its transpose.
+///
+/// [`SparsityPattern`] is an abstraction built on this observation. Whereas a CSR matrix
+/// stores a matrix row-by-row, and a CSC matrix stores a matrix column-by-column, a
+/// `SparsityPattern` represents only the index data structure of a matrix *lane-by-lane*.
+/// Here, a *lane* is a generalization of rows and columns. We further define *major lanes*
+/// and *minor lanes*. The sparsity pattern of a CSR matrix is then obtained by interpreting
+/// major/minor as row/column. Conversely, we obtain the sparsity pattern of a CSC matrix by
+/// interpreting major/minor as column/row.
+///
+/// This allows us to use a common abstraction to talk about sparsity patterns of CSR and CSC
+/// matrices. This is convenient, because at the abstract level, the invariants of the formats
+/// are the same. Hence we may encode the invariants of the index data structure separately from
+/// the scalar values of the matrix. This is especially useful in applications where the
+/// sparsity pattern is built ahead of the matrix values, or the same sparsity pattern is re-used
+/// between different matrices. Finally, we can use `SparsityPattern` to encode adjacency
+/// information in graphs.
+///
+/// # Format
+///
+/// The format is exactly the same as for the index data structures of CSR and CSC matrices.
+/// This means that the sparsity pattern of an `m x n` sparse matrix with `nnz` non-zeros,
+/// where in this case `m x n` does *not* mean `rows x columns`, but rather `majors x minors`,
+/// is represented by the following two arrays:
+///
+/// - `major_offsets`, an array of integers with length `m + 1`.
+/// - `minor_indices`, an array of integers with length `nnz`.
+///
+/// The invariants and relationship between `major_offsets` and `minor_indices` remain the same
+/// as for `row_offsets` and `col_indices` in the [CSR](`crate::csr::CsrMatrix`) format
+/// specification.
+#[derive(Debug, Clone, PartialEq, Eq)]
+// TODO: Make SparsityPattern parametrized by index type
+// (need a solid abstraction for index types though)
+pub struct SparsityPattern {
+    major_offsets: Vec<usize>,
+    minor_indices: Vec<usize>,
+    minor_dim: usize,
+}
+
+impl SparsityPattern {
+    /// Create a sparsity pattern of the given dimensions without explicitly stored entries.
+    pub fn zeros(major_dim: usize, minor_dim: usize) -> Self {
+        Self {
+            major_offsets: vec![0; major_dim + 1],
+            minor_indices: vec![],
+            minor_dim,
+        }
+    }
+
+    /// The offsets for the major dimension.
+    #[inline]
+    pub fn major_offsets(&self) -> &[usize] {
+        &self.major_offsets
+    }
+
+    /// The indices for the minor dimension.
+    #[inline]
+    pub fn minor_indices(&self) -> &[usize] {
+        &self.minor_indices
+    }
+
+    /// The number of major lanes in the pattern.
+    #[inline]
+    pub fn major_dim(&self) -> usize {
+        assert!(self.major_offsets.len() > 0);
+        self.major_offsets.len() - 1
+    }
+
+    /// The number of minor lanes in the pattern.
+    #[inline]
+    pub fn minor_dim(&self) -> usize {
+        self.minor_dim
+    }
+
+    /// The number of "non-zeros", i.e. explicitly stored entries in the pattern.
+    #[inline]
+    pub fn nnz(&self) -> usize {
+        self.minor_indices.len()
+    }
+
+    /// Get the lane at the given index.
+    ///
+    /// Panics
+    /// ------
+    ///
+    /// Panics if `major_index` is out of bounds.
+    #[inline]
+    pub fn lane(&self, major_index: usize) -> &[usize] {
+        self.get_lane(major_index).unwrap()
+    }
+
+    /// Get the lane at the given index, or `None` if out of bounds.
+    #[inline]
+    pub fn get_lane(&self, major_index: usize) -> Option<&[usize]> {
+        let offset_begin = *self.major_offsets().get(major_index)?;
+        let offset_end = *self.major_offsets().get(major_index + 1)?;
+        Some(&self.minor_indices()[offset_begin..offset_end])
+    }
+    /// Try to construct a sparsity pattern from the given dimensions, major offsets
+    /// and minor indices.
+    ///
+    /// Returns an error if the data does not conform to the requirements.
+    pub fn try_from_offsets_and_indices(
+        major_dim: usize,
+        minor_dim: usize,
+        major_offsets: Vec<usize>,
+        minor_indices: Vec<usize>,
+    ) -> Result<Self, SparsityPatternFormatError> {
+        use SparsityPatternFormatError::*;
+
+        if major_offsets.len() != major_dim + 1 {
+            return Err(InvalidOffsetArrayLength);
+        }
+
+        // Check that the first and last offsets conform to the specification
+        {
+            let first_offset_ok = *major_offsets.first().unwrap() == 0;
+            let last_offset_ok = *major_offsets.last().unwrap() == minor_indices.len();
+            if !first_offset_ok || !last_offset_ok {
+                return Err(InvalidOffsetFirstLast);
+            }
+        }
+
+        // Test that each lane has strictly monotonically increasing minor indices, i.e.
+        // minor indices within a lane are sorted, unique. In addition, each minor index
+        // must be in bounds with respect to the minor dimension.
+        {
+            for lane_idx in 0..major_dim {
+                let range_start = major_offsets[lane_idx];
+                let range_end = major_offsets[lane_idx + 1];
+
+                // Test that major offsets are monotonically increasing
+                if range_start > range_end {
+                    return Err(NonmonotonicOffsets);
+                }
+
+                let minor_indices = &minor_indices[range_start..range_end];
+
+                // We test for in-bounds, uniqueness and monotonicity at the same time
+                // to ensure that we only visit each minor index once
+                let mut iter = minor_indices.iter();
+                let mut prev = None;
+
+                while let Some(next) = iter.next().copied() {
+                    if next >= minor_dim {
+                        return Err(MinorIndexOutOfBounds);
+                    }
+
+                    if let Some(prev) = prev {
+                        if prev > next {
+                            return Err(NonmonotonicMinorIndices);
+                        } else if prev == next {
+                            return Err(DuplicateEntry);
+                        }
+                    }
+                    prev = Some(next);
+                }
+            }
+        }
+
+        Ok(Self {
+            major_offsets,
+            minor_indices,
+            minor_dim,
+        })
+    }
+
+    /// An iterator over the explicitly stored "non-zero" entries (i, j).
+    ///
+    /// The iteration happens in a lane-major fashion, meaning that the lane index i
+    /// increases monotonically, and the minor index j increases monotonically within each
+    /// lane i.
+    ///
+    /// Examples
+    /// --------
+    ///
+    /// ```
+    /// # use nalgebra_sparse::pattern::SparsityPattern;
+    /// let offsets = vec![0, 2, 3, 4];
+    /// let minor_indices = vec![0, 2, 1, 0];
+    /// let pattern = SparsityPattern::try_from_offsets_and_indices(3, 4, offsets, minor_indices)
+    ///     .unwrap();
+    ///
+    /// let entries: Vec<_> = pattern.entries().collect();
+    /// assert_eq!(entries, vec![(0, 0), (0, 2), (1, 1), (2, 0)]);
+    /// ```
+    ///
+    pub fn entries(&self) -> SparsityPatternIter {
+        SparsityPatternIter::from_pattern(self)
+    }
+
+    /// Returns the raw offset and index data for the sparsity pattern.
+    ///
+    /// Examples
+    /// --------
+    ///
+    /// ```
+    /// # use nalgebra_sparse::pattern::SparsityPattern;
+    /// let offsets = vec![0, 2, 3, 4];
+    /// let minor_indices = vec![0, 2, 1, 0];
+    /// let pattern = SparsityPattern::try_from_offsets_and_indices(
+    ///     3,
+    ///     4,
+    ///     offsets.clone(),
+    ///     minor_indices.clone())
+    ///     .unwrap();
+    /// let (offsets2, minor_indices2) = pattern.disassemble();
+    /// assert_eq!(offsets2, offsets);
+    /// assert_eq!(minor_indices2, minor_indices);
+    /// ```
+    pub fn disassemble(self) -> (Vec<usize>, Vec<usize>) {
+        (self.major_offsets, self.minor_indices)
+    }
+    /// Computes the transpose of the sparsity pattern.
+    ///
+    /// This is analogous to matrix transposition, i.e. an entry `(i, j)` becomes `(j, i)` in the
+    /// new pattern.
+    pub fn transpose(&self) -> Self {
+        // By using unit () values, we can use the same routines as for CSR/CSC matrices
+        let values = vec![(); self.nnz()];
+        let (new_offsets, new_indices, _) = transpose_cs(
+            self.major_dim(),
+            self.minor_dim(),
+            self.major_offsets(),
+            self.minor_indices(),
+            &values,
+        );
+        // TODO: Skip checks
+        Self::try_from_offsets_and_indices(
+            self.minor_dim(),
+            self.major_dim(),
+            new_offsets,
+            new_indices,
+        )
+        .expect("Internal error: Transpose should never fail.")
+    }
+}
+
+/// Error type for `SparsityPattern` format errors.
+#[non_exhaustive]
+#[derive(Debug, PartialEq, Eq)]
+pub enum SparsityPatternFormatError {
+    /// Indicates an invalid number of offsets.
+    ///
+    /// The number of offsets must be equal to (major_dim + 1).
+    InvalidOffsetArrayLength,
+    /// Indicates that the first or last entry in the offset array did not conform to
+    /// specifications.
+    ///
+    /// The first entry must be 0, and the last entry must be exactly equal to the number
+    /// of minor indices (i.e. `nnz`).
+    InvalidOffsetFirstLast,
+    /// Indicates that the major offsets are not monotonically increasing.
+    NonmonotonicOffsets,
+    /// One or more minor indices are out of bounds.
+    MinorIndexOutOfBounds,
+    /// One or more duplicate entries were detected.
+    ///
+    /// Two entries are considered duplicates if they are part of the same major lane and have
+    /// the same minor index.
+    DuplicateEntry,
+    /// Indicates that minor indices are not monotonically increasing within each lane.
+    NonmonotonicMinorIndices,
+}
+
+impl From<SparsityPatternFormatError> for SparseFormatError {
+    fn from(err: SparsityPatternFormatError) -> Self {
+        use crate::SparseFormatErrorKind;
+        use crate::SparseFormatErrorKind::*;
+        use SparsityPatternFormatError::DuplicateEntry as PatternDuplicateEntry;
+        use SparsityPatternFormatError::*;
+        match err {
+            InvalidOffsetArrayLength
+            | InvalidOffsetFirstLast
+            | NonmonotonicOffsets
+            | NonmonotonicMinorIndices => {
+                SparseFormatError::from_kind_and_error(InvalidStructure, Box::from(err))
+            }
+            MinorIndexOutOfBounds => {
+                SparseFormatError::from_kind_and_error(IndexOutOfBounds, Box::from(err))
+            }
+            PatternDuplicateEntry => SparseFormatError::from_kind_and_error(
+                #[allow(unused_qualifications)]
+                SparseFormatErrorKind::DuplicateEntry,
+                Box::from(err),
+            ),
+        }
+    }
+}
+
+impl fmt::Display for SparsityPatternFormatError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            SparsityPatternFormatError::InvalidOffsetArrayLength => {
+                write!(f, "Length of offset array is not equal to (major_dim + 1).")
+            }
+            SparsityPatternFormatError::InvalidOffsetFirstLast => {
+                write!(f, "First or last offset is incompatible with format.")
+            }
+            SparsityPatternFormatError::NonmonotonicOffsets => {
+                write!(f, "Offsets are not monotonically increasing.")
+            }
+            SparsityPatternFormatError::MinorIndexOutOfBounds => {
+                write!(f, "A minor index is out of bounds.")
+            }
+            SparsityPatternFormatError::DuplicateEntry => {
+                write!(f, "Input data contains duplicate entries.")
+            }
+            SparsityPatternFormatError::NonmonotonicMinorIndices => {
+                write!(
+                    f,
+                    "Minor indices are not monotonically increasing within each lane."
+                )
+            }
+        }
+    }
+}
+
+impl Error for SparsityPatternFormatError {}
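To tie the format rules and the error variants together, a short sketch (not part of the patch):

```rust
use nalgebra_sparse::pattern::{SparsityPattern, SparsityPatternFormatError};

fn main() {
    // A valid 3 x 4 pattern: lanes [0, 2], [1] and [0].
    let pattern = SparsityPattern::try_from_offsets_and_indices(
        3, 4, vec![0, 2, 3, 4], vec![0, 2, 1, 0],
    )
    .unwrap();
    assert_eq!(pattern.lane(0), &[0, 2]);
    assert_eq!(pattern.nnz(), 4);

    // The offsets array must have length major_dim + 1 ...
    let err = SparsityPattern::try_from_offsets_and_indices(2, 2, vec![0, 1], vec![0]);
    assert_eq!(err.unwrap_err(), SparsityPatternFormatError::InvalidOffsetArrayLength);

    // ... and minor indices must be strictly increasing within each lane.
    let err = SparsityPattern::try_from_offsets_and_indices(1, 3, vec![0, 2], vec![1, 1]);
    assert_eq!(err.unwrap_err(), SparsityPatternFormatError::DuplicateEntry);
}
```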
+/// Iterator type for iterating over entries in a sparsity pattern.
+#[derive(Debug, Clone)]
+pub struct SparsityPatternIter<'a> {
+    // See implementation of Iterator::next for an explanation of how these members are used
+    major_offsets: &'a [usize],
+    minor_indices: &'a [usize],
+    current_lane_idx: usize,
+    remaining_minors_in_lane: &'a [usize],
+}
+
+impl<'a> SparsityPatternIter<'a> {
+    fn from_pattern(pattern: &'a SparsityPattern) -> Self {
+        let first_lane_end = pattern.major_offsets().get(1).unwrap_or(&0);
+        let minors_in_first_lane = &pattern.minor_indices()[0..*first_lane_end];
+        Self {
+            major_offsets: pattern.major_offsets(),
+            minor_indices: pattern.minor_indices(),
+            current_lane_idx: 0,
+            remaining_minors_in_lane: minors_in_first_lane,
+        }
+    }
+}
+
+impl<'a> Iterator for SparsityPatternIter<'a> {
+    type Item = (usize, usize);
+
+    #[inline]
+    fn next(&mut self) -> Option<Self::Item> {
+        // We ensure fast iteration across each lane by iteratively "draining" a slice
+        // corresponding to the remaining column indices in the particular lane.
+        // When we reach the end of this slice, we are at the end of a lane,
+        // and we must do some bookkeeping for preparing the iteration of the next lane
+        // (or stop iteration if we're through all lanes).
+        // This way we can avoid doing unnecessary bookkeeping on every iteration,
+        // instead paying a small price whenever we jump to a new lane.
+        if let Some(minor_idx) = self.remaining_minors_in_lane.first() {
+            let item = Some((self.current_lane_idx, *minor_idx));
+            self.remaining_minors_in_lane = &self.remaining_minors_in_lane[1..];
+            item
+        } else {
+            loop {
+                // Keep skipping lanes until we found a non-empty lane or there are no more lanes
+                if self.current_lane_idx + 2 >= self.major_offsets.len() {
+                    // We've processed all lanes, so we're at the end of the iterator
+                    // (note: keep in mind that offsets.len() == major_dim() + 1, hence we need +2)
+                    return None;
+                } else {
+                    // Bump lane index and check if the lane is non-empty
+                    self.current_lane_idx += 1;
+                    let lower = self.major_offsets[self.current_lane_idx];
+                    let upper = self.major_offsets[self.current_lane_idx + 1];
+                    if upper > lower {
+                        self.remaining_minors_in_lane = &self.minor_indices[(lower + 1)..upper];
+                        return Some((self.current_lane_idx, self.minor_indices[lower]));
+                    }
+                }
+            }
+        }
+    }
+}
diff --git a/nalgebra-sparse/src/proptest.rs b/nalgebra-sparse/src/proptest.rs
new file mode 100644
index 00000000..472c466f
--- /dev/null
+++ b/nalgebra-sparse/src/proptest.rs
@@ -0,0 +1,374 @@
+//! Functionality for integrating `nalgebra-sparse` with `proptest`.
+//!
+//! **This module is only available if the `proptest-support` feature is enabled**.
+//!
+//! The strategies provided here are generally expected to be able to generate the entire range
+//! of possible outputs given the constraints on dimensions and values. However, there are no
+//! particular guarantees on the distribution of possible values.
+
+// Contains some patched code from proptest that we can remove in the (hopefully near) future.
+// See docs in file for more details.
+mod proptest_patched;
+
+use crate::coo::CooMatrix;
+use crate::csc::CscMatrix;
+use crate::csr::CsrMatrix;
+use crate::pattern::SparsityPattern;
+use nalgebra::proptest::DimRange;
+use nalgebra::{Dim, Scalar};
+use proptest::collection::{btree_set, hash_map, vec};
+use proptest::prelude::*;
+use proptest::sample::Index;
+use std::cmp::min;
+use std::iter::repeat;
+
+fn dense_row_major_coord_strategy(
+    nrows: usize,
+    ncols: usize,
+    nnz: usize,
+) -> impl Strategy<Value = Vec<(usize, usize)>> {
+    assert!(nnz <= nrows * ncols);
+    let mut booleans = vec![true; nnz];
+    booleans.append(&mut vec![false; (nrows * ncols) - nnz]);
+    // Make sure that exactly `nnz` of the booleans are true
+
+    // TODO: We cannot use the below code because of a bug in proptest, see
+    // https://github.com/AltSysrq/proptest/pull/217
+    // so for now we're using a patched version of the Shuffle adapter
+    // (see also docs in `proptest_patched`)
+    //    Just(booleans)
+    //        // Need to shuffle to make sure they are randomly distributed
+    //        .prop_shuffle()
+
+    proptest_patched::Shuffle(Just(booleans)).prop_map(move |booleans| {
+        booleans
+            .into_iter()
+            .enumerate()
+            .filter_map(|(index, is_entry)| {
+                if is_entry {
+                    // Convert linear index to row/col pair
+                    let i = index / ncols;
+                    let j = index % ncols;
+                    Some((i, j))
+                } else {
+                    None
+                }
+            })
+            .collect::<Vec<_>>()
+    })
+}
+
+/// A strategy for generating `nnz` triplets.
+///
+/// This strategy should generally only be used when `nnz` is close to `nrows * ncols`.
+fn dense_triplet_strategy<T>(
+    value_strategy: T,
+    nrows: usize,
+    ncols: usize,
+    nnz: usize,
+) -> impl Strategy<Value = Vec<(usize, usize, T::Value)>>
+where
+    T: Strategy + Clone + 'static,
+    T::Value: Scalar,
+{
+    assert!(nnz <= nrows * ncols);
+
+    // Construct a number of booleans of which exactly `nnz` are true.
+    let booleans: Vec<_> = repeat(true)
+        .take(nnz)
+        .chain(repeat(false))
+        .take(nrows * ncols)
+        .collect();
+
+    Just(booleans)
+        // Shuffle the booleans so that they are randomly distributed
+        .prop_shuffle()
+        // Convert the booleans into a list of coordinate pairs
+        .prop_map(move |booleans| {
+            booleans
+                .into_iter()
+                .enumerate()
+                .filter_map(|(index, is_entry)| {
+                    if is_entry {
+                        // Convert linear index to row/col pair
+                        let i = index / ncols;
+                        let j = index % ncols;
+                        Some((i, j))
+                    } else {
+                        None
+                    }
+                })
+                .collect::<Vec<_>>()
+        })
+        // Assign values to each coordinate pair in order to generate a list of triplets
+        .prop_flat_map(move |coords| {
+            vec![value_strategy.clone(); coords.len()].prop_map(move |values| {
+                coords
+                    .clone()
+                    .into_iter()
+                    .zip(values)
+                    .map(|((i, j), v)| (i, j, v))
+                    .collect::<Vec<_>>()
+            })
+        })
+}
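The row-major linearization used by these dense strategies is easy to sanity-check outside of proptest (sketch, not part of the patch):

```rust
fn main() {
    // With exactly `nnz` cells marked occupied, linear indices map back to
    // row-major coordinates via i = index / ncols and j = index % ncols.
    let (nrows, ncols, nnz) = (2, 3, 2);
    let booleans = [true, false, false, true, false, false];
    assert_eq!(booleans.iter().filter(|&&b| b).count(), nnz);

    let coords: Vec<(usize, usize)> = booleans
        .iter()
        .enumerate()
        .filter_map(|(index, &is_entry)| {
            if is_entry {
                Some((index / ncols, index % ncols))
            } else {
                None
            }
        })
        .collect();

    assert_eq!(coords, vec![(0, 0), (1, 0)]);
    assert!(coords.iter().all(|&(i, j)| i < nrows && j < ncols));
}
```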
+/// A strategy for generating `nnz` triplets.
+///
+/// This strategy should generally only be used when `nnz << nrows * ncols`. If `nnz` is too
+/// close to `nrows * ncols` it may fail due to excessive rejected samples.
+fn sparse_triplet_strategy<T>(
+    value_strategy: T,
+    nrows: usize,
+    ncols: usize,
+    nnz: usize,
+) -> impl Strategy<Value = Vec<(usize, usize, T::Value)>>
+where
+    T: Strategy + Clone + 'static,
+    T::Value: Scalar,
+{
+    // Have to handle the zero case: proptest doesn't like empty ranges (i.e. 0 .. 0)
+    let row_index_strategy = if nrows > 0 { 0..nrows } else { 0..1 };
+    let col_index_strategy = if ncols > 0 { 0..ncols } else { 0..1 };
+    let coord_strategy = (row_index_strategy, col_index_strategy);
+    hash_map(coord_strategy, value_strategy.clone(), nnz)
+        .prop_map(|hash_map| {
+            let triplets: Vec<_> = hash_map.into_iter().map(|((i, j), v)| (i, j, v)).collect();
+            triplets
+        })
+        // Although order in the hash map is unspecified, it's not necessarily *random*
+        // - or, in particular, it does not necessarily sample the whole space of possible outcomes -
+        // so we additionally shuffle the triplets
+        .prop_shuffle()
+}
+
+/// A strategy for producing COO matrices without duplicate entries.
+///
+/// The values of the matrix are picked from the provided `value_strategy`, while the size of the
+/// generated matrices is determined by the ranges `rows` and `cols`. The number of explicitly
+/// stored entries is bounded from above by `max_nonzeros`. Note that the matrix might still
+/// contain explicitly stored zeroes if the value strategy is capable of generating zero values.
+pub fn coo_no_duplicates<T>(
+    value_strategy: T,
+    rows: impl Into<DimRange>,
+    cols: impl Into<DimRange>,
+    max_nonzeros: usize,
+) -> impl Strategy<Value = CooMatrix<T::Value>>
+where
+    T: Strategy + Clone + 'static,
+    T::Value: Scalar,
+{
+    (
+        rows.into().to_range_inclusive(),
+        cols.into().to_range_inclusive(),
+    )
+        .prop_flat_map(move |(nrows, ncols)| {
+            let max_nonzeros = min(max_nonzeros, nrows * ncols);
+            let size_range = 0..=max_nonzeros;
+            let value_strategy = value_strategy.clone();
+
+            size_range
+                .prop_flat_map(move |nnz| {
+                    let value_strategy = value_strategy.clone();
+                    if nnz as f64 > 0.10 * (nrows as f64) * (ncols as f64) {
+                        // If the number of nnz is sufficiently dense, then use the dense
+                        // sample strategy
+                        dense_triplet_strategy(value_strategy, nrows, ncols, nnz).boxed()
+                    } else {
+                        // Otherwise, use a hash map strategy so that we can get a sparse sampling
+                        // (so that complexity is rather on the order of max_nnz than nrows * ncols)
+                        sparse_triplet_strategy(value_strategy, nrows, ncols, nnz).boxed()
+                    }
+                })
+                .prop_map(move |triplets| {
+                    let mut coo = CooMatrix::new(nrows, ncols);
+                    for (i, j, v) in triplets {
+                        coo.push(i, j, v);
+                    }
+                    coo
+                })
+        })
+}
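A usage sketch (not part of the patch) showing how the strategy composes with `proptest!`; the asserted bounds simply mirror the strategy arguments:

```rust
use nalgebra_sparse::proptest::coo_no_duplicates;
use proptest::prelude::*;

proptest! {
    #[test]
    fn generated_coo_matrices_respect_bounds(
        coo in coo_no_duplicates(-5..=5, 0..=4usize, 0..=4usize, 10)
    ) {
        prop_assert!(coo.nrows() <= 4 && coo.ncols() <= 4);
        prop_assert!(coo.nnz() <= 10);

        // No (i, j) pair occurs twice among the stored triplets.
        let mut coords: Vec<_> = coo.triplet_iter().map(|(i, j, _)| (i, j)).collect();
        coords.sort_unstable();
        prop_assert!(coords.windows(2).all(|w| w[0] != w[1]));
    }
}
```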
+/// A strategy for producing COO matrices with duplicate entries.
+///
+/// The values of the matrix are picked from the provided `value_strategy`, while the size of the
+/// generated matrices is determined by the ranges `rows` and `cols`. Note that the values
+/// only apply to individual entries, and since this strategy can generate duplicate entries,
+/// the matrix will generally have values outside the range determined by `value_strategy` when
+/// converted to other formats, since the duplicate entries are summed together in this case.
+///
+/// The number of explicitly stored entries is bounded from above by `max_nonzeros`. The maximum
+/// number of duplicate entries is determined by `max_duplicates`. Note that the matrix might still
+/// contain explicitly stored zeroes if the value strategy is capable of generating zero values.
+pub fn coo_with_duplicates<T>(
+    value_strategy: T,
+    rows: impl Into<DimRange>,
+    cols: impl Into<DimRange>,
+    max_nonzeros: usize,
+    max_duplicates: usize,
+) -> impl Strategy<Value = CooMatrix<T::Value>>
+where
+    T: Strategy + Clone + 'static,
+    T::Value: Scalar,
+{
+    let coo_strategy = coo_no_duplicates(value_strategy.clone(), rows, cols, max_nonzeros);
+    let duplicate_strategy = vec((any::<Index>(), value_strategy.clone()), 0..=max_duplicates);
+    (coo_strategy, duplicate_strategy)
+        .prop_flat_map(|(coo, duplicates)| {
+            let mut triplets: Vec<(usize, usize, T::Value)> = coo
+                .triplet_iter()
+                .map(|(i, j, v)| (i, j, v.clone()))
+                .collect();
+            if !triplets.is_empty() {
+                let duplicates_iter: Vec<_> = duplicates
+                    .into_iter()
+                    .map(|(idx, val)| {
+                        let (i, j, _) = idx.get(&triplets);
+                        (*i, *j, val)
+                    })
+                    .collect();
+                triplets.extend(duplicates_iter);
+            }
+            // Make sure to shuffle so that the duplicates get mixed in with the non-duplicates
+            let shuffled = Just(triplets).prop_shuffle();
+            (Just(coo.nrows()), Just(coo.ncols()), shuffled)
+        })
+        .prop_map(move |(nrows, ncols, triplets)| {
+            let mut coo = CooMatrix::new(nrows, ncols);
+            for (i, j, v) in triplets {
+                coo.push(i, j, v);
+            }
+            coo
+        })
+}
+
+fn sparsity_pattern_from_row_major_coords<I>(
+    nmajor: usize,
+    nminor: usize,
+    coords: I,
+) -> SparsityPattern
+where
+    I: Iterator<Item = (usize, usize)> + ExactSizeIterator,
+{
+    let mut minors = Vec::with_capacity(coords.len());
+    let mut offsets = Vec::with_capacity(nmajor + 1);
+    let mut current_major = 0;
+    offsets.push(0);
+    for (idx, (i, j)) in coords.enumerate() {
+        assert!(i >= current_major);
+        assert!(
+            i < nmajor && j < nminor,
+            "Generated coords are out of bounds"
+        );
+        while current_major < i {
+            offsets.push(idx);
+            current_major += 1;
+        }
+        minors.push(j);
+    }
+
+    while current_major < nmajor {
+        offsets.push(minors.len());
+        current_major += 1;
+    }
+
+    assert_eq!(offsets.first().unwrap(), &0);
+    assert_eq!(offsets.len(), nmajor + 1);
+
+    SparsityPattern::try_from_offsets_and_indices(nmajor, nminor, offsets, minors)
+        .expect("Internal error: Generated sparsity pattern is invalid")
+}
+
+/// A strategy for generating sparsity patterns.
+pub fn sparsity_pattern(
+    major_lanes: impl Into<DimRange>,
+    minor_lanes: impl Into<DimRange>,
+    max_nonzeros: usize,
+) -> impl Strategy<Value = SparsityPattern> {
+    (
+        major_lanes.into().to_range_inclusive(),
+        minor_lanes.into().to_range_inclusive(),
+    )
+        .prop_flat_map(move |(nmajor, nminor)| {
+            let max_nonzeros = min(nmajor * nminor, max_nonzeros);
+            (Just(nmajor), Just(nminor), 0..=max_nonzeros)
+        })
+        .prop_flat_map(move |(nmajor, nminor, nnz)| {
+            if 10 * nnz < nmajor * nminor {
+                // If nnz is small compared to a dense matrix, then use a sparse sampling strategy
+                btree_set((0..nmajor, 0..nminor), nnz)
+                    .prop_map(move |coords| {
+                        sparsity_pattern_from_row_major_coords(nmajor, nminor, coords.into_iter())
+                    })
+                    .boxed()
+            } else {
+                // If the required number of nonzeros is sufficiently dense,
+                // we instead use a dense sampling
+                dense_row_major_coord_strategy(nmajor, nminor, nnz)
+                    .prop_map(move |coords| {
+                        let coords = coords.into_iter();
+                        sparsity_pattern_from_row_major_coords(nmajor, nminor, coords)
+                    })
+                    .boxed()
+            }
+        })
+}
+
+/// A strategy for generating CSR matrices.
+pub fn csr<T>(
+    value_strategy: T,
+    rows: impl Into<DimRange>,
+    cols: impl Into<DimRange>,
+    max_nonzeros: usize,
+) -> impl Strategy<Value = CsrMatrix<T::Value>>
+where
+    T: Strategy + Clone + 'static,
+    T::Value: Scalar,
+{
+    let rows = rows.into();
+    let cols = cols.into();
+    sparsity_pattern(
+        rows.lower_bound().value()..=rows.upper_bound().value(),
+        cols.lower_bound().value()..=cols.upper_bound().value(),
+        max_nonzeros,
+    )
+    .prop_flat_map(move |pattern| {
+        let nnz = pattern.nnz();
+        let values = vec![value_strategy.clone(); nnz];
+        (Just(pattern), values)
+    })
+    .prop_map(|(pattern, values)| {
+        CsrMatrix::try_from_pattern_and_values(pattern, values)
+            .expect("Internal error: Generated CsrMatrix is invalid")
+    })
+}
+
+/// A strategy for generating CSC matrices.
+pub fn csc<T>(
+    value_strategy: T,
+    rows: impl Into<DimRange>,
+    cols: impl Into<DimRange>,
+    max_nonzeros: usize,
+) -> impl Strategy<Value = CscMatrix<T::Value>>
+where
+    T: Strategy + Clone + 'static,
+    T::Value: Scalar,
+{
+    let rows = rows.into();
+    let cols = cols.into();
+    sparsity_pattern(
+        cols.lower_bound().value()..=cols.upper_bound().value(),
+        rows.lower_bound().value()..=rows.upper_bound().value(),
+        max_nonzeros,
+    )
+    .prop_flat_map(move |pattern| {
+        let nnz = pattern.nnz();
+        let values = vec![value_strategy.clone(); nnz];
+        (Just(pattern), values)
+    })
+    .prop_map(|(pattern, values)| {
+        CscMatrix::try_from_pattern_and_values(pattern, values)
+            .expect("Internal error: Generated CscMatrix is invalid")
+    })
+}
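And a matching sketch for the CSR strategy (not part of the patch; its shape mirrors `csr_strategy` in the test suite's common module further down):

```rust
use nalgebra_sparse::proptest::csr;
use proptest::prelude::*;

proptest! {
    #[test]
    fn generated_csr_matrices_respect_bounds(
        matrix in csr(-5..=5, 0..=6usize, 0..=6usize, 40)
    ) {
        prop_assert!(matrix.nrows() <= 6 && matrix.ncols() <= 6);
        prop_assert!(matrix.nnz() <= 40);
        prop_assert!(matrix.values().iter().all(|v| (-5..=5).contains(v)));
    }
}
```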
diff --git a/nalgebra-sparse/src/proptest/proptest_patched.rs b/nalgebra-sparse/src/proptest/proptest_patched.rs
new file mode 100644
index 00000000..37c71262
--- /dev/null
+++ b/nalgebra-sparse/src/proptest/proptest_patched.rs
@@ -0,0 +1,146 @@
+//! Contains a modified implementation of `proptest::strategy::Shuffle`.
+//!
+//! The current implementation in `proptest` does not generate all permutations, which is
+//! problematic for our proptest generators. The issue has been fixed in
+//! https://github.com/AltSysrq/proptest/pull/217
+//! but it has yet to be merged and released. As soon as this fix makes it into a new release,
+//! the modified code here can be removed.
+//!
+/*!
+    This code has been copied and adapted from
+    https://github.com/AltSysrq/proptest/blob/master/proptest/src/strategy/shuffle.rs
+    The original licensing text is:
+
+    //-
+    // Copyright 2017 Jason Lingle
+    //
+    // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+    // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+    // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+    // option. This file may not be copied, modified, or distributed
+    // except according to those terms.
+
+*/
+
+use proptest::num;
+use proptest::prelude::Rng;
+use proptest::strategy::{NewTree, Shuffleable, Strategy, ValueTree};
+use proptest::test_runner::{TestRng, TestRunner};
+use std::cell::Cell;
+
+#[derive(Clone, Debug)]
+#[must_use = "strategies do nothing unless used"]
+pub struct Shuffle<S>(pub(super) S);
+
+impl<S: Strategy> Strategy for Shuffle<S>
+where
+    S::Value: Shuffleable,
+{
+    type Tree = ShuffleValueTree<S::Tree>;
+    type Value = S::Value;
+
+    fn new_tree(&self, runner: &mut TestRunner) -> NewTree<Self> {
+        let rng = runner.new_rng();
+
+        self.0.new_tree(runner).map(|inner| ShuffleValueTree {
+            inner,
+            rng,
+            dist: Cell::new(None),
+            simplifying_inner: false,
+        })
+    }
+}
+
+#[derive(Clone, Debug)]
+pub struct ShuffleValueTree<V> {
+    inner: V,
+    rng: TestRng,
+    dist: Cell<Option<num::usize::BinarySearch>>,
+    simplifying_inner: bool,
+}
+
+impl<V: ValueTree> ShuffleValueTree<V>
+where
+    V::Value: Shuffleable,
+{
+    fn init_dist(&self, dflt: usize) -> usize {
+        if self.dist.get().is_none() {
+            self.dist.set(Some(num::usize::BinarySearch::new(dflt)));
+        }
+
+        self.dist.get().unwrap().current()
+    }
+
+    fn force_init_dist(&self) {
+        if self.dist.get().is_none() {
+            let _ = self.init_dist(self.current().shuffle_len());
+        }
+    }
+}
+
+impl<V: ValueTree> ValueTree for ShuffleValueTree<V>
+where
+    V::Value: Shuffleable,
+{
+    type Value = V::Value;
+
+    fn current(&self) -> V::Value {
+        let mut value = self.inner.current();
+        let len = value.shuffle_len();
+        // The maximum distance to swap elements. This could be larger than
+        // `value` if `value` has reduced size during shrinking; that's OK,
+        // since we only use this to filter swaps.
+        let max_swap = self.init_dist(len);
+
+        // If empty collection or all swaps will be filtered out, there's
+        // nothing to shuffle.
+        if 0 == len || 0 == max_swap {
+            return value;
+        }
+
+        let mut rng = self.rng.clone();
+
+        for start_index in 0..len - 1 {
+            // Determine the other index to be swapped, then skip the swap if
+            // it is too far. This ordering is critical, as it ensures that we
+            // generate the same sequence of random numbers every time.
+
+            // NOTE: The below line is the whole reason for the existence of this adapted code
+            // We need to be able to swap with the same element, so that some elements remain in
+            // place rather being swapped
+            // let end_index = rng.gen_range(start_index + 1..len);
+            let end_index = rng.gen_range(start_index..len);
+            if end_index - start_index <= max_swap {
+                value.shuffle_swap(start_index, end_index);
+            }
+        }
+
+        value
+    }
+
+    fn simplify(&mut self) -> bool {
+        if self.simplifying_inner {
+            self.inner.simplify()
+        } else {
+            // Ensure that we've initialised `dist` to *something* to give
+            // consistent non-panicking behaviour even if called in an
+            // unexpected sequence.
+            self.force_init_dist();
+            if self.dist.get_mut().as_mut().unwrap().simplify() {
+                true
+            } else {
+                self.simplifying_inner = true;
+                self.inner.simplify()
+            }
+        }
+    }
+
+    fn complicate(&mut self) -> bool {
+        if self.simplifying_inner {
+            self.inner.complicate()
+        } else {
+            self.force_init_dist();
+            self.dist.get_mut().as_mut().unwrap().complicate()
+        }
+    }
+}
assert_panics { + ($e:expr) => {{ + use std::panic::catch_unwind; + use std::stringify; + let expr_string = stringify!($e); + + // Note: We cannot manipulate the panic hook here, because it is global and the test + // suite is run in parallel, which leads to race conditions in the sense + // that some regular tests that panic might not output anything anymore. + // Unfortunately this means that output is still printed to stdout if + // we run cargo test -- --nocapture. But Cargo does not forward this if the test + // binary is not run with nocapture, so it is somewhat acceptable nonetheless. + + let result = catch_unwind(|| $e); + if result.is_ok() { + panic!( + "assert_panics!({}) failed: the expression did not panic.", + expr_string + ); + } + }}; +} + +pub const PROPTEST_MATRIX_DIM: RangeInclusive = 0..=6; +pub const PROPTEST_MAX_NNZ: usize = 40; +pub const PROPTEST_I32_VALUE_STRATEGY: RangeInclusive = -5..=5; + +pub fn value_strategy() -> RangeInclusive +where + T: TryFrom, + T::Error: Debug, +{ + let (start, end) = ( + PROPTEST_I32_VALUE_STRATEGY.start(), + PROPTEST_I32_VALUE_STRATEGY.end(), + ); + T::try_from(*start).unwrap()..=T::try_from(*end).unwrap() +} + +pub fn non_zero_i32_value_strategy() -> impl Strategy { + let (start, end) = ( + PROPTEST_I32_VALUE_STRATEGY.start(), + PROPTEST_I32_VALUE_STRATEGY.end(), + ); + assert!(start < &0); + assert!(end > &0); + // Note: we don't use RangeInclusive for the second range, because then we'd have different + // types, which would require boxing + (*start..0).prop_union(1..*end + 1) +} + +pub fn csr_strategy() -> impl Strategy> { + csr( + PROPTEST_I32_VALUE_STRATEGY, + PROPTEST_MATRIX_DIM, + PROPTEST_MATRIX_DIM, + PROPTEST_MAX_NNZ, + ) +} + +pub fn csc_strategy() -> impl Strategy> { + csc( + PROPTEST_I32_VALUE_STRATEGY, + PROPTEST_MATRIX_DIM, + PROPTEST_MATRIX_DIM, + PROPTEST_MAX_NNZ, + ) +} diff --git a/nalgebra-sparse/tests/unit.rs b/nalgebra-sparse/tests/unit.rs new file mode 100644 index 00000000..73a95cd7 --- /dev/null +++ b/nalgebra-sparse/tests/unit.rs @@ -0,0 +1,8 @@ +//! Unit tests +#[cfg(any(not(feature = "proptest-support"), not(feature = "compare")))] +compile_error!("Tests must be run with features `proptest-support` and `compare`"); + +mod unit_tests; + +#[macro_use] +pub mod common; diff --git a/nalgebra-sparse/tests/unit_tests/cholesky.proptest-regressions b/nalgebra-sparse/tests/unit_tests/cholesky.proptest-regressions new file mode 100644 index 00000000..c07db97b --- /dev/null +++ b/nalgebra-sparse/tests/unit_tests/cholesky.proptest-regressions @@ -0,0 +1,8 @@ +# Seeds for failure cases proptest has generated in the past. It is +# automatically read and these particular cases re-run before any +# novel cases are generated. +# +# It is recommended to check this file in to source control so that +# everyone who runs the test benefits from these saved cases. 
diff --git a/nalgebra-sparse/tests/unit_tests/cholesky.proptest-regressions b/nalgebra-sparse/tests/unit_tests/cholesky.proptest-regressions
new file mode 100644
index 00000000..c07db97b
--- /dev/null
+++ b/nalgebra-sparse/tests/unit_tests/cholesky.proptest-regressions
@@ -0,0 +1,8 @@
+# Seeds for failure cases proptest has generated in the past. It is
+# automatically read and these particular cases re-run before any
+# novel cases are generated.
+#
+# It is recommended to check this file in to source control so that
+# everyone who runs the test benefits from these saved cases.
+cc 3f71c8edc555965e521e3aaf58c736240a0e333c3a9d54e8a836d7768c371215 # shrinks to matrix = CscMatrix { cs: CsMatrix { sparsity_pattern: SparsityPattern { major_offsets: [0], minor_indices: [], minor_dim: 0 }, values: [] } }
+cc aef645e3184b814ef39fbb10234f12e6ff502ab515dabefafeedab5895e22b12 # shrinks to (matrix, rhs) = (CscMatrix { cs: CsMatrix { sparsity_pattern: SparsityPattern { major_offsets: [0, 4, 7, 11, 14], minor_indices: [0, 1, 2, 3, 0, 1, 2, 0, 1, 2, 3, 0, 2, 3], minor_dim: 4 }, values: [1.0, 0.0, 0.0, 0.0, 0.0, 40.90124126326177, 36.975170911665906, 0.0, 36.975170911665906, 42.51062858727923, -12.984115201530539, 0.0, -12.984115201530539, 27.73953543265418] } }, Matrix { data: VecStorage { data: [0.0, 0.0, 0.0, -4.05763092330143], nrows: Dynamic { value: 4 }, ncols: Dynamic { value: 1 } } })
diff --git a/nalgebra-sparse/tests/unit_tests/cholesky.rs b/nalgebra-sparse/tests/unit_tests/cholesky.rs
new file mode 100644
index 00000000..82cb2c42
--- /dev/null
+++ b/nalgebra-sparse/tests/unit_tests/cholesky.rs
@@ -0,0 +1,117 @@
+#![cfg_attr(rustfmt, rustfmt_skip)]
+use crate::common::{value_strategy, PROPTEST_MATRIX_DIM, PROPTEST_MAX_NNZ};
+use nalgebra_sparse::csc::CscMatrix;
+use nalgebra_sparse::factorization::{CscCholesky};
+use nalgebra_sparse::proptest::csc;
+use nalgebra::{Matrix5, Vector5, Cholesky, DMatrix};
+use nalgebra::proptest::matrix;
+
+use proptest::prelude::*;
+use matrixcompare::{assert_matrix_eq, prop_assert_matrix_eq};
+
+fn positive_definite() -> impl Strategy<Value = CscMatrix<f64>> {
+    let csc_f64 = csc(value_strategy::<f64>(),
+                      PROPTEST_MATRIX_DIM,
+                      PROPTEST_MATRIX_DIM,
+                      PROPTEST_MAX_NNZ);
+    csc_f64
+        .prop_map(|x| {
+            // Add a small multiple of the identity to ensure positive definiteness
+            x.transpose() * &x + CscMatrix::identity(x.ncols())
+        })
+}
+
+proptest! {
+    #[test]
+    fn cholesky_correct_for_positive_definite_matrices(
+        matrix in positive_definite()
+    ) {
+        let cholesky = CscCholesky::factor(&matrix).unwrap();
+        let l = cholesky.take_l();
+        let matrix_reconstructed = &l * l.transpose();
+
+        prop_assert_matrix_eq!(matrix_reconstructed, matrix, comp = abs, tol = 1e-8);
+
+        let is_lower_triangular = l.triplet_iter().all(|(i, j, _)| j <= i);
+        prop_assert!(is_lower_triangular);
+    }
+
+    #[test]
+    fn cholesky_solve_positive_definite(
+        (matrix, rhs) in positive_definite()
+            .prop_flat_map(|csc| {
+                let rhs = matrix(value_strategy::<f64>(), csc.nrows(), PROPTEST_MATRIX_DIM);
+                (Just(csc), rhs)
+            })
+    ) {
+        let cholesky = CscCholesky::factor(&matrix).unwrap();
+
+        // solve_mut
+        {
+            let mut x = rhs.clone();
+            cholesky.solve_mut(&mut x);
+            prop_assert_matrix_eq!(&matrix * &x, rhs, comp=abs, tol=1e-12);
+        }
+
+        // solve
+        {
+            let x = cholesky.solve(&rhs);
+            prop_assert_matrix_eq!(&matrix * &x, rhs, comp=abs, tol=1e-12);
+        }
+    }
+
+}
+
+// This is a test ported from nalgebra's "sparse" module, for the original CsCholesky impl
+#[test]
+fn cs_cholesky() {
+    let mut a = Matrix5::new(
+        40.0, 0.0, 0.0, 0.0, 0.0,
+        2.0, 60.0, 0.0, 0.0, 0.0,
+        1.0, 0.0, 11.0, 0.0, 0.0,
+        0.0, 0.0, 0.0, 50.0, 0.0,
+        1.0, 0.0, 0.0, 4.0, 10.0
+    );
+    a.fill_upper_triangle_with_lower_triangle();
+    test_cholesky(a);
+
+    let a = Matrix5::from_diagonal(&Vector5::new(40.0, 60.0, 11.0, 50.0, 10.0));
+    test_cholesky(a);
+
+    let mut a = Matrix5::new(
+        40.0, 0.0, 0.0, 0.0, 0.0,
+        2.0, 60.0, 0.0, 0.0, 0.0,
+        1.0, 0.0, 11.0, 0.0, 0.0,
+        1.0, 0.0, 0.0, 50.0, 0.0,
+        0.0, 0.0, 0.0, 4.0, 10.0
+    );
+    a.fill_upper_triangle_with_lower_triangle();
+    test_cholesky(a);
+
+    let mut a = Matrix5::new(
+        2.0, 0.0, 0.0, 0.0, 0.0,
+        0.0, 2.0, 0.0, 0.0, 0.0,
+        1.0, 1.0, 2.0, 0.0, 0.0,
+        0.0, 0.0, 0.0, 2.0, 0.0,
+        1.0, 1.0, 0.0, 0.0, 2.0
+    );
+    a.fill_upper_triangle_with_lower_triangle();
+    // Test crate::new, left_looking, and up_looking implementations.
+    test_cholesky(a);
+}
+
+fn test_cholesky(a: Matrix5<f64>) {
+    // TODO: Test "refactor"
+
+    let cs_a = CscMatrix::from(&a);
+
+    let chol_a = Cholesky::new(a).unwrap();
+    let chol_cs_a = CscCholesky::factor(&cs_a).unwrap();
+
+    let l = chol_a.l();
+    let cs_l = chol_cs_a.take_l();
+
+    let l = DMatrix::from_iterator(l.nrows(), l.ncols(), l.iter().cloned());
+    let cs_l_mat = DMatrix::from(&cs_l);
+    assert_matrix_eq!(l, cs_l_mat, comp = abs, tol = 1e-12);
+}
\ No newline at end of file
diff --git a/nalgebra-sparse/tests/unit_tests/convert_serial.proptest-regressions b/nalgebra-sparse/tests/unit_tests/convert_serial.proptest-regressions
new file mode 100644
index 00000000..bfd41ead
--- /dev/null
+++ b/nalgebra-sparse/tests/unit_tests/convert_serial.proptest-regressions
@@ -0,0 +1,10 @@
+# Seeds for failure cases proptest has generated in the past. It is
+# automatically read and these particular cases re-run before any
+# novel cases are generated.
+#
+# It is recommended to check this file in to source control so that
+# everyone who runs the test benefits from these saved cases.
+cc 07cb95127d2700ff2000157938e351ce2b43f3e6419d69b00726abfc03e682bd # shrinks to coo = CooMatrix { nrows: 4, ncols: 5, row_indices: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0], col_indices: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 4, 3], values: [1, -5, -4, -5, 1, 2, 4, -4, -4, -5, 2, -2, 4, -4] } +cc 8fdaf70d6091d89a6617573547745e9802bb9c1ce7c6ec7ad4f301cd05d54c5d # shrinks to dense = Matrix { data: VecStorage { data: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1], nrows: Dynamic { value: 4 }, ncols: Dynamic { value: 5 } } } +cc 6961760ac7915b57a28230524cea7e9bfcea4f31790e3c0569ea74af904c2d79 # shrinks to coo = CooMatrix { nrows: 6, ncols: 6, row_indices: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0], col_indices: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0], values: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0] } +cc c9a1af218f7a974f1fda7b8909c2635d735eedbfe953082ef6b0b92702bf6d1b # shrinks to dense = Matrix { data: VecStorage { data: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], nrows: Dynamic { value: 6 }, ncols: Dynamic { value: 5 } } } diff --git a/nalgebra-sparse/tests/unit_tests/convert_serial.rs b/nalgebra-sparse/tests/unit_tests/convert_serial.rs new file mode 100644 index 00000000..b895c945 --- /dev/null +++ b/nalgebra-sparse/tests/unit_tests/convert_serial.rs @@ -0,0 +1,452 @@ +use crate::common::csc_strategy; +use nalgebra::proptest::matrix; +use nalgebra::DMatrix; +use nalgebra_sparse::convert::serial::{ + convert_coo_csc, convert_coo_csr, convert_coo_dense, convert_csc_coo, convert_csc_csr, + convert_csc_dense, convert_csr_coo, convert_csr_csc, convert_csr_dense, convert_dense_coo, + convert_dense_csc, convert_dense_csr, +}; +use nalgebra_sparse::coo::CooMatrix; +use nalgebra_sparse::csc::CscMatrix; +use nalgebra_sparse::csr::CsrMatrix; +use nalgebra_sparse::proptest::{coo_no_duplicates, coo_with_duplicates, csc, csr}; +use proptest::prelude::*; + +#[test] +fn test_convert_dense_coo() { + // No duplicates + { + #[rustfmt::skip] + let entries = &[1, 0, 3, + 0, 5, 0]; + // The COO representation of a dense matrix is not unique. + // Here we implicitly test that the coo matrix is indeed constructed from column-major + // iteration of the dense matrix. + let dense = DMatrix::from_row_slice(2, 3, entries); + let coo = CooMatrix::try_from_triplets(2, 3, vec![0, 1, 0], vec![0, 1, 2], vec![1, 5, 3]) + .unwrap(); + + assert_eq!(CooMatrix::from(&dense), coo); + assert_eq!(DMatrix::from(&coo), dense); + } + + // Duplicates + { + #[rustfmt::skip] + let entries = &[1, 0, 3, + 0, 5, 0]; + // The COO representation of a dense matrix is not unique. + // Here we implicitly test that the coo matrix is indeed constructed from column-major + // iteration of the dense matrix.
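+ // Note for the duplicate case: the two triplets pushed at (1, 1), with values -2 and 7, must sum to the dense entry 5 when converting COO -> dense.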
+ let dense = DMatrix::from_row_slice(2, 3, entries); + let coo_no_dup = + CooMatrix::try_from_triplets(2, 3, vec![0, 1, 0], vec![0, 1, 2], vec![1, 5, 3]) + .unwrap(); + let coo_dup = CooMatrix::try_from_triplets( + 2, + 3, + vec![0, 1, 0, 1], + vec![0, 1, 2, 1], + vec![1, -2, 3, 7], + ) + .unwrap(); + + assert_eq!(CooMatrix::from(&dense), coo_no_dup); + assert_eq!(DMatrix::from(&coo_dup), dense); + } +} + +#[test] +fn test_convert_coo_csr() { + // No duplicates + { + let coo = { + let mut coo = CooMatrix::new(3, 4); + coo.push(1, 3, 4); + coo.push(0, 1, 2); + coo.push(2, 0, 1); + coo.push(2, 3, 2); + coo.push(2, 2, 1); + coo + }; + + let expected_csr = CsrMatrix::try_from_csr_data( + 3, + 4, + vec![0, 1, 2, 5], + vec![1, 3, 0, 2, 3], + vec![2, 4, 1, 1, 2], + ) + .unwrap(); + + assert_eq!(convert_coo_csr(&coo), expected_csr); + } + + // Duplicates + { + let coo = { + let mut coo = CooMatrix::new(3, 4); + coo.push(1, 3, 4); + coo.push(2, 3, 2); + coo.push(0, 1, 2); + coo.push(2, 0, 1); + coo.push(2, 3, 2); + coo.push(0, 1, 3); + coo.push(2, 2, 1); + coo + }; + + let expected_csr = CsrMatrix::try_from_csr_data( + 3, + 4, + vec![0, 1, 2, 5], + vec![1, 3, 0, 2, 3], + vec![5, 4, 1, 1, 4], + ) + .unwrap(); + + assert_eq!(convert_coo_csr(&coo), expected_csr); + } +} + +#[test] +fn test_convert_csr_coo() { + let csr = CsrMatrix::try_from_csr_data( + 3, + 4, + vec![0, 1, 2, 5], + vec![1, 3, 0, 2, 3], + vec![5, 4, 1, 1, 4], + ) + .unwrap(); + + let expected_coo = CooMatrix::try_from_triplets( + 3, + 4, + vec![0, 1, 2, 2, 2], + vec![1, 3, 0, 2, 3], + vec![5, 4, 1, 1, 4], + ) + .unwrap(); + + assert_eq!(convert_csr_coo(&csr), expected_coo); +} + +#[test] +fn test_convert_coo_csc() { + // No duplicates + { + let coo = { + let mut coo = CooMatrix::new(3, 4); + coo.push(1, 3, 4); + coo.push(0, 1, 2); + coo.push(2, 0, 1); + coo.push(2, 3, 2); + coo.push(2, 2, 1); + coo + }; + + let expected_csc = CscMatrix::try_from_csc_data( + 3, + 4, + vec![0, 1, 2, 3, 5], + vec![2, 0, 2, 1, 2], + vec![1, 2, 1, 4, 2], + ) + .unwrap(); + + assert_eq!(convert_coo_csc(&coo), expected_csc); + } + + // Duplicates + { + let coo = { + let mut coo = CooMatrix::new(3, 4); + coo.push(1, 3, 4); + coo.push(2, 3, 2); + coo.push(0, 1, 2); + coo.push(2, 0, 1); + coo.push(2, 3, 2); + coo.push(0, 1, 3); + coo.push(2, 2, 1); + coo + }; + + let expected_csc = CscMatrix::try_from_csc_data( + 3, + 4, + vec![0, 1, 2, 3, 5], + vec![2, 0, 2, 1, 2], + vec![1, 5, 1, 4, 4], + ) + .unwrap(); + + assert_eq!(convert_coo_csc(&coo), expected_csc); + } +} + +#[test] +fn test_convert_csc_coo() { + let csc = CscMatrix::try_from_csc_data( + 3, + 4, + vec![0, 1, 2, 3, 5], + vec![2, 0, 2, 1, 2], + vec![1, 2, 1, 4, 2], + ) + .unwrap(); + + let expected_coo = CooMatrix::try_from_triplets( + 3, + 4, + vec![2, 0, 2, 1, 2], + vec![0, 1, 2, 3, 3], + vec![1, 2, 1, 4, 2], + ) + .unwrap(); + + assert_eq!(convert_csc_coo(&csc), expected_coo); +} + +#[test] +fn test_convert_csr_csc_bidirectional() { + let csr = CsrMatrix::try_from_csr_data( + 3, + 4, + vec![0, 3, 4, 6], + vec![1, 2, 3, 0, 1, 3], + vec![5, 3, 2, 2, 1, 4], + ) + .unwrap(); + + let csc = CscMatrix::try_from_csc_data( + 3, + 4, + vec![0, 1, 3, 4, 6], + vec![1, 0, 2, 0, 0, 2], + vec![2, 5, 1, 3, 2, 4], + ) + .unwrap(); + + assert_eq!(convert_csr_csc(&csr), csc); + assert_eq!(convert_csc_csr(&csc), csr); +} + +#[test] +fn test_convert_csr_dense_bidirectional() { + let csr = CsrMatrix::try_from_csr_data( + 3, + 4, + vec![0, 3, 4, 6], + vec![1, 2, 3, 0, 1, 3], + vec![5, 3, 2, 2, 1, 4], + ) + .unwrap(); + + 
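+ // The dense matrix below is written row by row so that it visually mirrors the CSR data constructed above.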
#[rustfmt::skip] + let dense = DMatrix::from_row_slice(3, 4, &[ + 0, 5, 3, 2, + 2, 0, 0, 0, + 0, 1, 0, 4 + ]); + + assert_eq!(convert_csr_dense(&csr), dense); + assert_eq!(convert_dense_csr(&dense), csr); +} + +#[test] +fn test_convert_csc_dense_bidirectional() { + let csc = CscMatrix::try_from_csc_data( + 3, + 4, + vec![0, 1, 3, 4, 6], + vec![1, 0, 2, 0, 0, 2], + vec![2, 5, 1, 3, 2, 4], + ) + .unwrap(); + + #[rustfmt::skip] + let dense = DMatrix::from_row_slice(3, 4, &[ + 0, 5, 3, 2, + 2, 0, 0, 0, + 0, 1, 0, 4 + ]); + + assert_eq!(convert_csc_dense(&csc), dense); + assert_eq!(convert_dense_csc(&dense), csc); +} + +fn coo_strategy() -> impl Strategy<Value = CooMatrix<i32>> { + coo_with_duplicates(-5..=5, 0..=6usize, 0..=6usize, 40, 2) +} + +fn coo_no_duplicates_strategy() -> impl Strategy<Value = CooMatrix<i32>> { + coo_no_duplicates(-5..=5, 0..=6usize, 0..=6usize, 40) +} + +fn csr_strategy() -> impl Strategy<Value = CsrMatrix<i32>> { + csr(-5..=5, 0..=6usize, 0..=6usize, 40) +} + +/// Avoid generating explicit zero values so that it is possible to reason about sparsity patterns +fn non_zero_csr_strategy() -> impl Strategy<Value = CsrMatrix<i32>> { + csr(1..=5, 0..=6usize, 0..=6usize, 40) +} + +/// Avoid generating explicit zero values so that it is possible to reason about sparsity patterns +fn non_zero_csc_strategy() -> impl Strategy<Value = CscMatrix<i32>> { + csc(1..=5, 0..=6usize, 0..=6usize, 40) +} + +fn dense_strategy() -> impl Strategy<Value = DMatrix<i32>> { + matrix(-5..=5, 0..=6, 0..=6) +} + +proptest! { + + #[test] + fn convert_dense_coo_roundtrip(dense in matrix(-5..=5, 0..=6, 0..=6)) { + let coo = convert_dense_coo(&dense); + let dense2 = convert_coo_dense(&coo); + prop_assert_eq!(&dense, &dense2); + } + + #[test] + fn convert_coo_dense_coo_roundtrip(coo in coo_strategy()) { + // We cannot compare the result of the roundtrip coo -> dense -> coo directly for + // two reasons: + // 1. the COO matrices will generally have different ordering of elements + // 2.
explicitly stored zero entries in the original matrix will be discarded + // when converting back to COO + // Therefore we instead compare the results of converting the COO matrix + // at the end of the roundtrip with its dense representation + let dense = convert_coo_dense(&coo); + let coo2 = convert_dense_coo(&dense); + let dense2 = convert_coo_dense(&coo2); + prop_assert_eq!(dense, dense2); + } + + #[test] + fn coo_from_dense_roundtrip(dense in dense_strategy()) { + prop_assert_eq!(&dense, &DMatrix::from(&CooMatrix::from(&dense))); + } + + #[test] + fn convert_coo_csr_agrees_with_csr_dense(coo in coo_strategy()) { + let coo_dense = convert_coo_dense(&coo); + let csr = convert_coo_csr(&coo); + let csr_dense = convert_csr_dense(&csr); + prop_assert_eq!(csr_dense, coo_dense); + + // It might be that COO matrices have a higher nnz due to duplicates, + // so we can only check that the CSR matrix has no more explicitly stored entries than the original COO matrix + prop_assert!(csr.nnz() <= coo.nnz()); + } + + #[test] + fn convert_coo_csr_nnz(coo in coo_no_duplicates_strategy()) { + // Check that the NNZ are equal when converting from a CooMatrix without + // duplicates to a CSR matrix + let csr = convert_coo_csr(&coo); + prop_assert_eq!(csr.nnz(), coo.nnz()); + } + + #[test] + fn convert_csr_coo_roundtrip(csr in csr_strategy()) { + let coo = convert_csr_coo(&csr); + let csr2 = convert_coo_csr(&coo); + prop_assert_eq!(csr2, csr); + } + + #[test] + fn coo_from_csr_roundtrip(csr in csr_strategy()) { + prop_assert_eq!(&csr, &CsrMatrix::from(&CooMatrix::from(&csr))); + } + + #[test] + fn csr_from_dense_roundtrip(dense in dense_strategy()) { + prop_assert_eq!(&dense, &DMatrix::from(&CsrMatrix::from(&dense))); + } + + #[test] + fn convert_csr_dense_roundtrip(csr in non_zero_csr_strategy()) { + // Since we only generate CSR matrices with non-zero values, we know that the + // number of explicitly stored entries when converting CSR->Dense->CSR should be + // unchanged, so that we can verify that the result is the same as the input + let dense = convert_csr_dense(&csr); + let csr2 = convert_dense_csr(&dense); + prop_assert_eq!(csr2, csr); + } + + #[test] + fn convert_csc_coo_roundtrip(csc in csc_strategy()) { + let coo = convert_csc_coo(&csc); + let csc2 = convert_coo_csc(&coo); + prop_assert_eq!(csc2, csc); + } + + #[test] + fn coo_from_csc_roundtrip(csc in csc_strategy()) { + prop_assert_eq!(&csc, &CscMatrix::from(&CooMatrix::from(&csc))); + } + + #[test] + fn convert_csc_dense_roundtrip(csc in non_zero_csc_strategy()) { + // Since we only generate CSC matrices with non-zero values, we know that the + // number of explicitly stored entries when converting CSC->Dense->CSC should be + // unchanged, so that we can verify that the result is the same as the input + let dense = convert_csc_dense(&csc); + let csc2 = convert_dense_csc(&dense); + prop_assert_eq!(csc2, csc); + } + + #[test] + fn csc_from_dense_roundtrip(dense in dense_strategy()) { + prop_assert_eq!(&dense, &DMatrix::from(&CscMatrix::from(&dense))); + } + + #[test] + fn convert_coo_csc_agrees_with_csc_dense(coo in coo_strategy()) { + let coo_dense = convert_coo_dense(&coo); + let csc = convert_coo_csc(&coo); + let csc_dense = convert_csc_dense(&csc); + prop_assert_eq!(csc_dense, coo_dense); + + // It might be that COO matrices have a higher nnz due to duplicates, + // so we can only check that the CSC matrix has no more explicitly stored entries than the original COO matrix + prop_assert!(csc.nnz() <= coo.nnz()); + } + + #[test] + fn convert_coo_csc_nnz(coo in coo_no_duplicates_strategy()) {
// Check that the NNZ are equal when converting from a CooMatrix without + // duplicates to a CSC matrix + let csc = convert_coo_csc(&coo); + prop_assert_eq!(csc.nnz(), coo.nnz()); + } + + #[test] + fn convert_csc_csr_roundtrip(csc in csc_strategy()) { + let csr = convert_csc_csr(&csc); + let csc2 = convert_csr_csc(&csr); + prop_assert_eq!(csc2, csc); + } + + #[test] + fn convert_csr_csc_roundtrip(csr in csr_strategy()) { + let csc = convert_csr_csc(&csr); + let csr2 = convert_csc_csr(&csc); + prop_assert_eq!(csr2, csr); + } + + #[test] + fn csc_from_csr_roundtrip(csr in csr_strategy()) { + prop_assert_eq!(&csr, &CsrMatrix::from(&CscMatrix::from(&csr))); + } + + #[test] + fn csr_from_csc_roundtrip(csc in csc_strategy()) { + prop_assert_eq!(&csc, &CscMatrix::from(&CsrMatrix::from(&csc))); + } +} diff --git a/nalgebra-sparse/tests/unit_tests/coo.rs b/nalgebra-sparse/tests/unit_tests/coo.rs new file mode 100644 index 00000000..c9fa1778 --- /dev/null +++ b/nalgebra-sparse/tests/unit_tests/coo.rs @@ -0,0 +1,254 @@ +use crate::assert_panics; +use nalgebra::DMatrix; +use nalgebra_sparse::coo::CooMatrix; +use nalgebra_sparse::SparseFormatErrorKind; + +#[test] +fn coo_construction_for_valid_data() { + // Test that construction with try_from_triplets succeeds, that the state of the + // matrix afterwards is as expected, and that the dense representation matches expectations. + + { + // Zero matrix + let coo = + CooMatrix::<i32>::try_from_triplets(3, 2, Vec::new(), Vec::new(), Vec::new()).unwrap(); + assert_eq!(coo.nrows(), 3); + assert_eq!(coo.ncols(), 2); + assert!(coo.triplet_iter().next().is_none()); + assert!(coo.row_indices().is_empty()); + assert!(coo.col_indices().is_empty()); + assert!(coo.values().is_empty()); + + assert_eq!(DMatrix::from(&coo), DMatrix::repeat(3, 2, 0)); + } + + { + // Arbitrary matrix, no duplicates + let i = vec![0, 1, 0, 0, 2]; + let j = vec![0, 2, 1, 3, 3]; + let v = vec![2, 3, 7, 3, 1]; + let coo = + CooMatrix::<i32>::try_from_triplets(3, 5, i.clone(), j.clone(), v.clone()).unwrap(); + assert_eq!(coo.nrows(), 3); + assert_eq!(coo.ncols(), 5); + + assert_eq!(i.as_slice(), coo.row_indices()); + assert_eq!(j.as_slice(), coo.col_indices()); + assert_eq!(v.as_slice(), coo.values()); + + let expected_triplets: Vec<_> = i + .iter() + .zip(&j) + .zip(&v) + .map(|((i, j), v)| (*i, *j, *v)) + .collect(); + let actual_triplets: Vec<_> = coo.triplet_iter().map(|(i, j, v)| (i, j, *v)).collect(); + assert_eq!(actual_triplets, expected_triplets); + + #[rustfmt::skip] + let expected_dense = DMatrix::from_row_slice(3, 5, &[ + 2, 7, 0, 3, 0, + 0, 0, 3, 0, 0, + 0, 0, 0, 1, 0 + ]); + assert_eq!(DMatrix::from(&coo), expected_dense); + } + + { + // Arbitrary matrix, with duplicates + let i = vec![0, 1, 0, 0, 0, 0, 2, 1]; + let j = vec![0, 2, 0, 1, 0, 3, 3, 2]; + let v = vec![2, 3, 4, 7, 1, 3, 1, 5]; + let coo = + CooMatrix::<i32>::try_from_triplets(3, 5, i.clone(), j.clone(), v.clone()).unwrap(); + assert_eq!(coo.nrows(), 3); + assert_eq!(coo.ncols(), 5); + + assert_eq!(i.as_slice(), coo.row_indices()); + assert_eq!(j.as_slice(), coo.col_indices()); + assert_eq!(v.as_slice(), coo.values()); + + let expected_triplets: Vec<_> = i + .iter() + .zip(&j) + .zip(&v) + .map(|((i, j), v)| (*i, *j, *v)) + .collect(); + let actual_triplets: Vec<_> = coo.triplet_iter().map(|(i, j, v)| (i, j, *v)).collect(); + assert_eq!(actual_triplets, expected_triplets); + + #[rustfmt::skip] + let expected_dense = DMatrix::from_row_slice(3, 5, &[ + 7, 7, 0, 3, 0, + 0, 0, 8, 0, 0, + 0, 0, 0, 1, 0 + ]); +
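+ // The duplicate triplets at (0, 0) (values 2, 4 and 1) and at (1, 2) (values 3 and 5) sum to the dense entries 7 and 8 above.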
assert_eq!(DMatrix::from(&coo), expected_dense); + } +} + +#[test] +fn coo_try_from_triplets_reports_out_of_bounds_indices() { + { + // 0x0 matrix + let result = CooMatrix::<i32>::try_from_triplets(0, 0, vec![0], vec![0], vec![2]); + assert!(matches!( + result.unwrap_err().kind(), + SparseFormatErrorKind::IndexOutOfBounds + )); + } + + { + // 1x1 matrix, row out of bounds + let result = CooMatrix::<i32>::try_from_triplets(1, 1, vec![1], vec![0], vec![2]); + assert!(matches!( + result.unwrap_err().kind(), + SparseFormatErrorKind::IndexOutOfBounds + )); + } + + { + // 1x1 matrix, col out of bounds + let result = CooMatrix::<i32>::try_from_triplets(1, 1, vec![0], vec![1], vec![2]); + assert!(matches!( + result.unwrap_err().kind(), + SparseFormatErrorKind::IndexOutOfBounds + )); + } + + { + // 1x1 matrix, row and col out of bounds + let result = CooMatrix::<i32>::try_from_triplets(1, 1, vec![1], vec![1], vec![2]); + assert!(matches!( + result.unwrap_err().kind(), + SparseFormatErrorKind::IndexOutOfBounds + )); + } + + { + // Arbitrary matrix, row out of bounds + let i = vec![0, 1, 0, 3, 2]; + let j = vec![0, 2, 1, 3, 3]; + let v = vec![2, 3, 7, 3, 1]; + let result = CooMatrix::<i32>::try_from_triplets(3, 5, i, j, v); + assert!(matches!( + result.unwrap_err().kind(), + SparseFormatErrorKind::IndexOutOfBounds + )); + } + + { + // Arbitrary matrix, col out of bounds + let i = vec![0, 1, 0, 0, 2]; + let j = vec![0, 2, 1, 5, 3]; + let v = vec![2, 3, 7, 3, 1]; + let result = CooMatrix::<i32>::try_from_triplets(3, 5, i, j, v); + assert!(matches!( + result.unwrap_err().kind(), + SparseFormatErrorKind::IndexOutOfBounds + )); + } +} + +#[test] +fn coo_try_from_triplets_panics_on_mismatched_vectors() { + // Check that try_from_triplets returns an InvalidStructure error when the triplet vectors have different lengths + macro_rules!
assert_errs { + ($result:expr) => { + assert!(matches!( + $result.unwrap_err().kind(), + SparseFormatErrorKind::InvalidStructure + )) + }; + } + + assert_errs!(CooMatrix::<i32>::try_from_triplets( + 3, + 5, + vec![1, 2], + vec![0], + vec![0] + )); + assert_errs!(CooMatrix::<i32>::try_from_triplets( + 3, + 5, + vec![1], + vec![0, 0], + vec![0] + )); + assert_errs!(CooMatrix::<i32>::try_from_triplets( + 3, + 5, + vec![1], + vec![0], + vec![0, 1] + )); + assert_errs!(CooMatrix::<i32>::try_from_triplets( + 3, + 5, + vec![1, 2], + vec![0, 1], + vec![0] + )); + assert_errs!(CooMatrix::<i32>::try_from_triplets( + 3, + 5, + vec![1], + vec![0, 1], + vec![0, 1] + )); + assert_errs!(CooMatrix::<i32>::try_from_triplets( + 3, + 5, + vec![1, 1], + vec![0], + vec![0, 1] + )); +} + +#[test] +fn coo_push_valid_entries() { + let mut coo = CooMatrix::new(3, 3); + + coo.push(0, 0, 1); + assert_eq!(coo.triplet_iter().collect::<Vec<_>>(), vec![(0, 0, &1)]); + + coo.push(0, 0, 2); + assert_eq!( + coo.triplet_iter().collect::<Vec<_>>(), + vec![(0, 0, &1), (0, 0, &2)] + ); + + coo.push(2, 2, 3); + assert_eq!( + coo.triplet_iter().collect::<Vec<_>>(), + vec![(0, 0, &1), (0, 0, &2), (2, 2, &3)] + ); +} + +#[test] +fn coo_push_out_of_bounds_entries() { + { + // 0x0 matrix + let coo = CooMatrix::new(0, 0); + assert_panics!(coo.clone().push(0, 0, 1)); + } + + { + // 0x1 matrix + assert_panics!(CooMatrix::new(0, 1).push(0, 0, 1)); + } + + { + // 1x0 matrix + assert_panics!(CooMatrix::new(1, 0).push(0, 0, 1)); + } + + { + // Arbitrary matrix dimensions + let coo = CooMatrix::new(3, 2); + assert_panics!(coo.clone().push(3, 0, 1)); + assert_panics!(coo.clone().push(2, 2, 1)); + assert_panics!(coo.clone().push(3, 2, 1)); + } +} diff --git a/nalgebra-sparse/tests/unit_tests/csc.proptest-regressions b/nalgebra-sparse/tests/unit_tests/csc.proptest-regressions new file mode 100644 index 00000000..a9997df0 --- /dev/null +++ b/nalgebra-sparse/tests/unit_tests/csc.proptest-regressions @@ -0,0 +1,7 @@ +# Seeds for failure cases proptest has generated in the past. It is +# automatically read and these particular cases re-run before any +# novel cases are generated. +# +# It is recommended to check this file in to source control so that +# everyone who runs the test benefits from these saved cases. +cc a71b4654827840ed539b82cd7083615b0fb3f75933de6a7d91d8148a2bf34960 # shrinks to (csc, triplet_subset) = (CscMatrix { cs: CsMatrix { sparsity_pattern: SparsityPattern { major_offsets: [0, 1, 1, 1, 1, 1, 1], minor_indices: [0], minor_dim: 4 }, values: [0] } }, {}) diff --git a/nalgebra-sparse/tests/unit_tests/csc.rs b/nalgebra-sparse/tests/unit_tests/csc.rs new file mode 100644 index 00000000..7fb0de54 --- /dev/null +++ b/nalgebra-sparse/tests/unit_tests/csc.rs @@ -0,0 +1,605 @@ +use nalgebra::DMatrix; +use nalgebra_sparse::csc::CscMatrix; +use nalgebra_sparse::{SparseEntry, SparseEntryMut, SparseFormatErrorKind}; + +use proptest::prelude::*; +use proptest::sample::subsequence; + +use crate::assert_panics; +use crate::common::csc_strategy; + +use std::collections::HashSet; + +#[test] +fn csc_matrix_valid_data() { + // Construct matrix from valid data and check that selected methods return results + // that agree with expectations.
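+ // (Recall the CSC invariants: col_offsets has length ncols + 1, starts at 0, ends at nnz, and row indices are strictly increasing within each column.)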
+ + { + // A CSC matrix with zero explicitly stored entries + let offsets = vec![0, 0, 0, 0]; + let indices = vec![]; + let values = Vec::<i32>::new(); + let mut matrix = CscMatrix::try_from_csc_data(2, 3, offsets, indices, values).unwrap(); + + assert_eq!(matrix, CscMatrix::zeros(2, 3)); + + assert_eq!(matrix.nrows(), 2); + assert_eq!(matrix.ncols(), 3); + assert_eq!(matrix.nnz(), 0); + assert_eq!(matrix.col_offsets(), &[0, 0, 0, 0]); + assert_eq!(matrix.row_indices(), &[]); + assert_eq!(matrix.values(), &[]); + + assert!(matrix.triplet_iter().next().is_none()); + assert!(matrix.triplet_iter_mut().next().is_none()); + + assert_eq!(matrix.col(0).nrows(), 2); + assert_eq!(matrix.col(0).nnz(), 0); + assert_eq!(matrix.col(0).row_indices(), &[]); + assert_eq!(matrix.col(0).values(), &[]); + assert_eq!(matrix.col_mut(0).nrows(), 2); + assert_eq!(matrix.col_mut(0).nnz(), 0); + assert_eq!(matrix.col_mut(0).row_indices(), &[]); + assert_eq!(matrix.col_mut(0).values(), &[]); + assert_eq!(matrix.col_mut(0).values_mut(), &[]); + assert_eq!( + matrix.col_mut(0).rows_and_values_mut(), + ([].as_ref(), [].as_mut()) + ); + + assert_eq!(matrix.col(1).nrows(), 2); + assert_eq!(matrix.col(1).nnz(), 0); + assert_eq!(matrix.col(1).row_indices(), &[]); + assert_eq!(matrix.col(1).values(), &[]); + assert_eq!(matrix.col_mut(1).nrows(), 2); + assert_eq!(matrix.col_mut(1).nnz(), 0); + assert_eq!(matrix.col_mut(1).row_indices(), &[]); + assert_eq!(matrix.col_mut(1).values(), &[]); + assert_eq!(matrix.col_mut(1).values_mut(), &[]); + assert_eq!( + matrix.col_mut(1).rows_and_values_mut(), + ([].as_ref(), [].as_mut()) + ); + + assert_eq!(matrix.col(2).nrows(), 2); + assert_eq!(matrix.col(2).nnz(), 0); + assert_eq!(matrix.col(2).row_indices(), &[]); + assert_eq!(matrix.col(2).values(), &[]); + assert_eq!(matrix.col_mut(2).nrows(), 2); + assert_eq!(matrix.col_mut(2).nnz(), 0); + assert_eq!(matrix.col_mut(2).row_indices(), &[]); + assert_eq!(matrix.col_mut(2).values(), &[]); + assert_eq!(matrix.col_mut(2).values_mut(), &[]); + assert_eq!( + matrix.col_mut(2).rows_and_values_mut(), + ([].as_ref(), [].as_mut()) + ); + + assert!(matrix.get_col(3).is_none()); + assert!(matrix.get_col_mut(3).is_none()); + + let (offsets, indices, values) = matrix.disassemble(); + + assert_eq!(offsets, vec![0, 0, 0, 0]); + assert_eq!(indices, vec![]); + assert_eq!(values, vec![]); + } + + { + // An arbitrary CSC matrix + let offsets = vec![0, 2, 2, 5]; + let indices = vec![0, 5, 1, 2, 3]; + let values = vec![0, 1, 2, 3, 4]; + let mut matrix = + CscMatrix::try_from_csc_data(6, 3, offsets.clone(), indices.clone(), values.clone()) + .unwrap(); + + assert_eq!(matrix.nrows(), 6); + assert_eq!(matrix.ncols(), 3); + assert_eq!(matrix.nnz(), 5); + assert_eq!(matrix.col_offsets(), &[0, 2, 2, 5]); + assert_eq!(matrix.row_indices(), &[0, 5, 1, 2, 3]); + assert_eq!(matrix.values(), &[0, 1, 2, 3, 4]); + + let expected_triplets = vec![(0, 0, 0), (5, 0, 1), (1, 2, 2), (2, 2, 3), (3, 2, 4)]; + assert_eq!( + matrix + .triplet_iter() + .map(|(i, j, v)| (i, j, *v)) + .collect::<Vec<_>>(), + expected_triplets + ); + assert_eq!( + matrix + .triplet_iter_mut() + .map(|(i, j, v)| (i, j, *v)) + .collect::<Vec<_>>(), + expected_triplets + ); + + assert_eq!(matrix.col(0).nrows(), 6); + assert_eq!(matrix.col(0).nnz(), 2); + assert_eq!(matrix.col(0).row_indices(), &[0, 5]); + assert_eq!(matrix.col(0).values(), &[0, 1]); + assert_eq!(matrix.col_mut(0).nrows(), 6); + assert_eq!(matrix.col_mut(0).nnz(), 2); + assert_eq!(matrix.col_mut(0).row_indices(), &[0, 5]); +
assert_eq!(matrix.col_mut(0).values(), &[0, 1]); + assert_eq!(matrix.col_mut(0).values_mut(), &[0, 1]); + assert_eq!( + matrix.col_mut(0).rows_and_values_mut(), + ([0, 5].as_ref(), [0, 1].as_mut()) + ); + + assert_eq!(matrix.col(1).nrows(), 6); + assert_eq!(matrix.col(1).nnz(), 0); + assert_eq!(matrix.col(1).row_indices(), &[]); + assert_eq!(matrix.col(1).values(), &[]); + assert_eq!(matrix.col_mut(1).nrows(), 6); + assert_eq!(matrix.col_mut(1).nnz(), 0); + assert_eq!(matrix.col_mut(1).row_indices(), &[]); + assert_eq!(matrix.col_mut(1).values(), &[]); + assert_eq!(matrix.col_mut(1).values_mut(), &[]); + assert_eq!( + matrix.col_mut(1).rows_and_values_mut(), + ([].as_ref(), [].as_mut()) + ); + + assert_eq!(matrix.col(2).nrows(), 6); + assert_eq!(matrix.col(2).nnz(), 3); + assert_eq!(matrix.col(2).row_indices(), &[1, 2, 3]); + assert_eq!(matrix.col(2).values(), &[2, 3, 4]); + assert_eq!(matrix.col_mut(2).nrows(), 6); + assert_eq!(matrix.col_mut(2).nnz(), 3); + assert_eq!(matrix.col_mut(2).row_indices(), &[1, 2, 3]); + assert_eq!(matrix.col_mut(2).values(), &[2, 3, 4]); + assert_eq!(matrix.col_mut(2).values_mut(), &[2, 3, 4]); + assert_eq!( + matrix.col_mut(2).rows_and_values_mut(), + ([1, 2, 3].as_ref(), [2, 3, 4].as_mut()) + ); + + assert!(matrix.get_col(3).is_none()); + assert!(matrix.get_col_mut(3).is_none()); + + let (offsets2, indices2, values2) = matrix.disassemble(); + + assert_eq!(offsets2, offsets); + assert_eq!(indices2, indices); + assert_eq!(values2, values); + } +} + +#[test] +fn csc_matrix_try_from_invalid_csc_data() { + { + // Empty offset array (invalid length) + let matrix = CscMatrix::try_from_csc_data(0, 0, Vec::new(), Vec::new(), Vec::<i32>::new()); + assert_eq!( + matrix.unwrap_err().kind(), + &SparseFormatErrorKind::InvalidStructure + ); + } + + { + // Offset array invalid length for arbitrary data + let offsets = vec![0, 3, 5]; + let indices = vec![0, 1, 2, 3, 5]; + let values = vec![0, 1, 2, 3, 4]; + + let matrix = CscMatrix::try_from_csc_data(6, 3, offsets, indices, values); + assert_eq!( + matrix.unwrap_err().kind(), + &SparseFormatErrorKind::InvalidStructure + ); + } + + { + // Invalid first entry in offsets array + let offsets = vec![1, 2, 2, 5]; + let indices = vec![0, 5, 1, 2, 3]; + let values = vec![0, 1, 2, 3, 4]; + let matrix = CscMatrix::try_from_csc_data(6, 3, offsets, indices, values); + assert_eq!( + matrix.unwrap_err().kind(), + &SparseFormatErrorKind::InvalidStructure + ); + } + + { + // Invalid last entry in offsets array + let offsets = vec![0, 2, 2, 4]; + let indices = vec![0, 5, 1, 2, 3]; + let values = vec![0, 1, 2, 3, 4]; + let matrix = CscMatrix::try_from_csc_data(6, 3, offsets, indices, values); + assert_eq!( + matrix.unwrap_err().kind(), + &SparseFormatErrorKind::InvalidStructure + ); + } + + { + // Invalid length of offsets array + let offsets = vec![0, 2, 2]; + let indices = vec![0, 5, 1, 2, 3]; + let values = vec![0, 1, 2, 3, 4]; + let matrix = CscMatrix::try_from_csc_data(6, 3, offsets, indices, values); + assert_eq!( + matrix.unwrap_err().kind(), + &SparseFormatErrorKind::InvalidStructure + ); + } + + { + // Nonmonotonic offsets + let offsets = vec![0, 3, 2, 5]; + let indices = vec![0, 1, 2, 3, 4]; + let values = vec![0, 1, 2, 3, 4]; + let matrix = CscMatrix::try_from_csc_data(6, 3, offsets, indices, values); + assert_eq!( + matrix.unwrap_err().kind(), + &SparseFormatErrorKind::InvalidStructure + ); + } + + { + // Nonmonotonic minor indices + let offsets = vec![0, 2, 2, 5]; + let indices = vec![0, 2, 3, 1, 4]; + let values = vec![0, 1, 2, 3,
4]; + let matrix = CscMatrix::try_from_csc_data(6, 3, offsets, indices, values); + assert_eq!( + matrix.unwrap_err().kind(), + &SparseFormatErrorKind::InvalidStructure + ); + } + + { + // Minor index out of bounds + let offsets = vec![0, 2, 2, 5]; + let indices = vec![0, 6, 1, 2, 3]; + let values = vec![0, 1, 2, 3, 4]; + let matrix = CscMatrix::try_from_csc_data(6, 3, offsets, indices, values); + assert_eq!( + matrix.unwrap_err().kind(), + &SparseFormatErrorKind::IndexOutOfBounds + ); + } + + { + // Duplicate entry + let offsets = vec![0, 2, 2, 5]; + let indices = vec![0, 5, 2, 2, 3]; + let values = vec![0, 1, 2, 3, 4]; + let matrix = CscMatrix::try_from_csc_data(6, 3, offsets, indices, values); + assert_eq!( + matrix.unwrap_err().kind(), + &SparseFormatErrorKind::DuplicateEntry + ); + } +} + +#[test] +fn csc_disassemble_avoids_clone_when_owned() { + // Test that disassemble avoids cloning the sparsity pattern when it holds the sole reference + // to the pattern. We do so by checking that the pointer to the data is unchanged. + + let offsets = vec![0, 2, 2, 5]; + let indices = vec![0, 5, 1, 2, 3]; + let values = vec![0, 1, 2, 3, 4]; + let offsets_ptr = offsets.as_ptr(); + let indices_ptr = indices.as_ptr(); + let values_ptr = values.as_ptr(); + let matrix = CscMatrix::try_from_csc_data(6, 3, offsets, indices, values).unwrap(); + + let (offsets, indices, values) = matrix.disassemble(); + assert_eq!(offsets.as_ptr(), offsets_ptr); + assert_eq!(indices.as_ptr(), indices_ptr); + assert_eq!(values.as_ptr(), values_ptr); +} + +// Rustfmt makes this test much harder to read by expanding some of the one-liners to 4-liners, +// so for now we skip rustfmt... +#[rustfmt::skip] +#[test] +fn csc_matrix_get_index_entry() { + // Test .get_entry(_mut) and .index_entry(_mut) methods + + #[rustfmt::skip] + let dense = DMatrix::from_row_slice(2, 3, &[ + 1, 0, 3, + 0, 5, 6 + ]); + let csc = CscMatrix::from(&dense); + + assert_eq!(csc.get_entry(0, 0), Some(SparseEntry::NonZero(&1))); + assert_eq!(csc.index_entry(0, 0), SparseEntry::NonZero(&1)); + assert_eq!(csc.get_entry(0, 1), Some(SparseEntry::Zero)); + assert_eq!(csc.index_entry(0, 1), SparseEntry::Zero); + assert_eq!(csc.get_entry(0, 2), Some(SparseEntry::NonZero(&3))); + assert_eq!(csc.index_entry(0, 2), SparseEntry::NonZero(&3)); + assert_eq!(csc.get_entry(1, 0), Some(SparseEntry::Zero)); + assert_eq!(csc.index_entry(1, 0), SparseEntry::Zero); + assert_eq!(csc.get_entry(1, 1), Some(SparseEntry::NonZero(&5))); + assert_eq!(csc.index_entry(1, 1), SparseEntry::NonZero(&5)); + assert_eq!(csc.get_entry(1, 2), Some(SparseEntry::NonZero(&6))); + assert_eq!(csc.index_entry(1, 2), SparseEntry::NonZero(&6)); + + // Check some out of bounds with .get_entry + assert_eq!(csc.get_entry(0, 3), None); + assert_eq!(csc.get_entry(0, 4), None); + assert_eq!(csc.get_entry(1, 3), None); + assert_eq!(csc.get_entry(1, 4), None); + assert_eq!(csc.get_entry(2, 0), None); + assert_eq!(csc.get_entry(2, 1), None); + assert_eq!(csc.get_entry(2, 2), None); + assert_eq!(csc.get_entry(2, 3), None); + assert_eq!(csc.get_entry(2, 4), None); + + // Check that out of bounds with .index_entry panics + assert_panics!(csc.index_entry(0, 3)); + assert_panics!(csc.index_entry(0, 4)); + assert_panics!(csc.index_entry(1, 3)); + assert_panics!(csc.index_entry(1, 4)); + assert_panics!(csc.index_entry(2, 0)); + assert_panics!(csc.index_entry(2, 1)); + assert_panics!(csc.index_entry(2, 2)); + assert_panics!(csc.index_entry(2, 3)); + assert_panics!(csc.index_entry(2, 4)); + + { + // Check mutable 
versions of the above functions + let mut csc = csc; + + assert_eq!(csc.get_entry_mut(0, 0), Some(SparseEntryMut::NonZero(&mut 1))); + assert_eq!(csc.index_entry_mut(0, 0), SparseEntryMut::NonZero(&mut 1)); + assert_eq!(csc.get_entry_mut(0, 1), Some(SparseEntryMut::Zero)); + assert_eq!(csc.index_entry_mut(0, 1), SparseEntryMut::Zero); + assert_eq!(csc.get_entry_mut(0, 2), Some(SparseEntryMut::NonZero(&mut 3))); + assert_eq!(csc.index_entry_mut(0, 2), SparseEntryMut::NonZero(&mut 3)); + assert_eq!(csc.get_entry_mut(1, 0), Some(SparseEntryMut::Zero)); + assert_eq!(csc.index_entry_mut(1, 0), SparseEntryMut::Zero); + assert_eq!(csc.get_entry_mut(1, 1), Some(SparseEntryMut::NonZero(&mut 5))); + assert_eq!(csc.index_entry_mut(1, 1), SparseEntryMut::NonZero(&mut 5)); + assert_eq!(csc.get_entry_mut(1, 2), Some(SparseEntryMut::NonZero(&mut 6))); + assert_eq!(csc.index_entry_mut(1, 2), SparseEntryMut::NonZero(&mut 6)); + + // Check some out of bounds with .get_entry_mut + assert_eq!(csc.get_entry_mut(0, 3), None); + assert_eq!(csc.get_entry_mut(0, 4), None); + assert_eq!(csc.get_entry_mut(1, 3), None); + assert_eq!(csc.get_entry_mut(1, 4), None); + assert_eq!(csc.get_entry_mut(2, 0), None); + assert_eq!(csc.get_entry_mut(2, 1), None); + assert_eq!(csc.get_entry_mut(2, 2), None); + assert_eq!(csc.get_entry_mut(2, 3), None); + assert_eq!(csc.get_entry_mut(2, 4), None); + + // Check that out of bounds with .index_entry_mut panics + // Note: the cloning is necessary because a mutable reference is not UnwindSafe + assert_panics!({ let mut csc = csc.clone(); csc.index_entry_mut(0, 3); }); + assert_panics!({ let mut csc = csc.clone(); csc.index_entry_mut(0, 4); }); + assert_panics!({ let mut csc = csc.clone(); csc.index_entry_mut(1, 3); }); + assert_panics!({ let mut csc = csc.clone(); csc.index_entry_mut(1, 4); }); + assert_panics!({ let mut csc = csc.clone(); csc.index_entry_mut(2, 0); }); + assert_panics!({ let mut csc = csc.clone(); csc.index_entry_mut(2, 1); }); + assert_panics!({ let mut csc = csc.clone(); csc.index_entry_mut(2, 2); }); + assert_panics!({ let mut csc = csc.clone(); csc.index_entry_mut(2, 3); }); + assert_panics!({ let mut csc = csc.clone(); csc.index_entry_mut(2, 4); }); + } +} + +#[test] +fn csc_matrix_col_iter() { + // Note: this is the transpose of the matrix used for the similar csr_matrix_row_iter test + // (this way the actual tests are almost identical, due to the transposed relationship + // between CSR and CSC) + #[rustfmt::skip] + let dense = DMatrix::from_row_slice(4, 3, &[ + 0, 3, 0, + 1, 0, 4, + 2, 0, 0, + 0, 0, 5, + ]); + let csc = CscMatrix::from(&dense); + + // Immutable iterator + { + let mut col_iter = csc.col_iter(); + + { + let col = col_iter.next().unwrap(); + assert_eq!(col.nrows(), 4); + assert_eq!(col.nnz(), 2); + assert_eq!(col.row_indices(), &[1, 2]); + assert_eq!(col.values(), &[1, 2]); + assert_eq!(col.get_entry(0), Some(SparseEntry::Zero)); + assert_eq!(col.get_entry(1), Some(SparseEntry::NonZero(&1))); + assert_eq!(col.get_entry(2), Some(SparseEntry::NonZero(&2))); + assert_eq!(col.get_entry(3), Some(SparseEntry::Zero)); + assert_eq!(col.get_entry(4), None); + } + + { + let col = col_iter.next().unwrap(); + assert_eq!(col.nrows(), 4); + assert_eq!(col.nnz(), 1); + assert_eq!(col.row_indices(), &[0]); + assert_eq!(col.values(), &[3]); + assert_eq!(col.get_entry(0), Some(SparseEntry::NonZero(&3))); + assert_eq!(col.get_entry(1), Some(SparseEntry::Zero)); + assert_eq!(col.get_entry(2), Some(SparseEntry::Zero)); + assert_eq!(col.get_entry(3), 
Some(SparseEntry::Zero)); + assert_eq!(col.get_entry(4), None); + } + + { + let col = col_iter.next().unwrap(); + assert_eq!(col.nrows(), 4); + assert_eq!(col.nnz(), 2); + assert_eq!(col.row_indices(), &[1, 3]); + assert_eq!(col.values(), &[4, 5]); + assert_eq!(col.get_entry(0), Some(SparseEntry::Zero)); + assert_eq!(col.get_entry(1), Some(SparseEntry::NonZero(&4))); + assert_eq!(col.get_entry(2), Some(SparseEntry::Zero)); + assert_eq!(col.get_entry(3), Some(SparseEntry::NonZero(&5))); + assert_eq!(col.get_entry(4), None); + } + + assert!(col_iter.next().is_none()); + } + + // Mutable iterator + { + let mut csc = csc; + let mut col_iter = csc.col_iter_mut(); + + { + let mut col = col_iter.next().unwrap(); + assert_eq!(col.nrows(), 4); + assert_eq!(col.nnz(), 2); + assert_eq!(col.row_indices(), &[1, 2]); + assert_eq!(col.values(), &[1, 2]); + assert_eq!(col.get_entry(0), Some(SparseEntry::Zero)); + assert_eq!(col.get_entry(1), Some(SparseEntry::NonZero(&1))); + assert_eq!(col.get_entry(2), Some(SparseEntry::NonZero(&2))); + assert_eq!(col.get_entry(3), Some(SparseEntry::Zero)); + assert_eq!(col.get_entry(4), None); + + assert_eq!(col.values_mut(), &mut [1, 2]); + assert_eq!( + col.rows_and_values_mut(), + ([1, 2].as_ref(), [1, 2].as_mut()) + ); + assert_eq!(col.get_entry_mut(0), Some(SparseEntryMut::Zero)); + assert_eq!(col.get_entry_mut(1), Some(SparseEntryMut::NonZero(&mut 1))); + assert_eq!(col.get_entry_mut(2), Some(SparseEntryMut::NonZero(&mut 2))); + assert_eq!(col.get_entry_mut(3), Some(SparseEntryMut::Zero)); + assert_eq!(col.get_entry_mut(4), None); + } + + { + let mut col = col_iter.next().unwrap(); + assert_eq!(col.nrows(), 4); + assert_eq!(col.nnz(), 1); + assert_eq!(col.row_indices(), &[0]); + assert_eq!(col.values(), &[3]); + assert_eq!(col.get_entry(0), Some(SparseEntry::NonZero(&3))); + assert_eq!(col.get_entry(1), Some(SparseEntry::Zero)); + assert_eq!(col.get_entry(2), Some(SparseEntry::Zero)); + assert_eq!(col.get_entry(3), Some(SparseEntry::Zero)); + assert_eq!(col.get_entry(4), None); + + assert_eq!(col.values_mut(), &mut [3]); + assert_eq!(col.rows_and_values_mut(), ([0].as_ref(), [3].as_mut())); + assert_eq!(col.get_entry_mut(0), Some(SparseEntryMut::NonZero(&mut 3))); + assert_eq!(col.get_entry_mut(1), Some(SparseEntryMut::Zero)); + assert_eq!(col.get_entry_mut(2), Some(SparseEntryMut::Zero)); + assert_eq!(col.get_entry_mut(3), Some(SparseEntryMut::Zero)); + assert_eq!(col.get_entry_mut(4), None); + } + + { + let mut col = col_iter.next().unwrap(); + assert_eq!(col.nrows(), 4); + assert_eq!(col.nnz(), 2); + assert_eq!(col.row_indices(), &[1, 3]); + assert_eq!(col.values(), &[4, 5]); + assert_eq!(col.get_entry(0), Some(SparseEntry::Zero)); + assert_eq!(col.get_entry(1), Some(SparseEntry::NonZero(&4))); + assert_eq!(col.get_entry(2), Some(SparseEntry::Zero)); + assert_eq!(col.get_entry(3), Some(SparseEntry::NonZero(&5))); + assert_eq!(col.get_entry(4), None); + + assert_eq!(col.values_mut(), &mut [4, 5]); + assert_eq!( + col.rows_and_values_mut(), + ([1, 3].as_ref(), [4, 5].as_mut()) + ); + assert_eq!(col.get_entry_mut(0), Some(SparseEntryMut::Zero)); + assert_eq!(col.get_entry_mut(1), Some(SparseEntryMut::NonZero(&mut 4))); + assert_eq!(col.get_entry_mut(2), Some(SparseEntryMut::Zero)); + assert_eq!(col.get_entry_mut(3), Some(SparseEntryMut::NonZero(&mut 5))); + assert_eq!(col.get_entry_mut(4), None); + } + + assert!(col_iter.next().is_none()); + } +} + +proptest! 
{ + #[test] + fn csc_double_transpose_is_identity(csc in csc_strategy()) { + prop_assert_eq!(csc.transpose().transpose(), csc); + } + + #[test] + fn csc_transpose_agrees_with_dense(csc in csc_strategy()) { + let dense_transpose = DMatrix::from(&csc).transpose(); + let csc_transpose = csc.transpose(); + prop_assert_eq!(dense_transpose, DMatrix::from(&csc_transpose)); + prop_assert_eq!(csc.nnz(), csc_transpose.nnz()); + } + + #[test] + fn csc_filter( + (csc, triplet_subset) + in csc_strategy() + .prop_flat_map(|matrix| { + let triplets: Vec<_> = matrix.triplet_iter().cloned_values().collect(); + let subset = subsequence(triplets, 0 ..= matrix.nnz()) + .prop_map(|triplet_subset| { + let set: HashSet<_> = triplet_subset.into_iter().collect(); + set + }); + (Just(matrix), subset) + })) + { + // We generate a CscMatrix and a HashSet corresponding to a subset of the (i, j, v) + // values in the matrix, which we use for filtering the matrix entries. + // The resulting triplets in the filtered matrix must then be exactly equal to + // the subset. + let filtered = csc.filter(|i, j, v| triplet_subset.contains(&(i, j, *v))); + let filtered_triplets: HashSet<_> = filtered + .triplet_iter() + .cloned_values() + .collect(); + + prop_assert_eq!(filtered_triplets, triplet_subset); + } + + #[test] + fn csc_lower_triangle_agrees_with_dense(csc in csc_strategy()) { + let csc_lower_triangle = csc.lower_triangle(); + prop_assert_eq!(DMatrix::from(&csc_lower_triangle), DMatrix::from(&csc).lower_triangle()); + prop_assert!(csc_lower_triangle.nnz() <= csc.nnz()); + } + + #[test] + fn csc_upper_triangle_agrees_with_dense(csc in csc_strategy()) { + let csc_upper_triangle = csc.upper_triangle(); + prop_assert_eq!(DMatrix::from(&csc_upper_triangle), DMatrix::from(&csc).upper_triangle()); + prop_assert!(csc_upper_triangle.nnz() <= csc.nnz()); + } + + #[test] + fn csc_diagonal_as_csc(csc in csc_strategy()) { + let d = csc.diagonal_as_csc(); + let d_entries: HashSet<_> = d.triplet_iter().cloned_values().collect(); + let csc_diagonal_entries: HashSet<_> = csc + .triplet_iter() + .cloned_values() + .filter(|&(i, j, _)| i == j) + .collect(); + + prop_assert_eq!(d_entries, csc_diagonal_entries); + } + + #[test] + fn csc_identity(n in 0 ..= 6usize) { + let csc = CscMatrix::<i32>::identity(n); + prop_assert_eq!(csc.nnz(), n); + prop_assert_eq!(DMatrix::from(&csc), DMatrix::identity(n, n)); + } +} diff --git a/nalgebra-sparse/tests/unit_tests/csr.rs b/nalgebra-sparse/tests/unit_tests/csr.rs new file mode 100644 index 00000000..dee1ae1e --- /dev/null +++ b/nalgebra-sparse/tests/unit_tests/csr.rs @@ -0,0 +1,601 @@ +use nalgebra::DMatrix; +use nalgebra_sparse::csr::CsrMatrix; +use nalgebra_sparse::{SparseEntry, SparseEntryMut, SparseFormatErrorKind}; + +use proptest::prelude::*; +use proptest::sample::subsequence; + +use crate::assert_panics; +use crate::common::csr_strategy; + +use std::collections::HashSet; + +#[test] +fn csr_matrix_valid_data() { + // Construct matrix from valid data and check that selected methods return results + // that agree with expectations.
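+ // (Recall the CSR invariants: row_offsets has length nrows + 1, starts at 0, ends at nnz, and column indices are strictly increasing within each row.)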
+ + { + // A CSR matrix with zero explicitly stored entries + let offsets = vec![0, 0, 0, 0]; + let indices = vec![]; + let values = Vec::<i32>::new(); + let mut matrix = CsrMatrix::try_from_csr_data(3, 2, offsets, indices, values).unwrap(); + + assert_eq!(matrix, CsrMatrix::zeros(3, 2)); + + assert_eq!(matrix.nrows(), 3); + assert_eq!(matrix.ncols(), 2); + assert_eq!(matrix.nnz(), 0); + assert_eq!(matrix.row_offsets(), &[0, 0, 0, 0]); + assert_eq!(matrix.col_indices(), &[]); + assert_eq!(matrix.values(), &[]); + + assert!(matrix.triplet_iter().next().is_none()); + assert!(matrix.triplet_iter_mut().next().is_none()); + + assert_eq!(matrix.row(0).ncols(), 2); + assert_eq!(matrix.row(0).nnz(), 0); + assert_eq!(matrix.row(0).col_indices(), &[]); + assert_eq!(matrix.row(0).values(), &[]); + assert_eq!(matrix.row_mut(0).ncols(), 2); + assert_eq!(matrix.row_mut(0).nnz(), 0); + assert_eq!(matrix.row_mut(0).col_indices(), &[]); + assert_eq!(matrix.row_mut(0).values(), &[]); + assert_eq!(matrix.row_mut(0).values_mut(), &[]); + assert_eq!( + matrix.row_mut(0).cols_and_values_mut(), + ([].as_ref(), [].as_mut()) + ); + + assert_eq!(matrix.row(1).ncols(), 2); + assert_eq!(matrix.row(1).nnz(), 0); + assert_eq!(matrix.row(1).col_indices(), &[]); + assert_eq!(matrix.row(1).values(), &[]); + assert_eq!(matrix.row_mut(1).ncols(), 2); + assert_eq!(matrix.row_mut(1).nnz(), 0); + assert_eq!(matrix.row_mut(1).col_indices(), &[]); + assert_eq!(matrix.row_mut(1).values(), &[]); + assert_eq!(matrix.row_mut(1).values_mut(), &[]); + assert_eq!( + matrix.row_mut(1).cols_and_values_mut(), + ([].as_ref(), [].as_mut()) + ); + + assert_eq!(matrix.row(2).ncols(), 2); + assert_eq!(matrix.row(2).nnz(), 0); + assert_eq!(matrix.row(2).col_indices(), &[]); + assert_eq!(matrix.row(2).values(), &[]); + assert_eq!(matrix.row_mut(2).ncols(), 2); + assert_eq!(matrix.row_mut(2).nnz(), 0); + assert_eq!(matrix.row_mut(2).col_indices(), &[]); + assert_eq!(matrix.row_mut(2).values(), &[]); + assert_eq!(matrix.row_mut(2).values_mut(), &[]); + assert_eq!( + matrix.row_mut(2).cols_and_values_mut(), + ([].as_ref(), [].as_mut()) + ); + + assert!(matrix.get_row(3).is_none()); + assert!(matrix.get_row_mut(3).is_none()); + + let (offsets, indices, values) = matrix.disassemble(); + + assert_eq!(offsets, vec![0, 0, 0, 0]); + assert_eq!(indices, vec![]); + assert_eq!(values, vec![]); + } + + { + // An arbitrary CSR matrix + let offsets = vec![0, 2, 2, 5]; + let indices = vec![0, 5, 1, 2, 3]; + let values = vec![0, 1, 2, 3, 4]; + let mut matrix = + CsrMatrix::try_from_csr_data(3, 6, offsets.clone(), indices.clone(), values.clone()) + .unwrap(); + + assert_eq!(matrix.nrows(), 3); + assert_eq!(matrix.ncols(), 6); + assert_eq!(matrix.nnz(), 5); + assert_eq!(matrix.row_offsets(), &[0, 2, 2, 5]); + assert_eq!(matrix.col_indices(), &[0, 5, 1, 2, 3]); + assert_eq!(matrix.values(), &[0, 1, 2, 3, 4]); + + let expected_triplets = vec![(0, 0, 0), (0, 5, 1), (2, 1, 2), (2, 2, 3), (2, 3, 4)]; + assert_eq!( + matrix + .triplet_iter() + .map(|(i, j, v)| (i, j, *v)) + .collect::<Vec<_>>(), + expected_triplets + ); + assert_eq!( + matrix + .triplet_iter_mut() + .map(|(i, j, v)| (i, j, *v)) + .collect::<Vec<_>>(), + expected_triplets + ); + + assert_eq!(matrix.row(0).ncols(), 6); + assert_eq!(matrix.row(0).nnz(), 2); + assert_eq!(matrix.row(0).col_indices(), &[0, 5]); + assert_eq!(matrix.row(0).values(), &[0, 1]); + assert_eq!(matrix.row_mut(0).ncols(), 6); + assert_eq!(matrix.row_mut(0).nnz(), 2); + assert_eq!(matrix.row_mut(0).col_indices(), &[0, 5]); +
assert_eq!(matrix.row_mut(0).values(), &[0, 1]); + assert_eq!(matrix.row_mut(0).values_mut(), &[0, 1]); + assert_eq!( + matrix.row_mut(0).cols_and_values_mut(), + ([0, 5].as_ref(), [0, 1].as_mut()) + ); + + assert_eq!(matrix.row(1).ncols(), 6); + assert_eq!(matrix.row(1).nnz(), 0); + assert_eq!(matrix.row(1).col_indices(), &[]); + assert_eq!(matrix.row(1).values(), &[]); + assert_eq!(matrix.row_mut(1).ncols(), 6); + assert_eq!(matrix.row_mut(1).nnz(), 0); + assert_eq!(matrix.row_mut(1).col_indices(), &[]); + assert_eq!(matrix.row_mut(1).values(), &[]); + assert_eq!(matrix.row_mut(1).values_mut(), &[]); + assert_eq!( + matrix.row_mut(1).cols_and_values_mut(), + ([].as_ref(), [].as_mut()) + ); + + assert_eq!(matrix.row(2).ncols(), 6); + assert_eq!(matrix.row(2).nnz(), 3); + assert_eq!(matrix.row(2).col_indices(), &[1, 2, 3]); + assert_eq!(matrix.row(2).values(), &[2, 3, 4]); + assert_eq!(matrix.row_mut(2).ncols(), 6); + assert_eq!(matrix.row_mut(2).nnz(), 3); + assert_eq!(matrix.row_mut(2).col_indices(), &[1, 2, 3]); + assert_eq!(matrix.row_mut(2).values(), &[2, 3, 4]); + assert_eq!(matrix.row_mut(2).values_mut(), &[2, 3, 4]); + assert_eq!( + matrix.row_mut(2).cols_and_values_mut(), + ([1, 2, 3].as_ref(), [2, 3, 4].as_mut()) + ); + + assert!(matrix.get_row(3).is_none()); + assert!(matrix.get_row_mut(3).is_none()); + + let (offsets2, indices2, values2) = matrix.disassemble(); + + assert_eq!(offsets2, offsets); + assert_eq!(indices2, indices); + assert_eq!(values2, values); + } +} + +#[test] +fn csr_matrix_try_from_invalid_csr_data() { + { + // Empty offset array (invalid length) + let matrix = CsrMatrix::try_from_csr_data(0, 0, Vec::new(), Vec::new(), Vec::<i32>::new()); + assert_eq!( + matrix.unwrap_err().kind(), + &SparseFormatErrorKind::InvalidStructure + ); + } + + { + // Offset array invalid length for arbitrary data + let offsets = vec![0, 3, 5]; + let indices = vec![0, 1, 2, 3, 5]; + let values = vec![0, 1, 2, 3, 4]; + + let matrix = CsrMatrix::try_from_csr_data(3, 6, offsets, indices, values); + assert_eq!( + matrix.unwrap_err().kind(), + &SparseFormatErrorKind::InvalidStructure + ); + } + + { + // Invalid first entry in offsets array + let offsets = vec![1, 2, 2, 5]; + let indices = vec![0, 5, 1, 2, 3]; + let values = vec![0, 1, 2, 3, 4]; + let matrix = CsrMatrix::try_from_csr_data(3, 6, offsets, indices, values); + assert_eq!( + matrix.unwrap_err().kind(), + &SparseFormatErrorKind::InvalidStructure + ); + } + + { + // Invalid last entry in offsets array + let offsets = vec![0, 2, 2, 4]; + let indices = vec![0, 5, 1, 2, 3]; + let values = vec![0, 1, 2, 3, 4]; + let matrix = CsrMatrix::try_from_csr_data(3, 6, offsets, indices, values); + assert_eq!( + matrix.unwrap_err().kind(), + &SparseFormatErrorKind::InvalidStructure + ); + } + + { + // Invalid length of offsets array + let offsets = vec![0, 2, 2]; + let indices = vec![0, 5, 1, 2, 3]; + let values = vec![0, 1, 2, 3, 4]; + let matrix = CsrMatrix::try_from_csr_data(3, 6, offsets, indices, values); + assert_eq!( + matrix.unwrap_err().kind(), + &SparseFormatErrorKind::InvalidStructure + ); + } + + { + // Nonmonotonic offsets + let offsets = vec![0, 3, 2, 5]; + let indices = vec![0, 1, 2, 3, 4]; + let values = vec![0, 1, 2, 3, 4]; + let matrix = CsrMatrix::try_from_csr_data(3, 6, offsets, indices, values); + assert_eq!( + matrix.unwrap_err().kind(), + &SparseFormatErrorKind::InvalidStructure + ); + } + + { + // Nonmonotonic minor indices + let offsets = vec![0, 2, 2, 5]; + let indices = vec![0, 2, 3, 1, 4]; + let values = vec![0, 1, 2, 3,
4]; + let matrix = CsrMatrix::try_from_csr_data(3, 6, offsets, indices, values); + assert_eq!( + matrix.unwrap_err().kind(), + &SparseFormatErrorKind::InvalidStructure + ); + } + + { + // Minor index out of bounds + let offsets = vec![0, 2, 2, 5]; + let indices = vec![0, 6, 1, 2, 3]; + let values = vec![0, 1, 2, 3, 4]; + let matrix = CsrMatrix::try_from_csr_data(3, 6, offsets, indices, values); + assert_eq!( + matrix.unwrap_err().kind(), + &SparseFormatErrorKind::IndexOutOfBounds + ); + } + + { + // Duplicate entry + let offsets = vec![0, 2, 2, 5]; + let indices = vec![0, 5, 2, 2, 3]; + let values = vec![0, 1, 2, 3, 4]; + let matrix = CsrMatrix::try_from_csr_data(3, 6, offsets, indices, values); + assert_eq!( + matrix.unwrap_err().kind(), + &SparseFormatErrorKind::DuplicateEntry + ); + } +} + +#[test] +fn csr_disassemble_avoids_clone_when_owned() { + // Test that disassemble avoids cloning the sparsity pattern when it holds the sole reference + // to the pattern. We do so by checking that the pointer to the data is unchanged. + + let offsets = vec![0, 2, 2, 5]; + let indices = vec![0, 5, 1, 2, 3]; + let values = vec![0, 1, 2, 3, 4]; + let offsets_ptr = offsets.as_ptr(); + let indices_ptr = indices.as_ptr(); + let values_ptr = values.as_ptr(); + let matrix = CsrMatrix::try_from_csr_data(3, 6, offsets, indices, values).unwrap(); + + let (offsets, indices, values) = matrix.disassemble(); + assert_eq!(offsets.as_ptr(), offsets_ptr); + assert_eq!(indices.as_ptr(), indices_ptr); + assert_eq!(values.as_ptr(), values_ptr); +} + +// Rustfmt makes this test much harder to read by expanding some of the one-liners to 4-liners, +// so for now we skip rustfmt... +#[rustfmt::skip] +#[test] +fn csr_matrix_get_index_entry() { + // Test .get_entry(_mut) and .index_entry(_mut) methods + + #[rustfmt::skip] + let dense = DMatrix::from_row_slice(2, 3, &[ + 1, 0, 3, + 0, 5, 6 + ]); + let csr = CsrMatrix::from(&dense); + + assert_eq!(csr.get_entry(0, 0), Some(SparseEntry::NonZero(&1))); + assert_eq!(csr.index_entry(0, 0), SparseEntry::NonZero(&1)); + assert_eq!(csr.get_entry(0, 1), Some(SparseEntry::Zero)); + assert_eq!(csr.index_entry(0, 1), SparseEntry::Zero); + assert_eq!(csr.get_entry(0, 2), Some(SparseEntry::NonZero(&3))); + assert_eq!(csr.index_entry(0, 2), SparseEntry::NonZero(&3)); + assert_eq!(csr.get_entry(1, 0), Some(SparseEntry::Zero)); + assert_eq!(csr.index_entry(1, 0), SparseEntry::Zero); + assert_eq!(csr.get_entry(1, 1), Some(SparseEntry::NonZero(&5))); + assert_eq!(csr.index_entry(1, 1), SparseEntry::NonZero(&5)); + assert_eq!(csr.get_entry(1, 2), Some(SparseEntry::NonZero(&6))); + assert_eq!(csr.index_entry(1, 2), SparseEntry::NonZero(&6)); + + // Check some out of bounds with .get_entry + assert_eq!(csr.get_entry(0, 3), None); + assert_eq!(csr.get_entry(0, 4), None); + assert_eq!(csr.get_entry(1, 3), None); + assert_eq!(csr.get_entry(1, 4), None); + assert_eq!(csr.get_entry(2, 0), None); + assert_eq!(csr.get_entry(2, 1), None); + assert_eq!(csr.get_entry(2, 2), None); + assert_eq!(csr.get_entry(2, 3), None); + assert_eq!(csr.get_entry(2, 4), None); + + // Check that out of bounds with .index_entry panics + assert_panics!(csr.index_entry(0, 3)); + assert_panics!(csr.index_entry(0, 4)); + assert_panics!(csr.index_entry(1, 3)); + assert_panics!(csr.index_entry(1, 4)); + assert_panics!(csr.index_entry(2, 0)); + assert_panics!(csr.index_entry(2, 1)); + assert_panics!(csr.index_entry(2, 2)); + assert_panics!(csr.index_entry(2, 3)); + assert_panics!(csr.index_entry(2, 4)); + + { + // Check mutable 
versions of the above functions + let mut csr = csr; + + assert_eq!(csr.get_entry_mut(0, 0), Some(SparseEntryMut::NonZero(&mut 1))); + assert_eq!(csr.index_entry_mut(0, 0), SparseEntryMut::NonZero(&mut 1)); + assert_eq!(csr.get_entry_mut(0, 1), Some(SparseEntryMut::Zero)); + assert_eq!(csr.index_entry_mut(0, 1), SparseEntryMut::Zero); + assert_eq!(csr.get_entry_mut(0, 2), Some(SparseEntryMut::NonZero(&mut 3))); + assert_eq!(csr.index_entry_mut(0, 2), SparseEntryMut::NonZero(&mut 3)); + assert_eq!(csr.get_entry_mut(1, 0), Some(SparseEntryMut::Zero)); + assert_eq!(csr.index_entry_mut(1, 0), SparseEntryMut::Zero); + assert_eq!(csr.get_entry_mut(1, 1), Some(SparseEntryMut::NonZero(&mut 5))); + assert_eq!(csr.index_entry_mut(1, 1), SparseEntryMut::NonZero(&mut 5)); + assert_eq!(csr.get_entry_mut(1, 2), Some(SparseEntryMut::NonZero(&mut 6))); + assert_eq!(csr.index_entry_mut(1, 2), SparseEntryMut::NonZero(&mut 6)); + + // Check some out of bounds with .get_entry_mut + assert_eq!(csr.get_entry_mut(0, 3), None); + assert_eq!(csr.get_entry_mut(0, 4), None); + assert_eq!(csr.get_entry_mut(1, 3), None); + assert_eq!(csr.get_entry_mut(1, 4), None); + assert_eq!(csr.get_entry_mut(2, 0), None); + assert_eq!(csr.get_entry_mut(2, 1), None); + assert_eq!(csr.get_entry_mut(2, 2), None); + assert_eq!(csr.get_entry_mut(2, 3), None); + assert_eq!(csr.get_entry_mut(2, 4), None); + + // Check that out of bounds with .index_entry_mut panics + // Note: the cloning is necessary because a mutable reference is not UnwindSafe + assert_panics!({ let mut csr = csr.clone(); csr.index_entry_mut(0, 3); }); + assert_panics!({ let mut csr = csr.clone(); csr.index_entry_mut(0, 4); }); + assert_panics!({ let mut csr = csr.clone(); csr.index_entry_mut(1, 3); }); + assert_panics!({ let mut csr = csr.clone(); csr.index_entry_mut(1, 4); }); + assert_panics!({ let mut csr = csr.clone(); csr.index_entry_mut(2, 0); }); + assert_panics!({ let mut csr = csr.clone(); csr.index_entry_mut(2, 1); }); + assert_panics!({ let mut csr = csr.clone(); csr.index_entry_mut(2, 2); }); + assert_panics!({ let mut csr = csr.clone(); csr.index_entry_mut(2, 3); }); + assert_panics!({ let mut csr = csr.clone(); csr.index_entry_mut(2, 4); }); + } +} + +#[test] +fn csr_matrix_row_iter() { + #[rustfmt::skip] + let dense = DMatrix::from_row_slice(3, 4, &[ + 0, 1, 2, 0, + 3, 0, 0, 0, + 0, 4, 0, 5 + ]); + let csr = CsrMatrix::from(&dense); + + // Immutable iterator + { + let mut row_iter = csr.row_iter(); + + { + let row = row_iter.next().unwrap(); + assert_eq!(row.ncols(), 4); + assert_eq!(row.nnz(), 2); + assert_eq!(row.col_indices(), &[1, 2]); + assert_eq!(row.values(), &[1, 2]); + assert_eq!(row.get_entry(0), Some(SparseEntry::Zero)); + assert_eq!(row.get_entry(1), Some(SparseEntry::NonZero(&1))); + assert_eq!(row.get_entry(2), Some(SparseEntry::NonZero(&2))); + assert_eq!(row.get_entry(3), Some(SparseEntry::Zero)); + assert_eq!(row.get_entry(4), None); + } + + { + let row = row_iter.next().unwrap(); + assert_eq!(row.ncols(), 4); + assert_eq!(row.nnz(), 1); + assert_eq!(row.col_indices(), &[0]); + assert_eq!(row.values(), &[3]); + assert_eq!(row.get_entry(0), Some(SparseEntry::NonZero(&3))); + assert_eq!(row.get_entry(1), Some(SparseEntry::Zero)); + assert_eq!(row.get_entry(2), Some(SparseEntry::Zero)); + assert_eq!(row.get_entry(3), Some(SparseEntry::Zero)); + assert_eq!(row.get_entry(4), None); + } + + { + let row = row_iter.next().unwrap(); + assert_eq!(row.ncols(), 4); + assert_eq!(row.nnz(), 2); + assert_eq!(row.col_indices(), &[1, 3]); + 
assert_eq!(row.values(), &[4, 5]); + assert_eq!(row.get_entry(0), Some(SparseEntry::Zero)); + assert_eq!(row.get_entry(1), Some(SparseEntry::NonZero(&4))); + assert_eq!(row.get_entry(2), Some(SparseEntry::Zero)); + assert_eq!(row.get_entry(3), Some(SparseEntry::NonZero(&5))); + assert_eq!(row.get_entry(4), None); + } + + assert!(row_iter.next().is_none()); + } + + // Mutable iterator + { + let mut csr = csr; + let mut row_iter = csr.row_iter_mut(); + + { + let mut row = row_iter.next().unwrap(); + assert_eq!(row.ncols(), 4); + assert_eq!(row.nnz(), 2); + assert_eq!(row.col_indices(), &[1, 2]); + assert_eq!(row.values(), &[1, 2]); + assert_eq!(row.get_entry(0), Some(SparseEntry::Zero)); + assert_eq!(row.get_entry(1), Some(SparseEntry::NonZero(&1))); + assert_eq!(row.get_entry(2), Some(SparseEntry::NonZero(&2))); + assert_eq!(row.get_entry(3), Some(SparseEntry::Zero)); + assert_eq!(row.get_entry(4), None); + + assert_eq!(row.values_mut(), &mut [1, 2]); + assert_eq!( + row.cols_and_values_mut(), + ([1, 2].as_ref(), [1, 2].as_mut()) + ); + assert_eq!(row.get_entry_mut(0), Some(SparseEntryMut::Zero)); + assert_eq!(row.get_entry_mut(1), Some(SparseEntryMut::NonZero(&mut 1))); + assert_eq!(row.get_entry_mut(2), Some(SparseEntryMut::NonZero(&mut 2))); + assert_eq!(row.get_entry_mut(3), Some(SparseEntryMut::Zero)); + assert_eq!(row.get_entry_mut(4), None); + } + + { + let mut row = row_iter.next().unwrap(); + assert_eq!(row.ncols(), 4); + assert_eq!(row.nnz(), 1); + assert_eq!(row.col_indices(), &[0]); + assert_eq!(row.values(), &[3]); + assert_eq!(row.get_entry(0), Some(SparseEntry::NonZero(&3))); + assert_eq!(row.get_entry(1), Some(SparseEntry::Zero)); + assert_eq!(row.get_entry(2), Some(SparseEntry::Zero)); + assert_eq!(row.get_entry(3), Some(SparseEntry::Zero)); + assert_eq!(row.get_entry(4), None); + + assert_eq!(row.values_mut(), &mut [3]); + assert_eq!(row.cols_and_values_mut(), ([0].as_ref(), [3].as_mut())); + assert_eq!(row.get_entry_mut(0), Some(SparseEntryMut::NonZero(&mut 3))); + assert_eq!(row.get_entry_mut(1), Some(SparseEntryMut::Zero)); + assert_eq!(row.get_entry_mut(2), Some(SparseEntryMut::Zero)); + assert_eq!(row.get_entry_mut(3), Some(SparseEntryMut::Zero)); + assert_eq!(row.get_entry_mut(4), None); + } + + { + let mut row = row_iter.next().unwrap(); + assert_eq!(row.ncols(), 4); + assert_eq!(row.nnz(), 2); + assert_eq!(row.col_indices(), &[1, 3]); + assert_eq!(row.values(), &[4, 5]); + assert_eq!(row.get_entry(0), Some(SparseEntry::Zero)); + assert_eq!(row.get_entry(1), Some(SparseEntry::NonZero(&4))); + assert_eq!(row.get_entry(2), Some(SparseEntry::Zero)); + assert_eq!(row.get_entry(3), Some(SparseEntry::NonZero(&5))); + assert_eq!(row.get_entry(4), None); + + assert_eq!(row.values_mut(), &mut [4, 5]); + assert_eq!( + row.cols_and_values_mut(), + ([1, 3].as_ref(), [4, 5].as_mut()) + ); + assert_eq!(row.get_entry_mut(0), Some(SparseEntryMut::Zero)); + assert_eq!(row.get_entry_mut(1), Some(SparseEntryMut::NonZero(&mut 4))); + assert_eq!(row.get_entry_mut(2), Some(SparseEntryMut::Zero)); + assert_eq!(row.get_entry_mut(3), Some(SparseEntryMut::NonZero(&mut 5))); + assert_eq!(row.get_entry_mut(4), None); + } + + assert!(row_iter.next().is_none()); + } +} + +proptest! 
{
+    #[test]
+    fn csr_double_transpose_is_identity(csr in csr_strategy()) {
+        prop_assert_eq!(csr.transpose().transpose(), csr);
+    }
+
+    #[test]
+    fn csr_transpose_agrees_with_dense(csr in csr_strategy()) {
+        let dense_transpose = DMatrix::from(&csr).transpose();
+        let csr_transpose = csr.transpose();
+        prop_assert_eq!(dense_transpose, DMatrix::from(&csr_transpose));
+        prop_assert_eq!(csr.nnz(), csr_transpose.nnz());
+    }
+
+    #[test]
+    fn csr_filter(
+        (csr, triplet_subset)
+        in csr_strategy()
+            .prop_flat_map(|matrix| {
+                let triplets: Vec<_> = matrix.triplet_iter().cloned_values().collect();
+                let subset = subsequence(triplets, 0 ..= matrix.nnz())
+                    .prop_map(|triplet_subset| {
+                        let set: HashSet<_> = triplet_subset.into_iter().collect();
+                        set
+                    });
+                (Just(matrix), subset)
+            }))
+    {
+        // We generate a CsrMatrix and a HashSet corresponding to a subset of the (i, j, v)
+        // values in the matrix, which we use for filtering the matrix entries.
+        // The resulting triplets in the filtered matrix must then be exactly equal to
+        // the subset.
+        let filtered = csr.filter(|i, j, v| triplet_subset.contains(&(i, j, *v)));
+        let filtered_triplets: HashSet<_> = filtered
+            .triplet_iter()
+            .cloned_values()
+            .collect();
+
+        prop_assert_eq!(filtered_triplets, triplet_subset);
+    }
+
+    #[test]
+    fn csr_lower_triangle_agrees_with_dense(csr in csr_strategy()) {
+        let csr_lower_triangle = csr.lower_triangle();
+        prop_assert_eq!(DMatrix::from(&csr_lower_triangle), DMatrix::from(&csr).lower_triangle());
+        prop_assert!(csr_lower_triangle.nnz() <= csr.nnz());
+    }
+
+    #[test]
+    fn csr_upper_triangle_agrees_with_dense(csr in csr_strategy()) {
+        let csr_upper_triangle = csr.upper_triangle();
+        prop_assert_eq!(DMatrix::from(&csr_upper_triangle), DMatrix::from(&csr).upper_triangle());
+        prop_assert!(csr_upper_triangle.nnz() <= csr.nnz());
+    }
+
+    #[test]
+    fn csr_diagonal_as_csr(csr in csr_strategy()) {
+        let d = csr.diagonal_as_csr();
+        let d_entries: HashSet<_> = d.triplet_iter().cloned_values().collect();
+        let csr_diagonal_entries: HashSet<_> = csr
+            .triplet_iter()
+            .cloned_values()
+            .filter(|&(i, j, _)| i == j)
+            .collect();
+
+        prop_assert_eq!(d_entries, csr_diagonal_entries);
+    }
+
+    #[test]
+    fn csr_identity(n in 0 ..= 6usize) {
+        let csr = CsrMatrix::<i32>::identity(n);
+        prop_assert_eq!(csr.nnz(), n);
+        prop_assert_eq!(DMatrix::from(&csr), DMatrix::identity(n, n));
+    }
+}
diff --git a/nalgebra-sparse/tests/unit_tests/mod.rs b/nalgebra-sparse/tests/unit_tests/mod.rs
new file mode 100644
index 00000000..ee2166dc
--- /dev/null
+++ b/nalgebra-sparse/tests/unit_tests/mod.rs
@@ -0,0 +1,8 @@
+mod cholesky;
+mod convert_serial;
+mod coo;
+mod csc;
+mod csr;
+mod ops;
+mod pattern;
+mod proptest;
diff --git a/nalgebra-sparse/tests/unit_tests/ops.proptest-regressions b/nalgebra-sparse/tests/unit_tests/ops.proptest-regressions
new file mode 100644
index 00000000..ae873c3f
--- /dev/null
+++ b/nalgebra-sparse/tests/unit_tests/ops.proptest-regressions
@@ -0,0 +1,14 @@
+# Seeds for failure cases proptest has generated in the past. It is
+# automatically read and these particular cases re-run before any
+# novel cases are generated.
+#
+# It is recommended to check this file in to source control so that
+# everyone who runs the test benefits from these saved cases.
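+#
+# Each `cc` line below pairs a failure seed (the hash) with a comment showing the
+# minimal shrunken input that reproduced the failure.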
+cc 6748ea4ac9523fcc4dd8327b27c6818f8df10eb2042774f59a6e3fa3205dbcbd # shrinks to (beta, alpha, (c, a, b)) = (0, -1, (Matrix { data: VecStorage { data: [0, 0, 0, 0, 0, 1, 5, -4, 2], nrows: Dynamic { value: 3 }, ncols: Dynamic { value: 3 } } }, CsrMatrix { sparsity_pattern: SparsityPattern { major_offsets: [0, 2, 2, 2], minor_indices: [0, 1], minor_dim: 5 }, values: [-5, -2] }, Matrix { data: VecStorage { data: [4, -2, -3, -3, -5, 3, 5, 1, -4, -4, 3, 5, 5, 5, -3], nrows: Dynamic { value: 5 }, ncols: Dynamic { value: 3 } } })) +cc dcf67ab7b8febf109cfa58ee0f082b9f7c23d6ad0df2e28dc99984deeb6b113a # shrinks to (beta, alpha, (c, a, b)) = (0, 0, (Matrix { data: VecStorage { data: [0, -1], nrows: Dynamic { value: 1 }, ncols: Dynamic { value: 2 } } }, CsrMatrix { sparsity_pattern: SparsityPattern { major_offsets: [0, 0], minor_indices: [], minor_dim: 4 }, values: [] }, Matrix { data: VecStorage { data: [3, 1, 1, 0, 0, 3, -5, -3], nrows: Dynamic { value: 4 }, ncols: Dynamic { value: 2 } } })) +cc dbaef9886eaad28be7cd48326b857f039d695bc0b19e9ada3304e812e984d2c3 # shrinks to (beta, alpha, (c, a, b)) = (0, -1, (Matrix { data: VecStorage { data: [1], nrows: Dynamic { value: 1 }, ncols: Dynamic { value: 1 } } }, CsrMatrix { sparsity_pattern: SparsityPattern { major_offsets: [0, 0], minor_indices: [], minor_dim: 0 }, values: [] }, Matrix { data: VecStorage { data: [], nrows: Dynamic { value: 0 }, ncols: Dynamic { value: 1 } } })) +cc 99e312beb498ffa79194f41501ea312dce1911878eba131282904ac97205aaa9 # shrinks to SpmmCsrDenseArgs { c, beta, alpha, trans_a, a, trans_b, b } = SpmmCsrDenseArgs { c: Matrix { data: VecStorage { data: [-1, 4, -1, -4, 2, 1, 4, -2, 1, 3, -2, 5], nrows: Dynamic { value: 2 }, ncols: Dynamic { value: 6 } } }, beta: 0, alpha: 0, trans_a: Transpose, a: CsrMatrix { sparsity_pattern: SparsityPattern { major_offsets: [0, 1, 1, 1, 1, 1, 1], minor_indices: [0], minor_dim: 2 }, values: [0] }, trans_b: Transpose, b: Matrix { data: VecStorage { data: [-1, 1, 0, -5, 4, -5, 2, 2, 4, -4, -3, -1, 1, -1, 0, 1, -3, 4, -5, 0, 1, -5, 0, 1, 1, -3, 5, 3, 5, -3, -5, 3, -1, -4, -4, -3], nrows: Dynamic { value: 6 }, ncols: Dynamic { value: 6 } } } } +cc bf74259df2db6eda24eb42098e57ea1c604bb67d6d0023fa308c321027b53a43 # shrinks to (alpha, beta, c, a, b, trans_a, trans_b) = (0, 0, Matrix { data: VecStorage { data: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], nrows: Dynamic { value: 4 }, ncols: Dynamic { value: 5 } } }, CsrMatrix { sparsity_pattern: SparsityPattern { major_offsets: [0, 3, 6, 9, 12], minor_indices: [0, 1, 3, 1, 2, 3, 0, 1, 2, 1, 2, 3], minor_dim: 4 }, values: [-3, 3, -3, 1, -3, 0, 2, 1, 3, 0, -4, -1] }, Matrix { data: VecStorage { data: [3, 1, 4, -5, 5, -2, -5, -1, 1, -1, 3, -3, -2, 4, 2, -1, -1, 3, -5, 5], nrows: Dynamic { value: 4 }, ncols: Dynamic { value: 5 } } }, NoTranspose, NoTranspose) +cc cbd6dac45a2f610e10cf4c15d4614cdbf7dfedbfcd733e4cc65c2e79829d14b3 # shrinks to SpmmCsrArgs { c, beta, alpha, trans_a, a, trans_b, b } = SpmmCsrArgs { c: CsrMatrix { sparsity_pattern: SparsityPattern { major_offsets: [0, 0, 1, 1, 1, 1], minor_indices: [0], minor_dim: 1 }, values: [0] }, beta: 0, alpha: 1, trans_a: Transpose(true), a: CsrMatrix { sparsity_pattern: SparsityPattern { major_offsets: [0, 0, 0, 1, 1, 1], minor_indices: [1], minor_dim: 5 }, values: [-1] }, trans_b: Transpose(true), b: CsrMatrix { sparsity_pattern: SparsityPattern { major_offsets: [0, 2], minor_indices: [2, 4], minor_dim: 5 }, values: [-1, 0] } } +cc 
8af78e2e41087743c8696c4d5563d59464f284662ccf85efc81ac56747d528bb # shrinks to (a, b) = (CscMatrix { cs: CsMatrix { sparsity_pattern: SparsityPattern { major_offsets: [0, 6, 12, 18, 24, 30, 33], minor_indices: [0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 1, 2, 5], minor_dim: 6 }, values: [0.4566433975117654, -0.5109683327713039, 0.0, -3.276901622678194, 0.0, -2.2065487385437095, 0.0, -0.42643054427847016, -2.9232369281581234, 0.0, 1.2913925579441763, 0.0, -1.4073766622090917, -4.795473113569459, 4.681765156869446, -0.821162215887913, 3.0315816068414794, -3.3986924718213407, -3.498903007282241, -3.1488953408335236, 3.458104636152161, -4.774694888508124, 2.603884664757498, 0.0, 0.0, -3.2650988857765535, 4.26699442646613, 0.0, -0.012223422086023561, 3.6899095325779285, -1.4264458042247958, 0.0, 3.4849193883471266] } }, Matrix { data: VecStorage { data: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.9513896933988457, -4.426942420881461, 0.0, 0.0, 0.0, -0.28264084049240257], nrows: Dynamic { value: 6 }, ncols: Dynamic { value: 2 } } })
+cc a4effd988fe352146fca365875e108ecf4f7d41f6ad54683e923ca6ce712e5d0 # shrinks to (a, b) = (CscMatrix { cs: CsMatrix { sparsity_pattern: SparsityPattern { major_offsets: [0, 5, 11, 17, 22, 27, 31], minor_indices: [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 3, 4, 5, 1, 2, 3, 4, 5, 0, 1, 3, 5], minor_dim: 6 }, values: [-2.24935510943371, -2.2288203680206227, 0.0, -1.029740125494273, 0.0, 0.0, 0.22632926934348507, -0.9123245943877407, 0.0, 3.8564332876991827, 0.0, 0.0, 0.0, -0.8235065737081717, 1.9337984046721566, 0.11003468246027737, -3.422112890579867, -3.7824068893569196, 0.0, -0.021700572247226546, -4.914783069982362, 0.6227245544506541, 0.0, 0.0, -4.411368879922364, -0.00013623178651567258, -2.613658177661417, -2.2783292441548637, 0.0, 1.351859435890189, -0.021345159183605134] } }, Matrix { data: VecStorage { data: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.519417607973404, 0.0, 0.0, 0.0, -0.21238483334481817], nrows: Dynamic { value: 6 }, ncols: Dynamic { value: 3 } } })
diff --git a/nalgebra-sparse/tests/unit_tests/ops.rs b/nalgebra-sparse/tests/unit_tests/ops.rs
new file mode 100644
index 00000000..f2a02fd8
--- /dev/null
+++ b/nalgebra-sparse/tests/unit_tests/ops.rs
@@ -0,0 +1,1235 @@
+use crate::common::{
+    csc_strategy, csr_strategy, non_zero_i32_value_strategy, value_strategy,
+    PROPTEST_I32_VALUE_STRATEGY, PROPTEST_MATRIX_DIM, PROPTEST_MAX_NNZ,
+};
+use nalgebra_sparse::csc::CscMatrix;
+use nalgebra_sparse::csr::CsrMatrix;
+use nalgebra_sparse::ops::serial::{
+    spadd_csc_prealloc, spadd_csr_prealloc, spadd_pattern, spmm_csc_dense, spmm_csc_prealloc,
+    spmm_csr_dense, spmm_csr_pattern, spmm_csr_prealloc, spsolve_csc_lower_triangular,
+};
+use nalgebra_sparse::ops::Op;
+use nalgebra_sparse::pattern::SparsityPattern;
+use nalgebra_sparse::proptest::{csc, csr, sparsity_pattern};
+
+use nalgebra::proptest::{matrix, vector};
+use nalgebra::{DMatrix, DMatrixSlice, DMatrixSliceMut, Scalar};
+
+use proptest::prelude::*;
+
+use matrixcompare::prop_assert_matrix_eq;
+
+use std::panic::catch_unwind;
+
+/// Represents the sparsity pattern of a CSR matrix as a dense matrix with 0/1
+fn dense_csr_pattern(pattern: &SparsityPattern) -> DMatrix<i32> {
+    let boolean_csr =
+        CsrMatrix::try_from_pattern_and_values(pattern.clone(), vec![1; pattern.nnz()]).unwrap();
+    DMatrix::from(&boolean_csr)
+}
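+
+// Editor's illustration (not in the original test file): for a hypothetical 2x3 pattern
+// with entries {(0, 0), (1, 2)}, `dense_csr_pattern` produces the 0/1 indicator matrix
+//
+//     1 0 0
+//     0 0 1
+//
+// i.e. a 1 wherever the pattern stores an explicit entry and 0 elsewhere.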
+
+/// Represents the sparsity pattern of a CSC matrix as a dense matrix with 0/1
+fn dense_csc_pattern(pattern: &SparsityPattern) -> DMatrix<i32> {
+    let boolean_csc =
+        CscMatrix::try_from_pattern_and_values(pattern.clone(), vec![1; pattern.nnz()]).unwrap();
+    DMatrix::from(&boolean_csc)
+}
+
+#[derive(Debug)]
+struct SpmmCsrDenseArgs<T: Scalar> {
+    c: DMatrix<T>,
+    beta: T,
+    alpha: T,
+    a: Op<CsrMatrix<T>>,
+    b: Op<DMatrix<T>>,
+}
+
+#[derive(Debug)]
+struct SpmmCscDenseArgs<T: Scalar> {
+    c: DMatrix<T>,
+    beta: T,
+    alpha: T,
+    a: Op<CscMatrix<T>>,
+    b: Op<DMatrix<T>>,
+}
+
+/// Returns matrices C, A and B with compatible dimensions such that they can be used
+/// in an `spmm` operation `C = beta * C + alpha * trans(A) * trans(B)`.
+fn spmm_csr_dense_args_strategy() -> impl Strategy<Value = SpmmCsrDenseArgs<i32>> {
+    let max_nnz = PROPTEST_MAX_NNZ;
+    let value_strategy = PROPTEST_I32_VALUE_STRATEGY;
+    let c_rows = PROPTEST_MATRIX_DIM;
+    let c_cols = PROPTEST_MATRIX_DIM;
+    let common_dim = PROPTEST_MATRIX_DIM;
+    let trans_strategy = trans_strategy();
+    let c_matrix_strategy = matrix(value_strategy.clone(), c_rows, c_cols);
+
+    (
+        c_matrix_strategy,
+        common_dim,
+        trans_strategy.clone(),
+        trans_strategy.clone(),
+    )
+        .prop_flat_map(move |(c, common_dim, trans_a, trans_b)| {
+            let a_shape = if trans_a {
+                (common_dim, c.nrows())
+            } else {
+                (c.nrows(), common_dim)
+            };
+            let b_shape = if trans_b {
+                (c.ncols(), common_dim)
+            } else {
+                (common_dim, c.ncols())
+            };
+            let a = csr(value_strategy.clone(), a_shape.0, a_shape.1, max_nnz);
+            let b = matrix(value_strategy.clone(), b_shape.0, b_shape.1);
+
+            // We use the same values for alpha, beta parameters as for matrix elements
+            let alpha = value_strategy.clone();
+            let beta = value_strategy.clone();
+
+            (Just(c), beta, alpha, Just(trans_a), a, Just(trans_b), b)
+        })
+        .prop_map(
+            |(c, beta, alpha, trans_a, a, trans_b, b)| SpmmCsrDenseArgs {
+                c,
+                beta,
+                alpha,
+                a: if trans_a {
+                    Op::Transpose(a)
+                } else {
+                    Op::NoOp(a)
+                },
+                b: if trans_b {
+                    Op::Transpose(b)
+                } else {
+                    Op::NoOp(b)
+                },
+            },
+        )
+}
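+
+// Illustration (an editor-added sketch, not part of the original test file): the strategy
+// above guarantees that the *logical* shape of `trans(A)` is (c.nrows(), common_dim) and
+// that of `trans(B)` is (common_dim, c.ncols()), where the logical shape of an `Op` is:
+#[allow(dead_code)]
+fn logical_shape(op: &Op<DMatrix<i32>>) -> (usize, usize) {
+    match op {
+        Op::NoOp(m) => (m.nrows(), m.ncols()),
+        Op::Transpose(m) => (m.ncols(), m.nrows()),
+    }
+}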
+
+/// Returns matrices C, A and B with compatible dimensions such that they can be used
+/// in an `spmm` operation `C = beta * C + alpha * trans(A) * trans(B)`.
+fn spmm_csc_dense_args_strategy() -> impl Strategy<Value = SpmmCscDenseArgs<i32>> {
+    spmm_csr_dense_args_strategy().prop_map(|args| SpmmCscDenseArgs {
+        c: args.c,
+        beta: args.beta,
+        alpha: args.alpha,
+        a: args.a.map_same_op(|a| CscMatrix::from(&a)),
+        b: args.b,
+    })
+}
+
+#[derive(Debug)]
+struct SpaddCsrArgs<T> {
+    c: CsrMatrix<T>,
+    beta: T,
+    alpha: T,
+    a: Op<CsrMatrix<T>>,
+}
+
+#[derive(Debug)]
+struct SpaddCscArgs<T> {
+    c: CscMatrix<T>,
+    beta: T,
+    alpha: T,
+    a: Op<CscMatrix<T>>,
+}
+
+fn spadd_csr_prealloc_args_strategy() -> impl Strategy<Value = SpaddCsrArgs<i32>> {
+    let value_strategy = PROPTEST_I32_VALUE_STRATEGY;
+
+    spadd_pattern_strategy()
+        .prop_flat_map(move |(a_pattern, b_pattern)| {
+            let c_pattern = spadd_pattern(&a_pattern, &b_pattern);
+
+            let a_values = vec![value_strategy.clone(); a_pattern.nnz()];
+            let c_values = vec![value_strategy.clone(); c_pattern.nnz()];
+            let alpha = value_strategy.clone();
+            let beta = value_strategy.clone();
+            (
+                Just(c_pattern),
+                Just(a_pattern),
+                c_values,
+                a_values,
+                alpha,
+                beta,
+                trans_strategy(),
+            )
+        })
+        .prop_map(
+            |(c_pattern, a_pattern, c_values, a_values, alpha, beta, trans_a)| {
+                let c = CsrMatrix::try_from_pattern_and_values(c_pattern, c_values).unwrap();
+                let a = CsrMatrix::try_from_pattern_and_values(a_pattern, a_values).unwrap();
+
+                let a = if trans_a {
+                    Op::Transpose(a.transpose())
+                } else {
+                    Op::NoOp(a)
+                };
+                SpaddCsrArgs { c, beta, alpha, a }
+            },
+        )
+}
+
+fn spadd_csc_prealloc_args_strategy() -> impl Strategy<Value = SpaddCscArgs<i32>> {
+    spadd_csr_prealloc_args_strategy().prop_map(|args| SpaddCscArgs {
+        c: CscMatrix::from(&args.c),
+        beta: args.beta,
+        alpha: args.alpha,
+        a: args.a.map_same_op(|a| CscMatrix::from(&a)),
+    })
+}
+
+fn dense_strategy() -> impl Strategy<Value = DMatrix<i32>> {
+    matrix(
+        PROPTEST_I32_VALUE_STRATEGY,
+        PROPTEST_MATRIX_DIM,
+        PROPTEST_MATRIX_DIM,
+    )
+}
+
+fn trans_strategy() -> impl Strategy<Value = bool> + Clone {
+    proptest::bool::ANY
+}
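+
+// (Note: in proptest, a `Vec` of strategies is itself a strategy for a `Vec` of values,
+// which is what makes `vec![value_strategy.clone(); nnz]` in the strategies above usable
+// directly as a generator for the value arrays.)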
+
+/// Wraps the values of the given strategy in `Op`, producing both transposed and non-transposed
+/// values.
+fn op_strategy<S: Strategy>(strategy: S) -> impl Strategy<Value = Op<S::Value>> {
+    let is_transposed = proptest::bool::ANY;
+    (strategy, is_transposed).prop_map(|(obj, is_trans)| {
+        if is_trans {
+            Op::Transpose(obj)
+        } else {
+            Op::NoOp(obj)
+        }
+    })
+}
+
+fn pattern_strategy() -> impl Strategy<Value = SparsityPattern> {
+    sparsity_pattern(PROPTEST_MATRIX_DIM, PROPTEST_MATRIX_DIM, PROPTEST_MAX_NNZ)
+}
+
+/// Constructs pairs (a, b) where a and b have the same dimensions
+fn spadd_pattern_strategy() -> impl Strategy<Value = (SparsityPattern, SparsityPattern)> {
+    pattern_strategy().prop_flat_map(|a| {
+        let b = sparsity_pattern(a.major_dim(), a.minor_dim(), PROPTEST_MAX_NNZ);
+        (Just(a), b)
+    })
+}
+
+/// Constructs pairs (a, b) where a and b have compatible dimensions for a matrix product
+fn spmm_csr_pattern_strategy() -> impl Strategy<Value = (SparsityPattern, SparsityPattern)> {
+    pattern_strategy().prop_flat_map(|a| {
+        let b = sparsity_pattern(a.minor_dim(), PROPTEST_MATRIX_DIM, PROPTEST_MAX_NNZ);
+        (Just(a), b)
+    })
+}
+
+#[derive(Debug)]
+struct SpmmCsrArgs<T> {
+    c: CsrMatrix<T>,
+    beta: T,
+    alpha: T,
+    a: Op<CsrMatrix<T>>,
+    b: Op<CsrMatrix<T>>,
+}
+
+#[derive(Debug)]
+struct SpmmCscArgs<T> {
+    c: CscMatrix<T>,
+    beta: T,
+    alpha: T,
+    a: Op<CscMatrix<T>>,
+    b: Op<CscMatrix<T>>,
+}
+
+fn spmm_csr_prealloc_args_strategy() -> impl Strategy<Value = SpmmCsrArgs<i32>> {
+    spmm_csr_pattern_strategy()
+        .prop_flat_map(|(a_pattern, b_pattern)| {
+            let a_values = vec![PROPTEST_I32_VALUE_STRATEGY; a_pattern.nnz()];
+            let b_values = vec![PROPTEST_I32_VALUE_STRATEGY; b_pattern.nnz()];
+            let c_pattern = spmm_csr_pattern(&a_pattern, &b_pattern);
+            let c_values = vec![PROPTEST_I32_VALUE_STRATEGY; c_pattern.nnz()];
+            let a = a_values.prop_map(move |values| {
+                CsrMatrix::try_from_pattern_and_values(a_pattern.clone(), values).unwrap()
+            });
+            let b = b_values.prop_map(move |values| {
+                CsrMatrix::try_from_pattern_and_values(b_pattern.clone(), values).unwrap()
+            });
+            let c = c_values.prop_map(move |values| {
+                CsrMatrix::try_from_pattern_and_values(c_pattern.clone(), values).unwrap()
+            });
+            let alpha = PROPTEST_I32_VALUE_STRATEGY;
+            let beta = PROPTEST_I32_VALUE_STRATEGY;
+            (c, beta, alpha, trans_strategy(), a, trans_strategy(), b)
+        })
+        .prop_map(
+            |(c, beta, alpha, trans_a, a, trans_b, b)| SpmmCsrArgs::<i32> {
+                c,
+                beta,
+                alpha,
+                a: if trans_a {
+                    Op::Transpose(a.transpose())
+                } else {
+                    Op::NoOp(a)
+                },
+                b: if trans_b {
+                    Op::Transpose(b.transpose())
+                } else {
+                    Op::NoOp(b)
+                },
+            },
+        )
+}
+
+fn spmm_csc_prealloc_args_strategy() -> impl Strategy<Value = SpmmCscArgs<i32>> {
+    // Note: Converting from CSR is simple, but might be significantly slower than
+    // writing a common implementation that can be shared between CSR and CSC args
+    spmm_csr_prealloc_args_strategy().prop_map(|args| SpmmCscArgs {
+        c: CscMatrix::from(&args.c),
+        beta: args.beta,
+        alpha: args.alpha,
+        a: args.a.map_same_op(|a| CscMatrix::from(&a)),
+        b: args.b.map_same_op(|b| CscMatrix::from(&b)),
+    })
+}
+
+fn csc_invertible_diagonal() -> impl Strategy<Value = CscMatrix<f64>> {
+    let non_zero_values =
+        value_strategy::<f64>().prop_filter("Only non-zero values accepted", |x| x != &0.0);
+
+    vector(non_zero_values, PROPTEST_MATRIX_DIM).prop_map(|d| {
+        let mut matrix = CscMatrix::identity(d.len());
+        matrix.values_mut().clone_from_slice(&d.as_slice());
+        matrix
+    })
+}
+
+fn csc_square_with_non_zero_diagonals() -> impl Strategy<Value = CscMatrix<f64>> {
+    csc_invertible_diagonal().prop_flat_map(|d| {
+        csc(
+            value_strategy::<f64>(),
+            d.nrows(),
+            d.nrows(),
+            PROPTEST_MAX_NNZ,
+        )
+        .prop_map(move |mut c| {
+            for (i, j, v) in c.triplet_iter_mut() {
+                if i == j {
+                    *v = 0.0;
+                }
+            }
+
+            // Return the sum of a matrix with zero diagonals and an invertible diagonal
+            // matrix
+            c + &d
+        })
+    })
+}
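+
+// (Background for the two strategies above: solving a lower-triangular system uses forward
+// substitution, x_i = (b_i - sum_{j < i} l_ij * x_j) / l_ii, so every diagonal entry l_ii
+// appears as a divisor. This is why the generated matrices must have a strictly non-zero
+// diagonal.)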
+/// Helper function to help us call dense GEMM with our `Op` type
+fn dense_gemm<'a>(
+    beta: i32,
+    c: impl Into<DMatrixSliceMut<'a, i32>>,
+    alpha: i32,
+    a: Op<impl Into<DMatrixSlice<'a, i32>>>,
+    b: Op<impl Into<DMatrixSlice<'a, i32>>>,
+) {
+    let mut c = c.into();
+    let a = a.convert();
+    let b = b.convert();
+
+    use Op::{NoOp, Transpose};
+    match (a, b) {
+        (NoOp(a), NoOp(b)) => c.gemm(alpha, &a, &b, beta),
+        (Transpose(a), NoOp(b)) => c.gemm(alpha, &a.transpose(), &b, beta),
+        (NoOp(a), Transpose(b)) => c.gemm(alpha, &a, &b.transpose(), beta),
+        (Transpose(a), Transpose(b)) => c.gemm(alpha, &a.transpose(), &b.transpose(), beta),
+    }
+}
+
+proptest! {
+    #[test]
+    fn spmm_csr_dense_agrees_with_dense_result(
+        SpmmCsrDenseArgs { c, beta, alpha, a, b }
+        in spmm_csr_dense_args_strategy()
+    ) {
+        let mut spmm_result = c.clone();
+        spmm_csr_dense(beta, &mut spmm_result, alpha, a.as_ref(), b.as_ref());
+
+        let mut gemm_result = c.clone();
+        let a_dense = a.map_same_op(|a| DMatrix::from(&a));
+        dense_gemm(beta, &mut gemm_result, alpha, a_dense.as_ref(), b.as_ref());
+
+        prop_assert_eq!(spmm_result, gemm_result);
+    }
+
+    #[test]
+    fn spmm_csr_dense_panics_on_dim_mismatch(
+        (alpha, beta, c, a, b)
+        in (PROPTEST_I32_VALUE_STRATEGY,
+            PROPTEST_I32_VALUE_STRATEGY,
+            dense_strategy(),
+            op_strategy(csr_strategy()),
+            op_strategy(dense_strategy()))
+    ) {
+        // We refer to `A * B` as the "product"
+        let product_rows = match &a {
+            Op::NoOp(ref a) => a.nrows(),
+            Op::Transpose(ref a) => a.ncols(),
+        };
+        let product_cols = match &b {
+            Op::NoOp(ref b) => b.ncols(),
+            Op::Transpose(ref b) => b.nrows(),
+        };
+        // Determine the common dimension in the product
+        // from the perspective of a and b, respectively
+        let product_a_common = match &a {
+            Op::NoOp(ref a) => a.ncols(),
+            Op::Transpose(ref a) => a.nrows(),
+        };
+        let product_b_common = match &b {
+            Op::NoOp(ref b) => b.nrows(),
+            Op::Transpose(ref b) => b.ncols()
+        };
+
+        let dims_are_compatible = product_rows == c.nrows()
+            && product_cols == c.ncols()
+            && product_a_common == product_b_common;
+
+        // If the dimensions randomly happen to be compatible, then of course we need to
+        // skip the test, so we assume that they are not.
+        prop_assume!(!dims_are_compatible);
+
+        let result = catch_unwind(|| {
+            let mut spmm_result = c.clone();
+            spmm_csr_dense(beta, &mut spmm_result, alpha, a.as_ref(), b.as_ref());
+        });
+
+        prop_assert!(result.is_err(),
+            "The SPMM kernel executed successfully despite mismatched dimensions");
+    }
+
+    #[test]
+    fn spadd_pattern_test((a, b) in spadd_pattern_strategy())
+    {
+        // (a, b) are dimensionally compatible patterns
+        let pattern_result = spadd_pattern(&a, &b);
+
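+        // Editor's illustration (hypothetical 2x2 case): for patterns {(0, 0)} and
+        // {(0, 0), (1, 1)}, the "added" pattern is the union {(0, 0), (1, 1)}.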
+        // To verify the pattern, we construct CSR matrices with positive integer entries
+        // corresponding to a and b, and convert them to dense matrices.
+        // The sum of these dense matrices will then have non-zeros in exactly the same locations
+        // as the result of "adding" the sparsity patterns.
+        let a_csr = CsrMatrix::try_from_pattern_and_values(a.clone(), vec![1; a.nnz()])
+            .unwrap();
+        let a_dense = DMatrix::from(&a_csr);
+        let b_csr = CsrMatrix::try_from_pattern_and_values(b.clone(), vec![1; b.nnz()])
+            .unwrap();
+        let b_dense = DMatrix::from(&b_csr);
+        let c_dense = a_dense + b_dense;
+        let c_csr = CsrMatrix::from(&c_dense);
+
+        prop_assert_eq!(&pattern_result, c_csr.pattern());
+    }
+
+    #[test]
+    fn spadd_csr_prealloc_test(SpaddCsrArgs { c, beta, alpha, a } in spadd_csr_prealloc_args_strategy()) {
+        // Test that we get the expected result by comparing to an equivalent dense operation
+        // (here we pass in the C matrix, so the sparsity pattern is essentially fixed)
+
+        let mut c_sparse = c.clone();
+        spadd_csr_prealloc(beta, &mut c_sparse, alpha, a.as_ref()).unwrap();
+
+        let mut c_dense = DMatrix::from(&c);
+        let op_a_dense = match a {
+            Op::NoOp(a) => DMatrix::from(&a),
+            Op::Transpose(a) => DMatrix::from(&a).transpose(),
+        };
+        c_dense = beta * c_dense + alpha * &op_a_dense;
+
+        prop_assert_eq!(&DMatrix::from(&c_sparse), &c_dense);
+    }
+
+    #[test]
+    fn csr_add_csr(
+        // a and b have the same dimensions
+        (a, b)
+        in csr_strategy()
+            .prop_flat_map(|a| {
+                let b = csr(PROPTEST_I32_VALUE_STRATEGY, a.nrows(), a.ncols(), PROPTEST_MAX_NNZ);
+                (Just(a), b)
+            }))
+    {
+        // We use the dense result as the ground truth for the arithmetic result
+        let c_dense = DMatrix::from(&a) + DMatrix::from(&b);
+        // However, it's not enough to check the dense result alone; we also need to verify the
+        // sparsity pattern. We can determine the exact sparsity pattern by using
+        // dense arithmetic with positive integer values and extracting positive entries.
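+        // (Positive entries cannot cancel each other, so an entry of the dense sum is
+        // positive exactly when it is structurally non-zero in a or b.)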
+ let c_dense_pattern = dense_csr_pattern(a.pattern()) + dense_csr_pattern(b.pattern()); + let c_pattern = CsrMatrix::from(&c_dense_pattern).pattern().clone(); + + // Check each combination of owned matrices and references + let c_owned_owned = a.clone() + b.clone(); + prop_assert_eq!(&DMatrix::from(&c_owned_owned), &c_dense); + prop_assert_eq!(c_owned_owned.pattern(), &c_pattern); + + let c_owned_ref = a.clone() + &b; + prop_assert_eq!(&DMatrix::from(&c_owned_ref), &c_dense); + prop_assert_eq!(c_owned_ref.pattern(), &c_pattern); + + let c_ref_owned = &a + b.clone(); + prop_assert_eq!(&DMatrix::from(&c_ref_owned), &c_dense); + prop_assert_eq!(c_ref_owned.pattern(), &c_pattern); + + let c_ref_ref = &a + &b; + prop_assert_eq!(&DMatrix::from(&c_ref_ref), &c_dense); + prop_assert_eq!(c_ref_ref.pattern(), &c_pattern); + } + + #[test] + fn csr_sub_csr( + // a and b have the same dimensions + (a, b) + in csr_strategy() + .prop_flat_map(|a| { + let b = csr(PROPTEST_I32_VALUE_STRATEGY, a.nrows(), a.ncols(), PROPTEST_MAX_NNZ); + (Just(a), b) + })) + { + // See comments in csr_add_csr for rationale for checking the pattern this way + let c_dense = DMatrix::from(&a) - DMatrix::from(&b); + let c_dense_pattern = dense_csr_pattern(a.pattern()) + dense_csr_pattern(b.pattern()); + let c_pattern = CsrMatrix::from(&c_dense_pattern).pattern().clone(); + + // Check each combination of owned matrices and references + let c_owned_owned = a.clone() - b.clone(); + prop_assert_eq!(&DMatrix::from(&c_owned_owned), &c_dense); + prop_assert_eq!(c_owned_owned.pattern(), &c_pattern); + + let c_owned_ref = a.clone() - &b; + prop_assert_eq!(&DMatrix::from(&c_owned_ref), &c_dense); + prop_assert_eq!(c_owned_ref.pattern(), &c_pattern); + + let c_ref_owned = &a - b.clone(); + prop_assert_eq!(&DMatrix::from(&c_ref_owned), &c_dense); + prop_assert_eq!(c_ref_owned.pattern(), &c_pattern); + + let c_ref_ref = &a - &b; + prop_assert_eq!(&DMatrix::from(&c_ref_ref), &c_dense); + prop_assert_eq!(c_ref_ref.pattern(), &c_pattern); + } + + #[test] + fn spmm_csr_pattern_test((a, b) in spmm_csr_pattern_strategy()) + { + // (a, b) are multiplication-wise dimensionally compatible patterns + let c_pattern = spmm_csr_pattern(&a, &b); + + // To verify the pattern, we construct CSR matrices with positive integer entries + // corresponding to a and b, and convert them to dense matrices. 
+        // The product of these dense matrices will then have non-zeros in exactly the same locations
+        // as the result of "multiplying" the sparsity patterns.
+        let a_csr = CsrMatrix::try_from_pattern_and_values(a.clone(), vec![1; a.nnz()])
+            .unwrap();
+        let a_dense = DMatrix::from(&a_csr);
+        let b_csr = CsrMatrix::try_from_pattern_and_values(b.clone(), vec![1; b.nnz()])
+            .unwrap();
+        let b_dense = DMatrix::from(&b_csr);
+        let c_dense = a_dense * b_dense;
+        let c_csr = CsrMatrix::from(&c_dense);
+
+        prop_assert_eq!(&c_pattern, c_csr.pattern());
+    }
+
+    #[test]
+    fn spmm_csr_prealloc_test(SpmmCsrArgs { c, beta, alpha, a, b }
+        in spmm_csr_prealloc_args_strategy()
+    ) {
+        // Test that we get the expected result by comparing to an equivalent dense operation
+        // (here we pass in the C matrix, so the sparsity pattern is essentially fixed)
+        let mut c_sparse = c.clone();
+        spmm_csr_prealloc(beta, &mut c_sparse, alpha, a.as_ref(), b.as_ref()).unwrap();
+
+        let mut c_dense = DMatrix::from(&c);
+        let op_a_dense = match a {
+            Op::NoOp(ref a) => DMatrix::from(a),
+            Op::Transpose(ref a) => DMatrix::from(a).transpose(),
+        };
+        let op_b_dense = match b {
+            Op::NoOp(ref b) => DMatrix::from(b),
+            Op::Transpose(ref b) => DMatrix::from(b).transpose(),
+        };
+        c_dense = beta * c_dense + alpha * &op_a_dense * op_b_dense;
+
+        prop_assert_eq!(&DMatrix::from(&c_sparse), &c_dense);
+    }
+
+    #[test]
+    fn spmm_csr_prealloc_panics_on_dim_mismatch(
+        (alpha, beta, c, a, b)
+        in (PROPTEST_I32_VALUE_STRATEGY,
+            PROPTEST_I32_VALUE_STRATEGY,
+            csr_strategy(),
+            op_strategy(csr_strategy()),
+            op_strategy(csr_strategy()))
+    ) {
+        // We refer to `A * B` as the "product"
+        let product_rows = match &a {
+            Op::NoOp(ref a) => a.nrows(),
+            Op::Transpose(ref a) => a.ncols(),
+        };
+        let product_cols = match &b {
+            Op::NoOp(ref b) => b.ncols(),
+            Op::Transpose(ref b) => b.nrows(),
+        };
+        // Determine the common dimension in the product
+        // from the perspective of a and b, respectively
+        let product_a_common = match &a {
+            Op::NoOp(ref a) => a.ncols(),
+            Op::Transpose(ref a) => a.nrows(),
+        };
+        let product_b_common = match &b {
+            Op::NoOp(ref b) => b.nrows(),
+            Op::Transpose(ref b) => b.ncols(),
+        };
+
+        let dims_are_compatible = product_rows == c.nrows()
+            && product_cols == c.ncols()
+            && product_a_common == product_b_common;
+
+        // If the dimensions randomly happen to be compatible, then of course we need to
+        // skip the test, so we assume that they are not.
+        prop_assume!(!dims_are_compatible);
+
+        let result = catch_unwind(|| {
+            let mut spmm_result = c.clone();
+            spmm_csr_prealloc(beta, &mut spmm_result, alpha, a.as_ref(), b.as_ref()).unwrap();
+        });
+
+        prop_assert!(result.is_err(),
+            "The SPMM kernel executed successfully despite mismatched dimensions");
+    }
+
+    #[test]
+    fn spadd_csr_prealloc_panics_on_dim_mismatch(
+        (alpha, beta, c, op_a)
+        in (PROPTEST_I32_VALUE_STRATEGY,
+            PROPTEST_I32_VALUE_STRATEGY,
+            csr_strategy(),
+            op_strategy(csr_strategy()))
+    ) {
+        let op_a_rows = match &op_a {
+            &Op::NoOp(ref a) => a.nrows(),
+            &Op::Transpose(ref a) => a.ncols()
+        };
+        let op_a_cols = match &op_a {
+            &Op::NoOp(ref a) => a.ncols(),
+            &Op::Transpose(ref a) => a.nrows()
+        };
+
+        let dims_are_compatible = c.nrows() == op_a_rows && c.ncols() == op_a_cols;
+
+        // If the dimensions randomly happen to be compatible, then of course we need to
+        // skip the test, so we assume that they are not.
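+        // (prop_assume! rejects the current case rather than failing it: proptest simply
+        // discards such inputs and generates fresh ones.)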
+        prop_assume!(!dims_are_compatible);
+
+        let result = catch_unwind(|| {
+            let mut spmm_result = c.clone();
+            spadd_csr_prealloc(beta, &mut spmm_result, alpha, op_a.as_ref()).unwrap();
+        });
+
+        prop_assert!(result.is_err(),
+            "The SPMM kernel executed successfully despite mismatched dimensions");
+    }
+
+    #[test]
+    fn csr_mul_csr(
+        // a and b have dimensions compatible for multiplication
+        (a, b)
+        in csr_strategy()
+            .prop_flat_map(|a| {
+                let max_nnz = PROPTEST_MAX_NNZ;
+                let cols = PROPTEST_MATRIX_DIM;
+                let b = csr(PROPTEST_I32_VALUE_STRATEGY, a.ncols(), cols, max_nnz);
+                (Just(a), b)
+            }))
+    {
+        // We use the dense result as the ground truth for the arithmetic result
+        let c_dense = DMatrix::from(&a) * DMatrix::from(&b);
+        // However, it's not enough to check the dense result alone; we also need to verify the
+        // sparsity pattern. We can determine the exact sparsity pattern by using
+        // dense arithmetic with positive integer values and extracting positive entries.
+        let c_dense_pattern = dense_csr_pattern(a.pattern()) * dense_csr_pattern(b.pattern());
+        let c_pattern = CsrMatrix::from(&c_dense_pattern).pattern().clone();
+
+        // Check each combination of owned matrices and references
+        let c_owned_owned = a.clone() * b.clone();
+        prop_assert_eq!(&DMatrix::from(&c_owned_owned), &c_dense);
+        prop_assert_eq!(c_owned_owned.pattern(), &c_pattern);
+
+        let c_owned_ref = a.clone() * &b;
+        prop_assert_eq!(&DMatrix::from(&c_owned_ref), &c_dense);
+        prop_assert_eq!(c_owned_ref.pattern(), &c_pattern);
+
+        let c_ref_owned = &a * b.clone();
+        prop_assert_eq!(&DMatrix::from(&c_ref_owned), &c_dense);
+        prop_assert_eq!(c_ref_owned.pattern(), &c_pattern);
+
+        let c_ref_ref = &a * &b;
+        prop_assert_eq!(&DMatrix::from(&c_ref_ref), &c_dense);
+        prop_assert_eq!(c_ref_ref.pattern(), &c_pattern);
+    }
+
+    #[test]
+    fn spmm_csc_prealloc_test(SpmmCscArgs { c, beta, alpha, a, b }
+        in spmm_csc_prealloc_args_strategy()
+    ) {
+        // Test that we get the expected result by comparing to an equivalent dense operation
+        // (here we pass in the C matrix, so the sparsity pattern is essentially fixed)
+        let mut c_sparse = c.clone();
+        spmm_csc_prealloc(beta, &mut c_sparse, alpha, a.as_ref(), b.as_ref()).unwrap();
+
+        let mut c_dense = DMatrix::from(&c);
+        let op_a_dense = match a {
+            Op::NoOp(ref a) => DMatrix::from(a),
+            Op::Transpose(ref a) => DMatrix::from(a).transpose(),
+        };
+        let op_b_dense = match b {
+            Op::NoOp(ref b) => DMatrix::from(b),
+            Op::Transpose(ref b) => DMatrix::from(b).transpose(),
+        };
+        c_dense = beta * c_dense + alpha * &op_a_dense * op_b_dense;
+
+        prop_assert_eq!(&DMatrix::from(&c_sparse), &c_dense);
+    }
+
+    #[test]
+    fn spmm_csc_prealloc_panics_on_dim_mismatch(
+        (alpha, beta, c, a, b)
+        in (PROPTEST_I32_VALUE_STRATEGY,
+            PROPTEST_I32_VALUE_STRATEGY,
+            csc_strategy(),
+            op_strategy(csc_strategy()),
+            op_strategy(csc_strategy()))
+    ) {
+        // We refer to `A * B` as the "product"
+        let product_rows = match &a {
+            Op::NoOp(ref a) => a.nrows(),
+            Op::Transpose(ref a) => a.ncols(),
+        };
+        let product_cols = match &b {
+            Op::NoOp(ref b) => b.ncols(),
+            Op::Transpose(ref b) => b.nrows(),
+        };
+        // Determine the common dimension in the product
+        // from the perspective of a and b, respectively
+        let product_a_common = match &a {
+            Op::NoOp(ref a) => a.ncols(),
+            Op::Transpose(ref a) => a.nrows(),
+        };
+        let product_b_common = match &b {
+            Op::NoOp(ref b) => b.nrows(),
+            Op::Transpose(ref b) => b.ncols(),
+        };
+
+        let dims_are_compatible = product_rows == c.nrows()
+            && product_cols == c.ncols()
+            && product_a_common == product_b_common;
+
+        // If the dimensions randomly happen to be compatible, then of course we need to
+        // skip the test, so we assume that they are not.
+        prop_assume!(!dims_are_compatible);
+
+        let result = catch_unwind(|| {
+            let mut spmm_result = c.clone();
+            spmm_csc_prealloc(beta, &mut spmm_result, alpha, a.as_ref(), b.as_ref()).unwrap();
+        });
+
+        prop_assert!(result.is_err(),
+            "The SPMM kernel executed successfully despite mismatched dimensions");
+    }
+
+    #[test]
+    fn csc_mul_csc(
+        // a and b have dimensions compatible for multiplication
+        (a, b)
+        in csc_strategy()
+            .prop_flat_map(|a| {
+                let max_nnz = PROPTEST_MAX_NNZ;
+                let cols = PROPTEST_MATRIX_DIM;
+                let b = csc(PROPTEST_I32_VALUE_STRATEGY, a.ncols(), cols, max_nnz);
+                (Just(a), b)
+            })
+            .prop_map(|(a, b)| {
+                println!("a: {} x {}, b: {} x {}", a.nrows(), a.ncols(), b.nrows(), b.ncols());
+                (a, b)
+            }))
+    {
+        assert_eq!(a.ncols(), b.nrows());
+        // We use the dense result as the ground truth for the arithmetic result
+        let c_dense = DMatrix::from(&a) * DMatrix::from(&b);
+        // However, it's not enough to check the dense result alone; we also need to verify the
+        // sparsity pattern. We can determine the exact sparsity pattern by using
+        // dense arithmetic with positive integer values and extracting positive entries.
+        let c_dense_pattern = dense_csc_pattern(a.pattern()) * dense_csc_pattern(b.pattern());
+        let c_pattern = CscMatrix::from(&c_dense_pattern).pattern().clone();
+
+        // Check each combination of owned matrices and references
+        let c_owned_owned = a.clone() * b.clone();
+        prop_assert_eq!(&DMatrix::from(&c_owned_owned), &c_dense);
+        prop_assert_eq!(c_owned_owned.pattern(), &c_pattern);
+
+        let c_owned_ref = a.clone() * &b;
+        prop_assert_eq!(&DMatrix::from(&c_owned_ref), &c_dense);
+        prop_assert_eq!(c_owned_ref.pattern(), &c_pattern);
+
+        let c_ref_owned = &a * b.clone();
+        prop_assert_eq!(&DMatrix::from(&c_ref_owned), &c_dense);
+        prop_assert_eq!(c_ref_owned.pattern(), &c_pattern);
+
+        let c_ref_ref = &a * &b;
+        prop_assert_eq!(&DMatrix::from(&c_ref_ref), &c_dense);
+        prop_assert_eq!(c_ref_ref.pattern(), &c_pattern);
+    }
+
+    #[test]
+    fn spmm_csc_dense_agrees_with_dense_result(
+        SpmmCscDenseArgs { c, beta, alpha, a, b }
+        in spmm_csc_dense_args_strategy()
+    ) {
+        let mut spmm_result = c.clone();
+        spmm_csc_dense(beta, &mut spmm_result, alpha, a.as_ref(), b.as_ref());
+
+        let mut gemm_result = c.clone();
+        let a_dense = a.map_same_op(|a| DMatrix::from(&a));
+        dense_gemm(beta, &mut gemm_result, alpha, a_dense.as_ref(), b.as_ref());
+
+        prop_assert_eq!(spmm_result, gemm_result);
+    }
+
+    #[test]
+    fn spmm_csc_dense_panics_on_dim_mismatch(
+        (alpha, beta, c, a, b)
+        in (PROPTEST_I32_VALUE_STRATEGY,
+            PROPTEST_I32_VALUE_STRATEGY,
+            dense_strategy(),
+            op_strategy(csc_strategy()),
+            op_strategy(dense_strategy()))
+    ) {
+        // We refer to `A * B` as the "product"
+        let product_rows = match &a {
+            Op::NoOp(ref a) => a.nrows(),
+            Op::Transpose(ref a) => a.ncols(),
+        };
+        let product_cols = match &b {
+            Op::NoOp(ref b) => b.ncols(),
+            Op::Transpose(ref b) => b.nrows(),
+        };
+        // Determine the common dimension in the product
+        // from the perspective of a and b, respectively
+        let product_a_common = match &a {
+            Op::NoOp(ref a) => a.ncols(),
+            Op::Transpose(ref a) => a.nrows(),
+        };
+        let product_b_common = match &b {
+            Op::NoOp(ref b) => b.nrows(),
+            Op::Transpose(ref b) => b.ncols()
+        };
+
+        let dims_are_compatible = product_rows == c.nrows()
+            && product_cols == c.ncols()
+            && product_a_common == product_b_common;
+
+        // If the dimensions randomly happen to be compatible, then of course we need to
+        // skip the test, so we assume that they are not.
+        prop_assume!(!dims_are_compatible);
+
+        let result = catch_unwind(|| {
+            let mut spmm_result = c.clone();
+            spmm_csc_dense(beta, &mut spmm_result, alpha, a.as_ref(), b.as_ref());
+        });
+
+        prop_assert!(result.is_err(),
+            "The SPMM kernel executed successfully despite mismatched dimensions");
+    }
+
+    #[test]
+    fn spadd_csc_prealloc_test(SpaddCscArgs { c, beta, alpha, a } in spadd_csc_prealloc_args_strategy()) {
+        // Test that we get the expected result by comparing to an equivalent dense operation
+        // (here we pass in the C matrix, so the sparsity pattern is essentially fixed)
+
+        let mut c_sparse = c.clone();
+        spadd_csc_prealloc(beta, &mut c_sparse, alpha, a.as_ref()).unwrap();
+
+        let mut c_dense = DMatrix::from(&c);
+        let op_a_dense = match a {
+            Op::NoOp(a) => DMatrix::from(&a),
+            Op::Transpose(a) => DMatrix::from(&a).transpose(),
+        };
+        c_dense = beta * c_dense + alpha * &op_a_dense;
+
+        prop_assert_eq!(&DMatrix::from(&c_sparse), &c_dense);
+    }
+
+    #[test]
+    fn spadd_csc_prealloc_panics_on_dim_mismatch(
+        (alpha, beta, c, op_a)
+        in (PROPTEST_I32_VALUE_STRATEGY,
+            PROPTEST_I32_VALUE_STRATEGY,
+            csc_strategy(),
+            op_strategy(csc_strategy()))
+    ) {
+        let op_a_rows = match &op_a {
+            &Op::NoOp(ref a) => a.nrows(),
+            &Op::Transpose(ref a) => a.ncols()
+        };
+        let op_a_cols = match &op_a {
+            &Op::NoOp(ref a) => a.ncols(),
+            &Op::Transpose(ref a) => a.nrows()
+        };
+
+        let dims_are_compatible = c.nrows() == op_a_rows && c.ncols() == op_a_cols;
+
+        // If the dimensions randomly happen to be compatible, then of course we need to
+        // skip the test, so we assume that they are not.
+        prop_assume!(!dims_are_compatible);
+
+        let result = catch_unwind(|| {
+            let mut spmm_result = c.clone();
+            spadd_csc_prealloc(beta, &mut spmm_result, alpha, op_a.as_ref()).unwrap();
+        });
+
+        prop_assert!(result.is_err(),
+            "The SPMM kernel executed successfully despite mismatched dimensions");
+    }
+
+    #[test]
+    fn csc_add_csc(
+        // a and b have the same dimensions
+        (a, b)
+        in csc_strategy()
+            .prop_flat_map(|a| {
+                let b = csc(PROPTEST_I32_VALUE_STRATEGY, a.nrows(), a.ncols(), PROPTEST_MAX_NNZ);
+                (Just(a), b)
+            }))
+    {
+        // We use the dense result as the ground truth for the arithmetic result
+        let c_dense = DMatrix::from(&a) + DMatrix::from(&b);
+        // However, it's not enough to check the dense result alone; we also need to verify the
+        // sparsity pattern. We can determine the exact sparsity pattern by using
+        // dense arithmetic with positive integer values and extracting positive entries.
+ let c_dense_pattern = dense_csc_pattern(a.pattern()) + dense_csc_pattern(b.pattern()); + let c_pattern = CscMatrix::from(&c_dense_pattern).pattern().clone(); + + // Check each combination of owned matrices and references + let c_owned_owned = a.clone() + b.clone(); + prop_assert_eq!(&DMatrix::from(&c_owned_owned), &c_dense); + prop_assert_eq!(c_owned_owned.pattern(), &c_pattern); + + let c_owned_ref = a.clone() + &b; + prop_assert_eq!(&DMatrix::from(&c_owned_ref), &c_dense); + prop_assert_eq!(c_owned_ref.pattern(), &c_pattern); + + let c_ref_owned = &a + b.clone(); + prop_assert_eq!(&DMatrix::from(&c_ref_owned), &c_dense); + prop_assert_eq!(c_ref_owned.pattern(), &c_pattern); + + let c_ref_ref = &a + &b; + prop_assert_eq!(&DMatrix::from(&c_ref_ref), &c_dense); + prop_assert_eq!(c_ref_ref.pattern(), &c_pattern); + } + + #[test] + fn csc_sub_csc( + // a and b have the same dimensions + (a, b) + in csc_strategy() + .prop_flat_map(|a| { + let b = csc(PROPTEST_I32_VALUE_STRATEGY, a.nrows(), a.ncols(), PROPTEST_MAX_NNZ); + (Just(a), b) + })) + { + // See comments in csc_add_csc for rationale for checking the pattern this way + let c_dense = DMatrix::from(&a) - DMatrix::from(&b); + let c_dense_pattern = dense_csc_pattern(a.pattern()) + dense_csc_pattern(b.pattern()); + let c_pattern = CscMatrix::from(&c_dense_pattern).pattern().clone(); + + // Check each combination of owned matrices and references + let c_owned_owned = a.clone() - b.clone(); + prop_assert_eq!(&DMatrix::from(&c_owned_owned), &c_dense); + prop_assert_eq!(c_owned_owned.pattern(), &c_pattern); + + let c_owned_ref = a.clone() - &b; + prop_assert_eq!(&DMatrix::from(&c_owned_ref), &c_dense); + prop_assert_eq!(c_owned_ref.pattern(), &c_pattern); + + let c_ref_owned = &a - b.clone(); + prop_assert_eq!(&DMatrix::from(&c_ref_owned), &c_dense); + prop_assert_eq!(c_ref_owned.pattern(), &c_pattern); + + let c_ref_ref = &a - &b; + prop_assert_eq!(&DMatrix::from(&c_ref_ref), &c_dense); + prop_assert_eq!(c_ref_ref.pattern(), &c_pattern); + } + + #[test] + fn csr_mul_scalar((scalar, matrix) in (PROPTEST_I32_VALUE_STRATEGY, csr_strategy())) { + let dense = DMatrix::from(&matrix); + let dense_result = dense * scalar; + + let result_owned_owned = matrix.clone() * scalar; + let result_owned_ref = matrix.clone() * &scalar; + let result_ref_owned = &matrix * scalar; + let result_ref_ref = &matrix * &scalar; + + // Check that all the combinations of reference and owned variables return the same + // result + prop_assert_eq!(&result_owned_ref, &result_owned_owned); + prop_assert_eq!(&result_ref_owned, &result_owned_owned); + prop_assert_eq!(&result_ref_ref, &result_owned_owned); + + // Check that this result is consistent with the dense result, and that the + // NNZ is the same as before + prop_assert_eq!(result_owned_owned.nnz(), matrix.nnz()); + prop_assert_eq!(DMatrix::from(&result_owned_owned), dense_result); + + // Finally, check mul-assign + let mut result_assign_owned = matrix.clone(); + result_assign_owned *= scalar; + let mut result_assign_ref = matrix.clone(); + result_assign_ref *= &scalar; + + prop_assert_eq!(&result_assign_owned, &result_owned_owned); + prop_assert_eq!(&result_assign_ref, &result_owned_owned); + } + + #[test] + fn csc_mul_scalar((scalar, matrix) in (PROPTEST_I32_VALUE_STRATEGY, csc_strategy())) { + let dense = DMatrix::from(&matrix); + let dense_result = dense * scalar; + + let result_owned_owned = matrix.clone() * scalar; + let result_owned_ref = matrix.clone() * &scalar; + let result_ref_owned = &matrix * scalar; + let 
result_ref_ref = &matrix * &scalar;
+
+        // Check that all the combinations of reference and owned variables return the same
+        // result
+        prop_assert_eq!(&result_owned_ref, &result_owned_owned);
+        prop_assert_eq!(&result_ref_owned, &result_owned_owned);
+        prop_assert_eq!(&result_ref_ref, &result_owned_owned);
+
+        // Check that this result is consistent with the dense result, and that the
+        // NNZ is the same as before
+        prop_assert_eq!(result_owned_owned.nnz(), matrix.nnz());
+        prop_assert_eq!(DMatrix::from(&result_owned_owned), dense_result);
+
+        // Finally, check mul-assign
+        let mut result_assign_owned = matrix.clone();
+        result_assign_owned *= scalar;
+        let mut result_assign_ref = matrix.clone();
+        result_assign_ref *= &scalar;
+
+        prop_assert_eq!(&result_assign_owned, &result_owned_owned);
+        prop_assert_eq!(&result_assign_ref, &result_owned_owned);
+    }
+
+    #[test]
+    fn scalar_mul_csr((scalar, matrix) in (PROPTEST_I32_VALUE_STRATEGY, csr_strategy())) {
+        // For scalar * matrix, we cannot generally implement this for any type T,
+        // so we have implemented this for the built-in types separately. This requires
+        // us to also test these types separately. For validation, we check that
+        //  scalar * matrix == matrix * scalar,
+        // which is sufficient for correctness if matrix * scalar is correctly implemented
+        // (which is tested separately).
+        // We only test for i32 here, because with our current implementation, the implementations
+        // for different types are completely identical and only rely on basic arithmetic
+        // operations.
+        let result = &matrix * scalar;
+        prop_assert_eq!(&(scalar * matrix.clone()), &result);
+        prop_assert_eq!(&(scalar * &matrix), &result);
+        prop_assert_eq!(&(&scalar * matrix.clone()), &result);
+        prop_assert_eq!(&(&scalar * &matrix), &result);
+    }
+
+    #[test]
+    fn scalar_mul_csc((scalar, matrix) in (PROPTEST_I32_VALUE_STRATEGY, csc_strategy())) {
+        // See comments for scalar_mul_csr
+        let result = &matrix * scalar;
+        prop_assert_eq!(&(scalar * matrix.clone()), &result);
+        prop_assert_eq!(&(scalar * &matrix), &result);
+        prop_assert_eq!(&(&scalar * matrix.clone()), &result);
+        prop_assert_eq!(&(&scalar * &matrix), &result);
+    }
+
+    #[test]
+    fn csr_neg(csr in csr_strategy()) {
+        let result = &csr - 2 * &csr;
+        prop_assert_eq!(-&csr, result.clone());
+        prop_assert_eq!(-csr, result);
+    }
+
+    #[test]
+    fn csc_neg(csc in csc_strategy()) {
+        let result = &csc - 2 * &csc;
+        prop_assert_eq!(-&csc, result.clone());
+        prop_assert_eq!(-csc, result);
+    }
+
+    #[test]
+    fn csr_div((csr, divisor) in (csr_strategy(), non_zero_i32_value_strategy())) {
+        let result_owned_owned = csr.clone() / divisor;
+        let result_owned_ref = csr.clone() / &divisor;
+        let result_ref_owned = &csr / divisor;
+        let result_ref_ref = &csr / &divisor;
+
+        // Verify that all results are the same
+        prop_assert_eq!(&result_owned_ref, &result_owned_owned);
+        prop_assert_eq!(&result_ref_owned, &result_owned_owned);
+        prop_assert_eq!(&result_ref_ref, &result_owned_owned);
+
+        // Check that NNZ was left unchanged
+        prop_assert_eq!(result_owned_owned.nnz(), csr.nnz());
+
+        // Then compare against the equivalent dense result
+        let dense_result = DMatrix::from(&csr) / divisor;
+        prop_assert_eq!(DMatrix::from(&result_owned_owned), dense_result);
+    }
+
+    #[test]
+    fn csc_div((csc, divisor) in (csc_strategy(), non_zero_i32_value_strategy())) {
+        let result_owned_owned = csc.clone() / divisor;
+        let result_owned_ref = csc.clone() / &divisor;
+        let result_ref_owned = &csc / divisor;
+        let result_ref_ref = &csc / &divisor;
+
+        // Verify that all results are the same
+        prop_assert_eq!(&result_owned_ref, &result_owned_owned);
+        prop_assert_eq!(&result_ref_owned, &result_owned_owned);
+        prop_assert_eq!(&result_ref_ref, &result_owned_owned);
+
+        // Check that NNZ was left unchanged
+        prop_assert_eq!(result_owned_owned.nnz(), csc.nnz());
+
+        // Then compare against the equivalent dense result
+        let dense_result = DMatrix::from(&csc) / divisor;
+        prop_assert_eq!(DMatrix::from(&result_owned_owned), dense_result);
+    }
+
+    #[test]
+    fn csr_div_assign((csr, divisor) in (csr_strategy(), non_zero_i32_value_strategy())) {
+        let result_owned = {
+            let mut csr = csr.clone();
+            csr /= divisor;
+            csr
+        };
+
+        let result_ref = {
+            let mut csr = csr.clone();
+            csr /= &divisor;
+            csr
+        };
+
+        let expected_result = csr / divisor;
+
+        prop_assert_eq!(&result_owned, &expected_result);
+        prop_assert_eq!(&result_ref, &expected_result);
+    }
+
+    #[test]
+    fn csc_div_assign((csc, divisor) in (csc_strategy(), non_zero_i32_value_strategy())) {
+        let result_owned = {
+            let mut csc = csc.clone();
+            csc /= divisor;
+            csc
+        };
+
+        let result_ref = {
+            let mut csc = csc.clone();
+            csc /= &divisor;
+            csc
+        };
+
+        let expected_result = csc / divisor;
+
+        prop_assert_eq!(&result_owned, &expected_result);
+        prop_assert_eq!(&result_ref, &expected_result);
+    }
+
+    #[test]
+    fn csr_mul_dense(
+        // a and b have dimensions compatible for multiplication
+        (a, b)
+        in csr_strategy()
+            .prop_flat_map(|a| {
+                let cols = PROPTEST_MATRIX_DIM;
+                let b = matrix(PROPTEST_I32_VALUE_STRATEGY, a.ncols(), cols);
+                (Just(a), b)
+            }))
+    {
+        let expected = DMatrix::from(&a) * &b;
+        prop_assert_eq!(&a * &b, expected.clone());
+        prop_assert_eq!(&a * b.clone(), expected.clone());
+        prop_assert_eq!(a.clone() * &b, expected.clone());
+        prop_assert_eq!(a.clone() * b.clone(), expected.clone());
+    }
+
+    #[test]
+    fn csc_mul_dense(
+        // a and b have dimensions compatible for multiplication
+        (a, b)
+        in csc_strategy()
+            .prop_flat_map(|a| {
+                let cols = PROPTEST_MATRIX_DIM;
+                let b = matrix(PROPTEST_I32_VALUE_STRATEGY, a.ncols(), cols);
+                (Just(a), b)
+            }))
+    {
+        let expected = DMatrix::from(&a) * &b;
+        prop_assert_eq!(&a * &b, expected.clone());
+        prop_assert_eq!(&a * b.clone(), expected.clone());
+        prop_assert_eq!(a.clone() * &b, expected.clone());
+        prop_assert_eq!(a.clone() * b.clone(), expected.clone());
+    }
+
+    #[test]
+    fn csc_solve_lower_triangular_no_transpose(
+        // A CSC matrix `a` and a dimensionally compatible dense matrix `b`
+        (a, b)
+        in csc_square_with_non_zero_diagonals()
+            .prop_flat_map(|a| {
+                let nrows = a.nrows();
+                (Just(a), matrix(value_strategy::<f64>(), nrows, PROPTEST_MATRIX_DIM))
+            }))
+    {
+        let mut x = b.clone();
+        spsolve_csc_lower_triangular(Op::NoOp(&a), &mut x).unwrap();
+
+        let a_lower = a.lower_triangle();
+        // We're using a high tolerance here because there are some "bad" inputs that can give
+        // severe loss of precision.
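+        // (The diagonal entries are guaranteed non-zero but not well-scaled, so the
+        // triangular system can be nearly singular; hence an absolute tolerance rather
+        // than an exact comparison.)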
+        prop_assert_matrix_eq!(&a_lower * &x, &b, comp = abs, tol = 1e-4);
+    }
+
+    #[test]
+    fn csc_solve_lower_triangular_transpose(
+        // A CSC matrix `a` and a dimensionally compatible dense matrix `b` (with a transposed)
+        (a, b)
+        in csc_square_with_non_zero_diagonals()
+            .prop_flat_map(|a| {
+                let ncols = a.ncols();
+                (Just(a), matrix(value_strategy::<f64>(), ncols, PROPTEST_MATRIX_DIM))
+            }))
+    {
+        let mut x = b.clone();
+        spsolve_csc_lower_triangular(Op::Transpose(&a), &mut x).unwrap();
+
+        let a_lower = a.lower_triangle();
+        // We're using a high tolerance here because there are some "bad" inputs that can give
+        // severe loss of precision.
+        prop_assert_matrix_eq!(&a_lower.transpose() * &x, &b, comp = abs, tol = 1e-4);
+    }
+
+}
diff --git a/nalgebra-sparse/tests/unit_tests/pattern.rs b/nalgebra-sparse/tests/unit_tests/pattern.rs
new file mode 100644
index 00000000..310cffae
--- /dev/null
+++ b/nalgebra-sparse/tests/unit_tests/pattern.rs
@@ -0,0 +1,154 @@
+use nalgebra_sparse::pattern::{SparsityPattern, SparsityPatternFormatError};
+
+#[test]
+fn sparsity_pattern_valid_data() {
+    // Construct pattern from valid data and check that selected methods return results
+    // that agree with expectations.
+
+    {
+        // A pattern with zero explicitly stored entries
+        let pattern =
+            SparsityPattern::try_from_offsets_and_indices(3, 2, vec![0, 0, 0, 0], Vec::new())
+                .unwrap();
+
+        assert_eq!(pattern.major_dim(), 3);
+        assert_eq!(pattern.minor_dim(), 2);
+        assert_eq!(pattern.nnz(), 0);
+        assert_eq!(pattern.major_offsets(), &[0, 0, 0, 0]);
+        assert_eq!(pattern.minor_indices(), &[]);
+        assert_eq!(pattern.lane(0), &[]);
+        assert_eq!(pattern.lane(1), &[]);
+        assert_eq!(pattern.lane(2), &[]);
+        assert!(pattern.entries().next().is_none());
+
+        assert_eq!(pattern, SparsityPattern::zeros(3, 2));
+
+        let (offsets, indices) = pattern.disassemble();
+        assert_eq!(offsets, vec![0, 0, 0, 0]);
+        assert_eq!(indices, vec![]);
+    }
+
+    {
+        // Arbitrary pattern
+        let offsets = vec![0, 2, 2, 5];
+        let indices = vec![0, 5, 1, 2, 3];
+        let pattern =
+            SparsityPattern::try_from_offsets_and_indices(3, 6, offsets.clone(), indices.clone())
+                .unwrap();
+
+        assert_eq!(pattern.major_dim(), 3);
+        assert_eq!(pattern.minor_dim(), 6);
+        assert_eq!(pattern.major_offsets(), offsets.as_slice());
+        assert_eq!(pattern.minor_indices(), indices.as_slice());
+        assert_eq!(pattern.nnz(), 5);
+        assert_eq!(pattern.lane(0), &[0, 5]);
+        assert_eq!(pattern.lane(1), &[]);
+        assert_eq!(pattern.lane(2), &[1, 2, 3]);
+        assert_eq!(
+            pattern.entries().collect::<Vec<_>>(),
+            vec![(0, 0), (0, 5), (2, 1), (2, 2), (2, 3)]
+        );
+
+        let (offsets2, indices2) = pattern.disassemble();
+        assert_eq!(offsets2, offsets);
+        assert_eq!(indices2, indices);
+    }
+}
+
+#[test]
+fn sparsity_pattern_try_from_invalid_data() {
+    {
+        // Empty offset array (invalid length)
+        let pattern = SparsityPattern::try_from_offsets_and_indices(0, 0, Vec::new(), Vec::new());
+        assert_eq!(
+            pattern,
+            Err(SparsityPatternFormatError::InvalidOffsetArrayLength)
+        );
+    }
+
+    {
+        // Offset array invalid length for arbitrary data
+        let offsets = vec![0, 3, 5];
+        let indices = vec![0, 1, 2, 3, 5];
+
+        let pattern = SparsityPattern::try_from_offsets_and_indices(3, 6, offsets, indices);
+        assert!(matches!(
+            pattern,
+            Err(SparsityPatternFormatError::InvalidOffsetArrayLength)
+        ));
+    }
+
+    {
+        // Invalid first entry in offsets array
+        let offsets = vec![1, 2, 2, 5];
+        let indices = vec![0, 5, 1, 2, 3];
+        let pattern = SparsityPattern::try_from_offsets_and_indices(3, 6, offsets, indices);
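+        // (A valid offsets array must start with 0 — the first lane begins at the start
+        // of the index array — so the leading 1 must be rejected.)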
+        assert!(matches!(
+            pattern,
+            Err(SparsityPatternFormatError::InvalidOffsetFirstLast)
+        ));
+    }
+
+    {
+        // Invalid last entry in offsets array
+        let offsets = vec![0, 2, 2, 4];
+        let indices = vec![0, 5, 1, 2, 3];
+        let pattern = SparsityPattern::try_from_offsets_and_indices(3, 6, offsets, indices);
+        assert!(matches!(
+            pattern,
+            Err(SparsityPatternFormatError::InvalidOffsetFirstLast)
+        ));
+    }
+
+    {
+        // Invalid length of offsets array
+        let offsets = vec![0, 2, 2];
+        let indices = vec![0, 5, 1, 2, 3];
+        let pattern = SparsityPattern::try_from_offsets_and_indices(3, 6, offsets, indices);
+        assert!(matches!(
+            pattern,
+            Err(SparsityPatternFormatError::InvalidOffsetArrayLength)
+        ));
+    }
+
+    {
+        // Nonmonotonic offsets
+        let offsets = vec![0, 3, 2, 5];
+        let indices = vec![0, 1, 2, 3, 4];
+        let pattern = SparsityPattern::try_from_offsets_and_indices(3, 6, offsets, indices);
+        assert_eq!(
+            pattern,
+            Err(SparsityPatternFormatError::NonmonotonicOffsets)
+        );
+    }
+
+    {
+        // Nonmonotonic minor indices
+        let offsets = vec![0, 2, 2, 5];
+        let indices = vec![0, 2, 3, 1, 4];
+        let pattern = SparsityPattern::try_from_offsets_and_indices(3, 6, offsets, indices);
+        assert_eq!(
+            pattern,
+            Err(SparsityPatternFormatError::NonmonotonicMinorIndices)
+        );
+    }
+
+    {
+        // Minor index out of bounds
+        let offsets = vec![0, 2, 2, 5];
+        let indices = vec![0, 6, 1, 2, 3];
+        let pattern = SparsityPattern::try_from_offsets_and_indices(3, 6, offsets, indices);
+        assert_eq!(
+            pattern,
+            Err(SparsityPatternFormatError::MinorIndexOutOfBounds)
+        );
+    }
+
+    {
+        // Duplicate entry
+        let offsets = vec![0, 2, 2, 5];
+        let indices = vec![0, 5, 2, 2, 3];
+        let pattern = SparsityPattern::try_from_offsets_and_indices(3, 6, offsets, indices);
+        assert_eq!(pattern, Err(SparsityPatternFormatError::DuplicateEntry));
+    }
+}
diff --git a/nalgebra-sparse/tests/unit_tests/proptest.rs b/nalgebra-sparse/tests/unit_tests/proptest.rs
new file mode 100644
index 00000000..0116d25e
--- /dev/null
+++ b/nalgebra-sparse/tests/unit_tests/proptest.rs
@@ -0,0 +1,247 @@
+#[test]
+#[ignore]
+fn coo_no_duplicates_generates_admissible_matrices() {
+    //TODO
+}
+
+#[cfg(feature = "slow-tests")]
+mod slow {
+    use nalgebra::DMatrix;
+    use nalgebra_sparse::proptest::{
+        coo_no_duplicates, coo_with_duplicates, csc, csr, sparsity_pattern,
+    };
+
+    use itertools::Itertools;
+    use proptest::strategy::ValueTree;
+    use proptest::test_runner::TestRunner;
+
+    use proptest::prelude::*;
+
+    use nalgebra_sparse::csr::CsrMatrix;
+    use std::collections::HashSet;
+    use std::iter::repeat;
+    use std::ops::RangeInclusive;
+
+    fn generate_all_possible_matrices(
+        value_range: RangeInclusive<i32>,
+        rows_range: RangeInclusive<usize>,
+        cols_range: RangeInclusive<usize>,
+    ) -> HashSet<DMatrix<i32>> {
+        // Enumerate all possible combinations
+        let mut all_combinations = HashSet::new();
+        for nrows in rows_range {
+            for ncols in cols_range.clone() {
+                // For the given number of rows and columns
+                let n_values = nrows * ncols;
+
+                if n_values == 0 {
+                    // If we have zero rows or columns, the set of matrices with the given
+                    // rows and columns is a single element: an empty matrix
+                    all_combinations.insert(DMatrix::from_row_slice(nrows, ncols, &[]));
+                } else {
+                    // Otherwise, we need to sample all possible matrices.
+                    // To do this, we generate the values as the (multi) Cartesian product
+                    // of the value sets.
For example, for 2x2 matrices, we consider
+                    // all possible 4-element arrays that the matrices can take by
+                    // considering all elements in the Cartesian product
+                    //  V x V x V x V
+                    // where V is the set of eligible values, e.g. V := -1 ..= 1
+                    let values_iter = repeat(value_range.clone())
+                        .take(n_values)
+                        .multi_cartesian_product();
+                    for matrix_values in values_iter {
+                        all_combinations.insert(DMatrix::from_row_slice(
+                            nrows,
+                            ncols,
+                            &matrix_values,
+                        ));
+                    }
+                }
+            }
+        }
+        all_combinations
+    }
+
+    #[cfg(feature = "slow-tests")]
+    #[test]
+    fn coo_no_duplicates_samples_all_admissible_outputs() {
+        // Note: This test basically mirrors a similar test for `matrix` in the `nalgebra` repo.
+
+        // Test that the proptest generation covers all possible outputs for a small space of inputs
+        // given enough samples.
+
+        // We use a deterministic test runner to make the test "stable".
+        let mut runner = TestRunner::deterministic();
+
+        // This number needs to be high enough so that, with high probability, we sample
+        // all possible cases
+        let num_generated_matrices = 500000;
+
+        let values = -1..=1;
+        let rows = 0..=2;
+        let cols = 0..=3;
+        let max_nnz = rows.end() * cols.end();
+        let strategy = coo_no_duplicates(values.clone(), rows.clone(), cols.clone(), max_nnz);
+
+        // Enumerate all possible combinations
+        let all_combinations = generate_all_possible_matrices(values, rows, cols);
+
+        let visited_combinations =
+            sample_matrix_output_space(strategy, &mut runner, num_generated_matrices);
+
+        assert_eq!(visited_combinations.len(), all_combinations.len());
+        assert_eq!(
+            visited_combinations, all_combinations,
+            "Did not sample all possible values."
+        );
+    }
+
+    #[cfg(feature = "slow-tests")]
+    #[test]
+    fn coo_with_duplicates_samples_all_admissible_outputs() {
+        // This is almost the same as the test for coo_no_duplicates, except that we need
+        // a different "success" criterion, since coo_with_duplicates is able to generate
+        // matrices with values outside of the value constraints. See below for details.
+
+        // We use a deterministic test runner to make the test "stable".
+        let mut runner = TestRunner::deterministic();
+
+        // This number needs to be high enough so that, with high probability, we sample
+        // all possible cases
+        let num_generated_matrices = 500000;
+
+        let values = -1..=1;
+        let rows = 0..=2;
+        let cols = 0..=3;
+        let max_nnz = rows.end() * cols.end();
+        let strategy = coo_with_duplicates(values.clone(), rows.clone(), cols.clone(), max_nnz, 2);
+
+        // Enumerate all possible combinations that fit the constraints
+        // (note: this is only a subset of the matrices that can be generated by
+        // `coo_with_duplicates`)
+        let all_combinations = generate_all_possible_matrices(values, rows, cols);
+
+        let visited_combinations =
+            sample_matrix_output_space(strategy, &mut runner, num_generated_matrices);
+
+        // Here we cannot verify that the set of visited combinations is *equal* to
+        // all possible outcomes with the given constraints, however the
+        // strategy should be able to generate all matrices that fit the constraints.
+        // In other words, we need to verify that the set of all admissible matrices
+        // is contained in the set of visited matrices.
+        assert!(all_combinations.is_subset(&visited_combinations));
+    }
+
+    #[cfg(feature = "slow-tests")]
+    #[test]
+    fn csr_samples_all_admissible_outputs() {
+        // We use a deterministic test runner to make the test "stable".
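+        // (With the value/shape ranges used below — values in -1..=1, up to 2 rows and
+        // 3 columns — there are fewer than a thousand admissible matrices in total, at
+        // most 3^6 = 729 for the largest shape, so 500000 samples make missing one
+        // extremely unlikely.)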
+ let mut runner = TestRunner::deterministic(); + + // This number needs to be high enough so that we with high probability sample + // all possible cases + let num_generated_matrices = 500000; + + let values = -1..=1; + let rows = 0..=2; + let cols = 0..=3; + let max_nnz = rows.end() * cols.end(); + let strategy = csr(values.clone(), rows.clone(), cols.clone(), max_nnz); + + let all_combinations = generate_all_possible_matrices(values, rows, cols); + + let visited_combinations = + sample_matrix_output_space(strategy, &mut runner, num_generated_matrices); + + assert_eq!(visited_combinations.len(), all_combinations.len()); + assert_eq!( + visited_combinations, all_combinations, + "Did not sample all possible values." + ); + } + + #[cfg(feature = "slow-tests")] + #[test] + fn csc_samples_all_admissible_outputs() { + // We use a deterministic test runner to make the test "stable". + let mut runner = TestRunner::deterministic(); + + // This number needs to be high enough so that we with high probability sample + // all possible cases + let num_generated_matrices = 500000; + + let values = -1..=1; + let rows = 0..=2; + let cols = 0..=3; + let max_nnz = rows.end() * cols.end(); + let strategy = csc(values.clone(), rows.clone(), cols.clone(), max_nnz); + + let all_combinations = generate_all_possible_matrices(values, rows, cols); + + let visited_combinations = + sample_matrix_output_space(strategy, &mut runner, num_generated_matrices); + + assert_eq!(visited_combinations.len(), all_combinations.len()); + assert_eq!( + visited_combinations, all_combinations, + "Did not sample all possible values." + ); + } + + #[cfg(feature = "slow-tests")] + #[test] + fn sparsity_pattern_samples_all_admissible_outputs() { + let mut runner = TestRunner::deterministic(); + + let num_generated_patterns = 50000; + + let major_dims = 0..=2; + let minor_dims = 0..=3; + let max_nnz = major_dims.end() * minor_dims.end(); + let strategy = sparsity_pattern(major_dims.clone(), minor_dims.clone(), max_nnz); + + let visited_patterns: HashSet<_> = sample_strategy(strategy, &mut runner) + .take(num_generated_patterns) + .map(|pattern| { + // We represent patterns as dense matrices with 1 if an entry is occupied, + // 0 otherwise + let values = vec![1; pattern.nnz()]; + CsrMatrix::try_from_pattern_and_values(pattern, values).unwrap() + }) + .map(|csr| DMatrix::from(&csr)) + .collect(); + + let all_possible_patterns = generate_all_possible_matrices(0..=1, major_dims, minor_dims); + + assert_eq!(visited_patterns.len(), all_possible_patterns.len()); + assert_eq!(visited_patterns, all_possible_patterns); + } + + fn sample_matrix_output_space( + strategy: S, + runner: &mut TestRunner, + num_samples: usize, + ) -> HashSet> + where + S: Strategy, + DMatrix: for<'b> From<&'b S::Value>, + { + sample_strategy(strategy, runner) + .take(num_samples) + .map(|matrix| DMatrix::from(&matrix)) + .collect() + } + + fn sample_strategy<'a, S: 'a + Strategy>( + strategy: S, + runner: &'a mut TestRunner, + ) -> impl 'a + Iterator { + repeat(()).map(move |_| { + let tree = strategy + .new_tree(runner) + .expect("Tree generation should not fail"); + let value = tree.current(); + value + }) + } +} diff --git a/src/base/allocator.rs b/src/base/allocator.rs index ebd55553..3632cf5d 100644 --- a/src/base/allocator.rs +++ b/src/base/allocator.rs @@ -1,6 +1,7 @@ //! Abstract definition of a matrix data storage allocator. 
use std::any::Any; +use std::mem; use crate::base::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint}; use crate::base::dimension::{Dim, U1}; @@ -21,7 +22,7 @@ pub trait Allocator: Any + Sized { type Buffer: ContiguousStorageMut + Clone; /// Allocates a buffer with the given number of rows and columns without initializing its content. - unsafe fn allocate_uninitialized(nrows: R, ncols: C) -> Self::Buffer; + unsafe fn allocate_uninitialized(nrows: R, ncols: C) -> mem::MaybeUninit; /// Allocates a buffer initialized with the content of the given iterator. fn allocate_from_iterator>( diff --git a/src/base/array_storage.rs b/src/base/array_storage.rs index 4fdd3e73..e067cb49 100644 --- a/src/base/array_storage.rs +++ b/src/base/array_storage.rs @@ -394,6 +394,26 @@ where } } +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Zeroable + for ArrayStorage +where + R::Value: Mul, + Prod: ArrayLength, + Self: Copy, +{ +} + +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Pod + for ArrayStorage +where + R::Value: Mul, + Prod: ArrayLength, + Self: Copy, +{ +} + #[cfg(feature = "abomonation-serialize")] impl Abomonation for ArrayStorage where diff --git a/src/base/blas.rs b/src/base/blas.rs index 761077e5..92a43a38 100644 --- a/src/base/blas.rs +++ b/src/base/blas.rs @@ -1328,7 +1328,8 @@ where ShapeConstraint: DimEq + DimEq + DimEq, DefaultAllocator: Allocator, { - let mut work = unsafe { Vector::new_uninitialized_generic(self.data.shape().0, U1) }; + let mut work = + unsafe { crate::unimplemented_or_uninitialized_generic!(self.data.shape().0, U1) }; self.quadform_tr_with_workspace(&mut work, alpha, lhs, mid, beta) } @@ -1421,7 +1422,8 @@ where ShapeConstraint: DimEq + DimEq + AreMultipliable, DefaultAllocator: Allocator, { - let mut work = unsafe { Vector::new_uninitialized_generic(mid.data.shape().0, U1) }; + let mut work = + unsafe { crate::unimplemented_or_uninitialized_generic!(mid.data.shape().0, U1) }; self.quadform_with_workspace(&mut work, alpha, mid, rhs, beta) } } diff --git a/src/base/construction.rs b/src/base/construction.rs index 8c34bf3c..ba15a0f0 100644 --- a/src/base/construction.rs +++ b/src/base/construction.rs @@ -14,6 +14,7 @@ use rand::Rng; #[cfg(feature = "std")] use rand_distr::StandardNormal; use std::iter; +use std::mem; use typenum::{self, Cmp, Greater}; #[cfg(feature = "std")] @@ -25,6 +26,23 @@ use crate::base::dimension::{Dim, DimName, Dynamic, U1, U2, U3, U4, U5, U6}; use crate::base::storage::Storage; use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Scalar, Unit, Vector, VectorN}; +/// When "no_unsound_assume_init" is enabled, expands to `unimplemented!()` instead of `new_uninitialized_generic().assume_init()`. +/// Intended as a placeholder, each callsite should be refactored to use uninitialized memory soundly +#[macro_export] +macro_rules! unimplemented_or_uninitialized_generic { + ($nrows:expr, $ncols:expr) => {{ + #[cfg(feature="no_unsound_assume_init")] { + // Some of the call sites need the number of rows and columns from this to infer a type, so + // uninitialized memory is used to infer the type, as `N: Zero` isn't available at all callsites. + // This may technically still be UB even though the assume_init is dead code, but all callsites should be fixed before #556 is closed. 
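Stepping outside the diff for context: the new `allocate_uninitialized` signature above returns `mem::MaybeUninit<Self::Buffer>` so that callers, not the allocator, decide when the buffer counts as initialized. The generic sketch below (a hypothetical helper, not nalgebra code) shows the sound version of that discipline: write every element first, reinterpret as initialized last.

```rust
use std::mem::{transmute_copy, MaybeUninit};

fn filled<const N: usize>(value: f64) -> [f64; N] {
    // An array of `MaybeUninit` may itself be created uninitialized; this
    // is the pattern documented for `MaybeUninit`, not undefined behavior.
    let mut buf: [MaybeUninit<f64>; N] = unsafe { MaybeUninit::uninit().assume_init() };
    for slot in buf.iter_mut() {
        *slot = MaybeUninit::new(value); // every element is written...
    }
    // ...so reinterpreting the buffer as initialized is sound. `transmute_copy`
    // is used because `transmute` rejects generically sized arrays.
    unsafe { transmute_copy(&buf) }
}

fn main() {
    assert_eq!(filled::<3>(1.5), [1.5, 1.5, 1.5]);
}
```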
+ let typeinference_helper = crate::base::Matrix::new_uninitialized_generic($nrows, $ncols); + unimplemented!(); + typeinference_helper.assume_init() + } + #[cfg(not(feature="no_unsound_assume_init"))] { crate::base::Matrix::new_uninitialized_generic($nrows, $ncols).assume_init() } + }} +} + /// # Generic constructors /// This set of matrix and vector construction functions are all generic /// with-regard to the matrix dimensions. They all expect to be given @@ -38,8 +56,8 @@ where /// Creates a new uninitialized matrix. If the matrix has a compile-time dimension, this panics /// if `nrows != R::to_usize()` or `ncols != C::to_usize()`. #[inline] - pub unsafe fn new_uninitialized_generic(nrows: R, ncols: C) -> Self { - Self::from_data(DefaultAllocator::allocate_uninitialized(nrows, ncols)) + pub unsafe fn new_uninitialized_generic(nrows: R, ncols: C) -> mem::MaybeUninit { + Self::from_uninitialized_data(DefaultAllocator::allocate_uninitialized(nrows, ncols)) } /// Creates a matrix with all its elements set to `elem`. @@ -88,7 +106,7 @@ where "Matrix init. error: the slice did not contain the right number of elements." ); - let mut res = unsafe { Self::new_uninitialized_generic(nrows, ncols) }; + let mut res = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) }; let mut iter = slice.iter(); for i in 0..nrows.value() { @@ -114,7 +132,7 @@ where where F: FnMut(usize, usize) -> N, { - let mut res = unsafe { Self::new_uninitialized_generic(nrows, ncols) }; + let mut res: Self = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) }; for j in 0..ncols.value() { for i in 0..nrows.value() { @@ -356,7 +374,7 @@ macro_rules! impl_constructors( ($($Dims: ty),*; $(=> $DimIdent: ident: $DimBound: ident),*; $($gargs: expr),*; $($args: ident),*) => { /// Creates a new uninitialized matrix or vector. #[inline] - pub unsafe fn new_uninitialized($($args: usize),*) -> Self { + pub unsafe fn new_uninitialized($($args: usize),*) -> mem::MaybeUninit { Self::new_uninitialized_generic($($gargs),*) } @@ -806,8 +824,8 @@ where { #[inline] fn sample<'a, G: Rng + ?Sized>(&self, rng: &'a mut G) -> MatrixMN { - let nrows = R::try_to_usize().unwrap_or_else(|| rng.gen_range(0, 10)); - let ncols = C::try_to_usize().unwrap_or_else(|| rng.gen_range(0, 10)); + let nrows = R::try_to_usize().unwrap_or_else(|| rng.gen_range(0..10)); + let ncols = C::try_to_usize().unwrap_or_else(|| rng.gen_range(0..10)); MatrixMN::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |_, _| rng.gen()) } @@ -823,9 +841,9 @@ where Owned: Clone + Send, { #[inline] - fn arbitrary(g: &mut G) -> Self { - let nrows = R::try_to_usize().unwrap_or(g.gen_range(0, 10)); - let ncols = C::try_to_usize().unwrap_or(g.gen_range(0, 10)); + fn arbitrary(g: &mut Gen) -> Self { + let nrows = R::try_to_usize().unwrap_or(usize::arbitrary(g) % 10); + let ncols = C::try_to_usize().unwrap_or(usize::arbitrary(g) % 10); Self::from_fn_generic(R::from_usize(nrows), C::from_usize(ncols), |_, _| { N::arbitrary(g) @@ -865,7 +883,10 @@ macro_rules! 
componentwise_constructors_impl( #[inline] pub fn new($($args: N),*) -> Self { unsafe { - let mut res = Self::new_uninitialized(); + #[cfg(feature="no_unsound_assume_init")] + let mut res: Self = unimplemented!(); + #[cfg(not(feature="no_unsound_assume_init"))] + let mut res = Self::new_uninitialized().assume_init(); $( *res.get_unchecked_mut(($irow, $icol)) = $args; )* res diff --git a/src/base/conversion.rs b/src/base/conversion.rs index 77bf4005..8ef1a967 100644 --- a/src/base/conversion.rs +++ b/src/base/conversion.rs @@ -50,7 +50,8 @@ where let nrows2 = R2::from_usize(nrows); let ncols2 = C2::from_usize(ncols); - let mut res = unsafe { MatrixMN::::new_uninitialized_generic(nrows2, ncols2) }; + let mut res: MatrixMN = + unsafe { crate::unimplemented_or_uninitialized_generic!(nrows2, ncols2) }; for i in 0..nrows { for j in 0..ncols { unsafe { @@ -73,7 +74,7 @@ where let nrows = R1::from_usize(nrows2); let ncols = C1::from_usize(ncols2); - let mut res = unsafe { Self::new_uninitialized_generic(nrows, ncols) }; + let mut res: Self = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) }; for i in 0..nrows2 { for j in 0..ncols2 { unsafe { @@ -117,9 +118,9 @@ macro_rules! impl_from_into_asref_1D( fn from(arr: [N; $SZ]) -> Self { unsafe { let mut res = Self::new_uninitialized(); - ptr::copy_nonoverlapping(&arr[0], res.data.ptr_mut(), $SZ); + ptr::copy_nonoverlapping(&arr[0], (*res.as_mut_ptr()).data.ptr_mut(), $SZ); - res + res.assume_init() } } } @@ -184,9 +185,9 @@ macro_rules! impl_from_into_asref_2D( fn from(arr: [[N; $SZRows]; $SZCols]) -> Self { unsafe { let mut res = Self::new_uninitialized(); - ptr::copy_nonoverlapping(&arr[0][0], res.data.ptr_mut(), $SZRows * $SZCols); + ptr::copy_nonoverlapping(&arr[0][0], (*res.as_mut_ptr()).data.ptr_mut(), $SZRows * $SZCols); - res + res.assume_init() } } } @@ -244,9 +245,9 @@ macro_rules! impl_from_into_mint_1D( fn from(v: mint::$VT) -> Self { unsafe { let mut res = Self::new_uninitialized(); - ptr::copy_nonoverlapping(&v.x, res.data.ptr_mut(), $SZ); + ptr::copy_nonoverlapping(&v.x, (*res.as_mut_ptr()).data.ptr_mut(), $SZ); - res + res.assume_init() } } } @@ -306,13 +307,13 @@ macro_rules! 
impl_from_into_mint_2D( fn from(m: mint::$MV) -> Self { unsafe { let mut res = Self::new_uninitialized(); - let mut ptr = res.data.ptr_mut(); + let mut ptr = (*res.as_mut_ptr()).data.ptr_mut(); $( ptr::copy_nonoverlapping(&m.$component.x, ptr, $SZRows); ptr = ptr.offset($SZRows); )* let _ = ptr; - res + res.assume_init() } } } diff --git a/src/base/default_allocator.rs b/src/base/default_allocator.rs index bedca471..81ed1f53 100644 --- a/src/base/default_allocator.rs +++ b/src/base/default_allocator.rs @@ -45,9 +45,8 @@ where type Buffer = ArrayStorage; #[inline] - unsafe fn allocate_uninitialized(_: R, _: C) -> Self::Buffer { - // TODO: Undefined behavior, see #556 - mem::MaybeUninit::::uninit().assume_init() + unsafe fn allocate_uninitialized(_: R, _: C) -> mem::MaybeUninit { + mem::MaybeUninit::::uninit() } #[inline] @@ -56,7 +55,10 @@ where ncols: C, iter: I, ) -> Self::Buffer { - let mut res = unsafe { Self::allocate_uninitialized(nrows, ncols) }; + #[cfg(feature = "no_unsound_assume_init")] + let mut res: Self::Buffer = unimplemented!(); + #[cfg(not(feature = "no_unsound_assume_init"))] + let mut res = unsafe { Self::allocate_uninitialized(nrows, ncols).assume_init() }; let mut count = 0; for (res, e) in res.iter_mut().zip(iter.into_iter()) { @@ -80,13 +82,13 @@ impl Allocator for DefaultAllocator { type Buffer = VecStorage; #[inline] - unsafe fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> Self::Buffer { + unsafe fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> mem::MaybeUninit { let mut res = Vec::new(); let length = nrows.value() * ncols.value(); res.reserve_exact(length); res.set_len(length); - VecStorage::new(nrows, ncols, res) + mem::MaybeUninit::new(VecStorage::new(nrows, ncols, res)) } #[inline] @@ -110,13 +112,13 @@ impl Allocator for DefaultAllocator { type Buffer = VecStorage; #[inline] - unsafe fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> Self::Buffer { + unsafe fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> mem::MaybeUninit { let mut res = Vec::new(); let length = nrows.value() * ncols.value(); res.reserve_exact(length); res.set_len(length); - VecStorage::new(nrows, ncols, res) + mem::MaybeUninit::new(VecStorage::new(nrows, ncols, res)) } #[inline] @@ -156,7 +158,11 @@ where cto: CTo, buf: >::Buffer, ) -> ArrayStorage { - let mut res = >::allocate_uninitialized(rto, cto); + #[cfg(feature = "no_unsound_assume_init")] + let mut res: ArrayStorage = unimplemented!(); + #[cfg(not(feature = "no_unsound_assume_init"))] + let mut res = + >::allocate_uninitialized(rto, cto).assume_init(); let (rfrom, cfrom) = buf.shape(); @@ -184,7 +190,11 @@ where cto: CTo, buf: ArrayStorage, ) -> VecStorage { - let mut res = >::allocate_uninitialized(rto, cto); + #[cfg(feature = "no_unsound_assume_init")] + let mut res: VecStorage = unimplemented!(); + #[cfg(not(feature = "no_unsound_assume_init"))] + let mut res = + >::allocate_uninitialized(rto, cto).assume_init(); let (rfrom, cfrom) = buf.shape(); @@ -212,7 +222,11 @@ where cto: Dynamic, buf: ArrayStorage, ) -> VecStorage { - let mut res = >::allocate_uninitialized(rto, cto); + #[cfg(feature = "no_unsound_assume_init")] + let mut res: VecStorage = unimplemented!(); + #[cfg(not(feature = "no_unsound_assume_init"))] + let mut res = + >::allocate_uninitialized(rto, cto).assume_init(); let (rfrom, cfrom) = buf.shape(); diff --git a/src/base/edition.rs b/src/base/edition.rs index 983bde43..9d8606af 100644 --- a/src/base/edition.rs +++ b/src/base/edition.rs @@ -54,8 +54,9 @@ impl> Matrix { { let irows = 
irows.into_iter(); let ncols = self.data.shape().1; - let mut res = - unsafe { MatrixMN::new_uninitialized_generic(Dynamic::new(irows.len()), ncols) }; + let mut res = unsafe { + crate::unimplemented_or_uninitialized_generic!(Dynamic::new(irows.len()), ncols) + }; // First, check that all the indices from irows are valid. // This will allow us to use unchecked access in the inner loop. @@ -89,8 +90,9 @@ impl> Matrix { { let icols = icols.into_iter(); let nrows = self.data.shape().0; - let mut res = - unsafe { MatrixMN::new_uninitialized_generic(nrows, Dynamic::new(icols.len())) }; + let mut res = unsafe { + crate::unimplemented_or_uninitialized_generic!(nrows, Dynamic::new(icols.len())) + }; for (destination, source) in icols.enumerate() { res.column_mut(destination).copy_from(&self.column(*source)) @@ -896,7 +898,9 @@ impl DMatrix { where DefaultAllocator: Reallocator, { - let placeholder = unsafe { Self::new_uninitialized(0, 0) }; + let placeholder = unsafe { + crate::unimplemented_or_uninitialized_generic!(Dynamic::new(0), Dynamic::new(0)) + }; let old = mem::replace(self, placeholder); let new = old.resize(new_nrows, new_ncols, val); let _ = mem::replace(self, new); @@ -919,8 +923,9 @@ where where DefaultAllocator: Reallocator, { - let placeholder = - unsafe { Self::new_uninitialized_generic(Dynamic::new(0), self.data.shape().1) }; + let placeholder = unsafe { + crate::unimplemented_or_uninitialized_generic!(Dynamic::new(0), self.data.shape().1) + }; let old = mem::replace(self, placeholder); let new = old.resize_vertically(new_nrows, val); let _ = mem::replace(self, new); @@ -943,8 +948,9 @@ where where DefaultAllocator: Reallocator, { - let placeholder = - unsafe { Self::new_uninitialized_generic(self.data.shape().0, Dynamic::new(0)) }; + let placeholder = unsafe { + crate::unimplemented_or_uninitialized_generic!(self.data.shape().0, Dynamic::new(0)) + }; let old = mem::replace(self, placeholder); let new = old.resize_horizontally(new_ncols, val); let _ = mem::replace(self, new); diff --git a/src/base/helper.rs b/src/base/helper.rs index de601fb6..fe5ffd02 100644 --- a/src/base/helper.rs +++ b/src/base/helper.rs @@ -7,7 +7,7 @@ use rand::Rng; #[cfg(feature = "arbitrary")] #[doc(hidden)] #[inline] -pub fn reject bool, T: Arbitrary>(g: &mut G, f: F) -> T { +pub fn reject bool, T: Arbitrary>(g: &mut Gen, f: F) -> T { use std::iter; iter::repeat(()) .map(|_| Arbitrary::arbitrary(g)) diff --git a/src/base/iter.rs b/src/base/iter.rs index 1f330d95..f744cf02 100644 --- a/src/base/iter.rs +++ b/src/base/iter.rs @@ -1,5 +1,6 @@ //! Matrix iterators. +use std::iter::FusedIterator; use std::marker::PhantomData; use std::mem; @@ -111,6 +112,46 @@ macro_rules! iterator { } } + impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + $Storage> DoubleEndedIterator + for $Name<'a, N, R, C, S> + { + #[inline] + fn next_back(&mut self) -> Option<$Ref> { + unsafe { + if self.size == 0 { + None + } else { + // Pre-decrement `size` such that it now counts to the + // element we want to return. 
+ self.size -= 1; + + // Fetch strides + let inner_stride = self.strides.0.value(); + let outer_stride = self.strides.1.value(); + + // Compute number of rows + // Division should be exact + let inner_raw_size = self.inner_end.offset_from(self.inner_ptr) as usize; + let inner_size = inner_raw_size / inner_stride; + + // Compute rows and cols remaining + let outer_remaining = self.size / inner_size; + let inner_remaining = self.size % inner_size; + + // Compute pointer to last element + let last = self.ptr.offset( + (outer_remaining * outer_stride + inner_remaining * inner_stride) + as isize, + ); + + // We want either `& *last` or `&mut *last` here, depending + // on the mutability of `$Ref`. + Some(mem::transmute(last)) + } + } + } + } + impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + $Storage> ExactSizeIterator for $Name<'a, N, R, C, S> { @@ -119,6 +160,11 @@ macro_rules! iterator { self.size } } + + impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + $Storage> FusedIterator + for $Name<'a, N, R, C, S> + { + } }; } diff --git a/src/base/matrix.rs b/src/base/matrix.rs index 8035d2f8..89d24dcc 100644 --- a/src/base/matrix.rs +++ b/src/base/matrix.rs @@ -279,6 +279,22 @@ impl> matrixcompare_core::DenseAc } } +#[cfg(feature = "bytemuck")] +unsafe impl> bytemuck::Zeroable + for Matrix +where + S: bytemuck::Zeroable, +{ +} + +#[cfg(feature = "bytemuck")] +unsafe impl> bytemuck::Pod for Matrix +where + S: bytemuck::Pod, + Self: Copy, +{ +} + impl Matrix { /// Creates a new matrix with the given data without statically checking that the matrix /// dimension matches the storage dimension. @@ -298,6 +314,21 @@ impl> Matrix { unsafe { Self::from_data_statically_unchecked(data) } } + /// Creates a new uninitialized matrix with the given uninitialized data + pub unsafe fn from_uninitialized_data(data: mem::MaybeUninit) -> mem::MaybeUninit { + let res: Matrix> = Matrix { + data, + _phantoms: PhantomData, + }; + let res: mem::MaybeUninit>> = + mem::MaybeUninit::new(res); + // safety: since we wrap the inner MaybeUninit in an outer MaybeUninit above, the fact that the `data` field is partially-uninitialized is still opaque. + // with s/transmute_copy/transmute/, rustc claims that `MaybeUninit>>` may be of a different size from `MaybeUninit>` + // but MaybeUninit's documentation says "MaybeUninit is guaranteed to have the same size, alignment, and ABI as T", which implies those types should be the same size + let res: mem::MaybeUninit> = mem::transmute_copy(&res); + res + } + /// The shape of this matrix returned as the tuple (number of rows, number of columns). 
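Returning to the `DoubleEndedIterator` implementation above: the pointer arithmetic in `next_back` can be checked against a small safe model. After the pre-decrement, `size` is the linear index of the last remaining element, and div/mod by the inner run length splits it into (outer, inner) coordinates. `element_offset` below is a hypothetical stand-in for the real pointer computation, not nalgebra code.

```rust
fn element_offset(linear: usize, inner_size: usize, strides: (usize, usize)) -> usize {
    let (inner_stride, outer_stride) = strides;
    let outer = linear / inner_size; // completed inner runs (e.g. whole columns)
    let inner = linear % inner_size; // position within the final run
    outer * outer_stride + inner * inner_stride
}

fn main() {
    // A 3x2 column-major matrix: row stride 1, column stride 3.
    let data = [11, 21, 31, 12, 22, 32];
    assert_eq!(data[element_offset(5, 3, (1, 3))], 32); // last element, (2, 1)
    assert_eq!(data[element_offset(3, 3, (1, 3))], 12); // element (0, 1)
}
```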
/// /// # Examples: @@ -497,7 +528,7 @@ impl> Matrix { let ncols: SameShapeC = Dim::from_usize(ncols); let mut res: MatrixSum = - unsafe { Matrix::new_uninitialized_generic(nrows, ncols) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) }; // TODO: use copy_from for j in 0..res.ncols() { @@ -546,7 +577,7 @@ impl> Matrix { let (nrows, ncols) = self.data.shape(); unsafe { - let mut res = Matrix::new_uninitialized_generic(ncols, nrows); + let mut res = crate::unimplemented_or_uninitialized_generic!(ncols, nrows); self.transpose_to(&mut res); res @@ -564,7 +595,8 @@ impl> Matrix { { let (nrows, ncols) = self.data.shape(); - let mut res = unsafe { MatrixMN::new_uninitialized_generic(nrows, ncols) }; + let mut res: MatrixMN = + unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) }; for j in 0..ncols.value() { for i in 0..nrows.value() { @@ -608,7 +640,8 @@ impl> Matrix { { let (nrows, ncols) = self.data.shape(); - let mut res = unsafe { MatrixMN::new_uninitialized_generic(nrows, ncols) }; + let mut res: MatrixMN = + unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) }; for j in 0..ncols.value() { for i in 0..nrows.value() { @@ -635,7 +668,8 @@ impl> Matrix { { let (nrows, ncols) = self.data.shape(); - let mut res = unsafe { MatrixMN::new_uninitialized_generic(nrows, ncols) }; + let mut res: MatrixMN = + unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) }; assert_eq!( (nrows.value(), ncols.value()), @@ -676,7 +710,8 @@ impl> Matrix { { let (nrows, ncols) = self.data.shape(); - let mut res = unsafe { MatrixMN::new_uninitialized_generic(nrows, ncols) }; + let mut res: MatrixMN = + unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, ncols) }; assert_eq!( (nrows.value(), ncols.value()), @@ -1170,7 +1205,8 @@ impl> Matrix = Matrix::new_uninitialized_generic(ncols, nrows); + let mut res: MatrixMN<_, C, R> = + crate::unimplemented_or_uninitialized_generic!(ncols, nrows); self.adjoint_to(&mut res); res @@ -1311,7 +1347,8 @@ impl> SquareMatrix { ); let dim = self.data.shape().0; - let mut res = unsafe { VectorN::new_uninitialized_generic(dim, U1) }; + let mut res: VectorN = + unsafe { crate::unimplemented_or_uninitialized_generic!(dim, U1) }; for i in 0..dim.value() { unsafe { @@ -1438,7 +1475,8 @@ impl, S: Storage> Vector { { let len = self.len(); let hnrows = DimSum::::from_usize(len + 1); - let mut res = unsafe { VectorN::::new_uninitialized_generic(hnrows, U1) }; + let mut res: VectorN = + unsafe { crate::unimplemented_or_uninitialized_generic!(hnrows, U1) }; res.generic_slice_mut((0, 0), self.data.shape()) .copy_from(self); res[(len, 0)] = element; @@ -1783,7 +1821,8 @@ impl::from_usize(3); let ncols = SameShapeC::::from_usize(1); - let mut res = Matrix::new_uninitialized_generic(nrows, ncols); + let mut res: MatrixCross = + crate::unimplemented_or_uninitialized_generic!(nrows, ncols); let ax = self.get_unchecked((0, 0)); let ay = self.get_unchecked((1, 0)); @@ -1807,7 +1846,8 @@ impl::from_usize(1); let ncols = SameShapeC::::from_usize(3); - let mut res = Matrix::new_uninitialized_generic(nrows, ncols); + let mut res: MatrixCross = + crate::unimplemented_or_uninitialized_generic!(nrows, ncols); let ax = self.get_unchecked((0, 0)); let ay = self.get_unchecked((0, 1)); diff --git a/src/base/matrix_alga.rs b/src/base/matrix_alga.rs index c8c08e64..6e97aedb 100644 --- a/src/base/matrix_alga.rs +++ b/src/base/matrix_alga.rs @@ -433,8 +433,8 @@ where "Matrix meet/join error: mismatched dimensions." 
); - let mut mres = unsafe { Self::new_uninitialized_generic(shape.0, shape.1) }; - let mut jres = unsafe { Self::new_uninitialized_generic(shape.0, shape.1) }; + let mut mres = unsafe { crate::unimplemented_or_uninitialized_generic!(shape.0, shape.1) }; + let mut jres = unsafe { crate::unimplemented_or_uninitialized_generic!(shape.0, shape.1) }; for i in 0..shape.0.value() * shape.1.value() { unsafe { diff --git a/src/base/mod.rs b/src/base/mod.rs index edea4a2d..9f08572f 100644 --- a/src/base/mod.rs +++ b/src/base/mod.rs @@ -15,6 +15,7 @@ mod alias_slice; mod array_storage; mod cg; mod componentwise; +#[macro_use] mod construction; mod construction_slice; mod conversion; diff --git a/src/base/norm.rs b/src/base/norm.rs index a7fa66e9..9717e031 100644 --- a/src/base/norm.rs +++ b/src/base/norm.rs @@ -8,7 +8,7 @@ use crate::allocator::Allocator; use crate::base::{DefaultAllocator, Dim, DimName, Matrix, MatrixMN, Normed, VectorN}; use crate::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint}; use crate::storage::{Storage, StorageMut}; -use crate::{ComplexField, Scalar, SimdComplexField, Unit}; +use crate::{ComplexField, RealField, Scalar, SimdComplexField, Unit}; use simba::scalar::ClosedNeg; use simba::simd::{SimdOption, SimdPartialOrd}; @@ -334,11 +334,27 @@ impl> Matrix { { let n = self.norm(); - if n >= min_magnitude { + if n > min_magnitude { self.scale_mut(magnitude / n) } } + /// Returns a new vector equal to `self`, but with its magnitude clamped between `0.0` and `max`. + #[inline] + pub fn cap_magnitude(&self, max: N::RealField) -> MatrixMN + where + N: RealField, + DefaultAllocator: Allocator, + { + let n = self.norm(); + + if n > max { + self.scale(max / n) + } else { + self.clone_owned() + } + } + /// Returns a normalized version of this matrix unless its norm is smaller than or equal to `eps`. /// /// The components of this matrix cannot be SIMD types (use `simd_try_normalize` instead). diff --git a/src/base/ops.rs b/src/base/ops.rs index 01968b47..73f18a8c 100644 --- a/src/base/ops.rs +++ b/src/base/ops.rs @@ -331,7 +331,7 @@ macro_rules!
componentwise_binop_impl( let (nrows, ncols) = self.shape(); let nrows: SameShapeR = Dim::from_usize(nrows); let ncols: SameShapeC = Dim::from_usize(ncols); - Matrix::new_uninitialized_generic(nrows, ncols) + crate::unimplemented_or_uninitialized_generic!(nrows, ncols) }; self.$method_to_statically_unchecked(rhs, &mut res); @@ -573,9 +573,9 @@ where #[inline] fn mul(self, rhs: &'b Matrix) -> Self::Output { - let mut res = - unsafe { Matrix::new_uninitialized_generic(self.data.shape().0, rhs.data.shape().1) }; - + let mut res = unsafe { + crate::unimplemented_or_uninitialized_generic!(self.data.shape().0, rhs.data.shape().1) + }; self.mul_to(rhs, &mut res); res } @@ -684,8 +684,9 @@ where DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { - let mut res = - unsafe { Matrix::new_uninitialized_generic(self.data.shape().1, rhs.data.shape().1) }; + let mut res = unsafe { + crate::unimplemented_or_uninitialized_generic!(self.data.shape().1, rhs.data.shape().1) + }; self.tr_mul_to(rhs, &mut res); res @@ -700,8 +701,9 @@ where DefaultAllocator: Allocator, ShapeConstraint: SameNumberOfRows, { - let mut res = - unsafe { Matrix::new_uninitialized_generic(self.data.shape().1, rhs.data.shape().1) }; + let mut res = unsafe { + crate::unimplemented_or_uninitialized_generic!(self.data.shape().1, rhs.data.shape().1) + }; self.ad_mul_to(rhs, &mut res); res @@ -815,8 +817,9 @@ where let (nrows1, ncols1) = self.data.shape(); let (nrows2, ncols2) = rhs.data.shape(); - let mut res = - unsafe { Matrix::new_uninitialized_generic(nrows1.mul(nrows2), ncols1.mul(ncols2)) }; + let mut res = unsafe { + crate::unimplemented_or_uninitialized_generic!(nrows1.mul(nrows2), ncols1.mul(ncols2)) + }; { let mut data_res = res.data.ptr_mut(); diff --git a/src/base/statistics.rs b/src/base/statistics.rs index 231f654b..811b508f 100644 --- a/src/base/statistics.rs +++ b/src/base/statistics.rs @@ -17,7 +17,8 @@ impl> Matrix { DefaultAllocator: Allocator, { let ncols = self.data.shape().1; - let mut res = unsafe { RowVectorN::new_uninitialized_generic(U1, ncols) }; + let mut res: RowVectorN = + unsafe { crate::unimplemented_or_uninitialized_generic!(U1, ncols) }; for i in 0..ncols.value() { // TODO: avoid bound checking of column. @@ -42,7 +43,8 @@ impl> Matrix { DefaultAllocator: Allocator, { let ncols = self.data.shape().1; - let mut res = unsafe { VectorN::new_uninitialized_generic(ncols, U1) }; + let mut res: VectorN = + unsafe { crate::unimplemented_or_uninitialized_generic!(ncols, U1) }; for i in 0..ncols.value() { // TODO: avoid bound checking of column. 
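Before the diff moves on to `src/base/unit.rs`, here is a short usage sketch for the `cap_magnitude` method added in `src/base/norm.rs` above, assuming a nalgebra build that includes this change: a vector at or under the cap is returned unchanged, while a longer one is rescaled by `max / norm`.

```rust
use nalgebra::Vector2;

fn main() {
    let v = Vector2::new(3.0, 4.0); // norm 5
    assert_eq!(v.cap_magnitude(10.0), v); // under the cap: unchanged

    let capped = v.cap_magnitude(2.5); // over the cap: rescaled by max / n
    assert_eq!(capped, Vector2::new(1.5, 2.0));
    assert!((capped.norm() - 2.5).abs() < 1.0e-12);
}
```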
diff --git a/src/base/unit.rs b/src/base/unit.rs index 2483307a..70e3a927 100644 --- a/src/base/unit.rs +++ b/src/base/unit.rs @@ -30,6 +30,12 @@ pub struct Unit { pub(crate) value: T, } +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Zeroable for Unit where T: bytemuck::Zeroable {} + +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Pod for Unit where T: bytemuck::Pod {} + #[cfg(feature = "serde-serialize")] impl Serialize for Unit { fn serialize(&self, serializer: S) -> Result diff --git a/src/debug/random_orthogonal.rs b/src/debug/random_orthogonal.rs index de72bdb7..fc740dc2 100644 --- a/src/debug/random_orthogonal.rs +++ b/src/debug/random_orthogonal.rs @@ -48,9 +48,8 @@ where DefaultAllocator: Allocator, Owned: Clone + Send, { - fn arbitrary(g: &mut G) -> Self { - use rand::Rng; - let dim = D::try_to_usize().unwrap_or(g.gen_range(1, 50)); + fn arbitrary(g: &mut Gen) -> Self { + let dim = D::try_to_usize().unwrap_or(1 + usize::arbitrary(g) % 50); Self::new(D::from_usize(dim), || N::arbitrary(g)) } } diff --git a/src/debug/random_sdp.rs b/src/debug/random_sdp.rs index b9f81859..fa2ef118 100644 --- a/src/debug/random_sdp.rs +++ b/src/debug/random_sdp.rs @@ -51,9 +51,8 @@ where DefaultAllocator: Allocator, Owned: Clone + Send, { - fn arbitrary(g: &mut G) -> Self { - use rand::Rng; - let dim = D::try_to_usize().unwrap_or(g.gen_range(1, 50)); + fn arbitrary(g: &mut Gen) -> Self { + let dim = D::try_to_usize().unwrap_or(1 + usize::arbitrary(g) % 50); Self::new(D::from_usize(dim), || N::arbitrary(g)) } } diff --git a/src/geometry/dual_quaternion.rs b/src/geometry/dual_quaternion.rs index ce9f7284..b11e7364 100644 --- a/src/geometry/dual_quaternion.rs +++ b/src/geometry/dual_quaternion.rs @@ -1,6 +1,13 @@ -use crate::{Quaternion, SimdRealField}; +use crate::{ + Isometry3, Matrix4, Normed, Point3, Quaternion, Scalar, SimdRealField, Translation3, Unit, + UnitQuaternion, Vector3, VectorN, Zero, U8, +}; +use approx::{AbsDiffEq, RelativeEq, UlpsEq}; #[cfg(feature = "serde-serialize")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; +use std::fmt; + +use simba::scalar::{ClosedNeg, RealField}; /// A dual quaternion. /// @@ -28,14 +35,23 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer}; /// If a feature that you need is missing, feel free to open an issue or a PR. /// See https://github.com/dimforge/nalgebra/issues/487 #[repr(C)] -#[derive(Debug, Default, Eq, PartialEq, Copy, Clone)] -pub struct DualQuaternion { +#[derive(Debug, Eq, PartialEq, Copy, Clone)] +pub struct DualQuaternion { /// The real component of the quaternion pub real: Quaternion, /// The dual component of the quaternion pub dual: Quaternion, } +impl Default for DualQuaternion { + fn default() -> Self { + Self { + real: Quaternion::default(), + dual: Quaternion::default(), + } + } +} + impl DualQuaternion where N::Element: SimdRealField, @@ -77,8 +93,147 @@ where /// relative_eq!(dq.real.norm(), 1.0); /// ``` #[inline] - pub fn normalize_mut(&mut self) { - *self = self.normalize(); + pub fn normalize_mut(&mut self) -> N { + let real_norm = self.real.norm(); + self.real /= real_norm; + self.dual /= real_norm; + real_norm + } + + /// The conjugate of this dual quaternion, containing the conjugate of + /// the real and imaginary parts.. 
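A worked sketch of the `normalize_mut` change above, which now divides both components by the norm of the real part and returns that norm (this assumes a build that includes this PR's `DualQuaternion` API):

```rust
use nalgebra::{DualQuaternion, Quaternion};

fn main() {
    let real = Quaternion::new(2.0, 0.0, 0.0, 0.0); // norm 2
    let dual = Quaternion::new(4.0, 2.0, 0.0, 0.0);
    let mut dq = DualQuaternion::from_real_and_dual(real, dual);

    let norm = dq.normalize_mut(); // both parts divided by the real norm
    assert_eq!(norm, 2.0);
    assert_eq!(dq.real, Quaternion::new(1.0, 0.0, 0.0, 0.0));
    assert_eq!(dq.dual, Quaternion::new(2.0, 1.0, 0.0, 0.0));
}
```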
+ /// + /// # Example + /// ``` + /// # use nalgebra::{DualQuaternion, Quaternion}; + /// let real = Quaternion::new(1.0, 2.0, 3.0, 4.0); + /// let dual = Quaternion::new(5.0, 6.0, 7.0, 8.0); + /// let dq = DualQuaternion::from_real_and_dual(real, dual); + /// + /// let conj = dq.conjugate(); + /// assert!(conj.real.i == -2.0 && conj.real.j == -3.0 && conj.real.k == -4.0); + /// assert!(conj.real.w == 1.0); + /// assert!(conj.dual.i == -6.0 && conj.dual.j == -7.0 && conj.dual.k == -8.0); + /// assert!(conj.dual.w == 5.0); + /// ``` + #[inline] + #[must_use = "Did you mean to use conjugate_mut()?"] + pub fn conjugate(&self) -> Self { + Self::from_real_and_dual(self.real.conjugate(), self.dual.conjugate()) + } + + /// Replaces this quaternion by its conjugate. + /// + /// # Example + /// ``` + /// # use nalgebra::{DualQuaternion, Quaternion}; + /// let real = Quaternion::new(1.0, 2.0, 3.0, 4.0); + /// let dual = Quaternion::new(5.0, 6.0, 7.0, 8.0); + /// let mut dq = DualQuaternion::from_real_and_dual(real, dual); + /// + /// dq.conjugate_mut(); + /// assert!(dq.real.i == -2.0 && dq.real.j == -3.0 && dq.real.k == -4.0); + /// assert!(dq.real.w == 1.0); + /// assert!(dq.dual.i == -6.0 && dq.dual.j == -7.0 && dq.dual.k == -8.0); + /// assert!(dq.dual.w == 5.0); + /// ``` + #[inline] + pub fn conjugate_mut(&mut self) { + self.real.conjugate_mut(); + self.dual.conjugate_mut(); + } + + /// Inverts this dual quaternion if it is not zero. + /// + /// # Example + /// ``` + /// # #[macro_use] extern crate approx; + /// # use nalgebra::{DualQuaternion, Quaternion}; + /// let real = Quaternion::new(1.0, 2.0, 3.0, 4.0); + /// let dual = Quaternion::new(5.0, 6.0, 7.0, 8.0); + /// let dq = DualQuaternion::from_real_and_dual(real, dual); + /// let inverse = dq.try_inverse(); + /// + /// assert!(inverse.is_some()); + /// assert_relative_eq!(inverse.unwrap() * dq, DualQuaternion::identity()); + /// + /// //Non-invertible case + /// let zero = Quaternion::new(0.0, 0.0, 0.0, 0.0); + /// let dq = DualQuaternion::from_real_and_dual(zero, zero); + /// let inverse = dq.try_inverse(); + /// + /// assert!(inverse.is_none()); + /// ``` + #[inline] + #[must_use = "Did you mean to use try_inverse_mut()?"] + pub fn try_inverse(&self) -> Option + where + N: RealField, + { + let mut res = *self; + if res.try_inverse_mut() { + Some(res) + } else { + None + } + } + + /// Inverts this dual quaternion in-place if it is not zero. + /// + /// # Example + /// ``` + /// # #[macro_use] extern crate approx; + /// # use nalgebra::{DualQuaternion, Quaternion}; + /// let real = Quaternion::new(1.0, 2.0, 3.0, 4.0); + /// let dual = Quaternion::new(5.0, 6.0, 7.0, 8.0); + /// let dq = DualQuaternion::from_real_and_dual(real, dual); + /// let mut dq_inverse = dq; + /// dq_inverse.try_inverse_mut(); + /// + /// assert_relative_eq!(dq_inverse * dq, DualQuaternion::identity()); + /// + /// //Non-invertible case + /// let zero = Quaternion::new(0.0, 0.0, 0.0, 0.0); + /// let mut dq = DualQuaternion::from_real_and_dual(zero, zero); + /// assert!(!dq.try_inverse_mut()); + /// ``` + #[inline] + pub fn try_inverse_mut(&mut self) -> bool + where + N: RealField, + { + let inverted = self.real.try_inverse_mut(); + if inverted { + self.dual = -self.real * self.dual * self.real; + true + } else { + false + } + } + + /// Linear interpolation between two dual quaternions. + /// + /// Computes `self * (1 - t) + other * t`. 
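A quick numeric spot-check of this formula, using the same real parts as the doc example that follows (plain arrays, outside the doc test):

```rust
fn main() {
    // 0.75 * (1, 0, 0, 4) + 0.25 * (2, 0, 1, 0) = (1.25, 0.0, 0.25, 3.0)
    let (a, b) = ([1.0, 0.0, 0.0, 4.0], [2.0, 0.0, 1.0, 0.0]);
    let t = 0.25;
    let lerp: Vec<f64> = a.iter().zip(&b).map(|(x, y)| x * (1.0 - t) + y * t).collect();
    assert_eq!(lerp, vec![1.25, 0.0, 0.25, 3.0]);
}
```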
+ /// + /// # Example + /// ``` + /// # use nalgebra::{DualQuaternion, Quaternion}; + /// let dq1 = DualQuaternion::from_real_and_dual( + /// Quaternion::new(1.0, 0.0, 0.0, 4.0), + /// Quaternion::new(0.0, 2.0, 0.0, 0.0) + /// ); + /// let dq2 = DualQuaternion::from_real_and_dual( + /// Quaternion::new(2.0, 0.0, 1.0, 0.0), + /// Quaternion::new(0.0, 2.0, 0.0, 0.0) + /// ); + /// assert_eq!(dq1.lerp(&dq2, 0.25), DualQuaternion::from_real_and_dual( + /// Quaternion::new(1.25, 0.0, 0.25, 3.0), + /// Quaternion::new(0.0, 2.0, 0.0, 0.0) + /// )); + /// ``` + #[inline] + pub fn lerp(&self, other: &Self, t: N) -> Self { + self * (N::one() - t) + other * t } } @@ -114,3 +269,669 @@ where }) } } + +impl DualQuaternion { + fn to_vector(&self) -> VectorN { + self.as_ref().clone().into() + } +} + +impl> AbsDiffEq for DualQuaternion { + type Epsilon = N; + + #[inline] + fn default_epsilon() -> Self::Epsilon { + N::default_epsilon() + } + + #[inline] + fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool { + self.to_vector().abs_diff_eq(&other.to_vector(), epsilon) || + // Account for the double-covering of S², i.e. q = -q + self.to_vector().iter().zip(other.to_vector().iter()).all(|(a, b)| a.abs_diff_eq(&-*b, epsilon)) + } +} + +impl> RelativeEq for DualQuaternion { + #[inline] + fn default_max_relative() -> Self::Epsilon { + N::default_max_relative() + } + + #[inline] + fn relative_eq( + &self, + other: &Self, + epsilon: Self::Epsilon, + max_relative: Self::Epsilon, + ) -> bool { + self.to_vector().relative_eq(&other.to_vector(), epsilon, max_relative) || + // Account for the double-covering of S², i.e. q = -q + self.to_vector().iter().zip(other.to_vector().iter()).all(|(a, b)| a.relative_eq(&-*b, epsilon, max_relative)) + } +} + +impl> UlpsEq for DualQuaternion { + #[inline] + fn default_max_ulps() -> u32 { + N::default_max_ulps() + } + + #[inline] + fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool { + self.to_vector().ulps_eq(&other.to_vector(), epsilon, max_ulps) || + // Account for the double-covering of S², i.e. q = -q. + self.to_vector().iter().zip(other.to_vector().iter()).all(|(a, b)| a.ulps_eq(&-*b, epsilon, max_ulps)) + } +} + +/// A unit quaternions. May be used to represent a rotation followed by a translation. +pub type UnitDualQuaternion = Unit>; + +impl PartialEq for UnitDualQuaternion { + #[inline] + fn eq(&self, rhs: &Self) -> bool { + self.as_ref().eq(rhs.as_ref()) + } +} + +impl Eq for UnitDualQuaternion {} + +impl Normed for DualQuaternion { + type Norm = N::SimdRealField; + + #[inline] + fn norm(&self) -> N::SimdRealField { + self.real.norm() + } + + #[inline] + fn norm_squared(&self) -> N::SimdRealField { + self.real.norm_squared() + } + + #[inline] + fn scale_mut(&mut self, n: Self::Norm) { + self.real.scale_mut(n); + self.dual.scale_mut(n); + } + + #[inline] + fn unscale_mut(&mut self, n: Self::Norm) { + self.real.unscale_mut(n); + self.dual.unscale_mut(n); + } +} + +impl UnitDualQuaternion +where + N::Element: SimdRealField, +{ + /// The underlying dual quaternion. + /// + /// Same as `self.as_ref()`. 
+ /// + /// # Example + /// ``` + /// # use nalgebra::{DualQuaternion, UnitDualQuaternion, Quaternion}; + /// let id = UnitDualQuaternion::identity(); + /// assert_eq!(*id.dual_quaternion(), DualQuaternion::from_real_and_dual( + /// Quaternion::new(1.0, 0.0, 0.0, 0.0), + /// Quaternion::new(0.0, 0.0, 0.0, 0.0) + /// )); + /// ``` + #[inline] + pub fn dual_quaternion(&self) -> &DualQuaternion { + self.as_ref() + } + + /// Compute the conjugate of this unit quaternion. + /// + /// # Example + /// ``` + /// # use nalgebra::{UnitDualQuaternion, DualQuaternion, Quaternion}; + /// let qr = Quaternion::new(1.0, 2.0, 3.0, 4.0); + /// let qd = Quaternion::new(5.0, 6.0, 7.0, 8.0); + /// let unit = UnitDualQuaternion::new_normalize( + /// DualQuaternion::from_real_and_dual(qr, qd) + /// ); + /// let conj = unit.conjugate(); + /// assert_eq!(conj.real, unit.real.conjugate()); + /// assert_eq!(conj.dual, unit.dual.conjugate()); + /// ``` + #[inline] + #[must_use = "Did you mean to use conjugate_mut()?"] + pub fn conjugate(&self) -> Self { + Self::new_unchecked(self.as_ref().conjugate()) + } + + /// Compute the conjugate of this unit quaternion in-place. + /// + /// # Example + /// ``` + /// # use nalgebra::{UnitDualQuaternion, DualQuaternion, Quaternion}; + /// let qr = Quaternion::new(1.0, 2.0, 3.0, 4.0); + /// let qd = Quaternion::new(5.0, 6.0, 7.0, 8.0); + /// let unit = UnitDualQuaternion::new_normalize( + /// DualQuaternion::from_real_and_dual(qr, qd) + /// ); + /// let mut conj = unit.clone(); + /// conj.conjugate_mut(); + /// assert_eq!(conj.as_ref().real, unit.as_ref().real.conjugate()); + /// assert_eq!(conj.as_ref().dual, unit.as_ref().dual.conjugate()); + /// ``` + #[inline] + pub fn conjugate_mut(&mut self) { + self.as_mut_unchecked().conjugate_mut() + } + + /// Inverts this dual quaternion if it is not zero. + /// + /// # Example + /// ``` + /// # #[macro_use] extern crate approx; + /// # use nalgebra::{UnitDualQuaternion, Quaternion, DualQuaternion}; + /// let qr = Quaternion::new(1.0, 2.0, 3.0, 4.0); + /// let qd = Quaternion::new(5.0, 6.0, 7.0, 8.0); + /// let unit = UnitDualQuaternion::new_normalize(DualQuaternion::from_real_and_dual(qr, qd)); + /// let inv = unit.inverse(); + /// assert_relative_eq!(unit * inv, UnitDualQuaternion::identity(), epsilon = 1.0e-6); + /// assert_relative_eq!(inv * unit, UnitDualQuaternion::identity(), epsilon = 1.0e-6); + /// ``` + #[inline] + #[must_use = "Did you mean to use inverse_mut()?"] + pub fn inverse(&self) -> Self { + let real = Unit::new_unchecked(self.as_ref().real) + .inverse() + .into_inner(); + let dual = -real * self.as_ref().dual * real; + UnitDualQuaternion::new_unchecked(DualQuaternion { real, dual }) + } + + /// Inverts this dual quaternion in place if it is not zero. 
+ /// + /// # Example + /// ``` + /// # #[macro_use] extern crate approx; + /// # use nalgebra::{UnitDualQuaternion, Quaternion, DualQuaternion}; + /// let qr = Quaternion::new(1.0, 2.0, 3.0, 4.0); + /// let qd = Quaternion::new(5.0, 6.0, 7.0, 8.0); + /// let unit = UnitDualQuaternion::new_normalize(DualQuaternion::from_real_and_dual(qr, qd)); + /// let mut inv = unit.clone(); + /// inv.inverse_mut(); + /// assert_relative_eq!(unit * inv, UnitDualQuaternion::identity(), epsilon = 1.0e-6); + /// assert_relative_eq!(inv * unit, UnitDualQuaternion::identity(), epsilon = 1.0e-6); + /// ``` + #[inline] + #[must_use = "Did you mean to use inverse_mut()?"] + pub fn inverse_mut(&mut self) { + let quat = self.as_mut_unchecked(); + quat.real = Unit::new_unchecked(quat.real).inverse().into_inner(); + quat.dual = -quat.real * quat.dual * quat.real; + } + + /// The unit dual quaternion needed to make `self` and `other` coincide. + /// + /// The result is such that: `self.isometry_to(other) * self == other`. + /// + /// # Example + /// ``` + /// # #[macro_use] extern crate approx; + /// # use nalgebra::{UnitDualQuaternion, DualQuaternion, Quaternion}; + /// let qr = Quaternion::new(1.0, 2.0, 3.0, 4.0); + /// let qd = Quaternion::new(5.0, 6.0, 7.0, 8.0); + /// let dq1 = UnitDualQuaternion::new_normalize(DualQuaternion::from_real_and_dual(qr, qd)); + /// let dq2 = UnitDualQuaternion::new_normalize(DualQuaternion::from_real_and_dual(qd, qr)); + /// let dq_to = dq1.isometry_to(&dq2); + /// assert_relative_eq!(dq_to * dq1, dq2, epsilon = 1.0e-6); + /// ``` + #[inline] + pub fn isometry_to(&self, other: &Self) -> Self { + other / self + } + + /// Linear interpolation between two unit dual quaternions. + /// + /// The result is not normalized. + /// + /// # Example + /// ``` + /// # #[macro_use] extern crate approx; + /// # use nalgebra::{UnitDualQuaternion, DualQuaternion, Quaternion}; + /// let dq1 = UnitDualQuaternion::new_normalize(DualQuaternion::from_real_and_dual( + /// Quaternion::new(0.5, 0.0, 0.5, 0.0), + /// Quaternion::new(0.0, 0.5, 0.0, 0.5) + /// )); + /// let dq2 = UnitDualQuaternion::new_normalize(DualQuaternion::from_real_and_dual( + /// Quaternion::new(0.5, 0.0, 0.0, 0.5), + /// Quaternion::new(0.5, 0.0, 0.5, 0.0) + /// )); + /// assert_relative_eq!( + /// UnitDualQuaternion::new_normalize(dq1.lerp(&dq2, 0.5)), + /// UnitDualQuaternion::new_normalize( + /// DualQuaternion::from_real_and_dual( + /// Quaternion::new(0.5, 0.0, 0.25, 0.25), + /// Quaternion::new(0.25, 0.25, 0.25, 0.25) + /// ) + /// ), + /// epsilon = 1.0e-6 + /// ); + /// ``` + #[inline] + pub fn lerp(&self, other: &Self, t: N) -> DualQuaternion { + self.as_ref().lerp(other.as_ref(), t) + } + + /// Normalized linear interpolation between two unit quaternions. + /// + /// This is the same as `self.lerp` except that the result is normalized. 
+ /// + /// # Example + /// ``` + /// # #[macro_use] extern crate approx; + /// # use nalgebra::{UnitDualQuaternion, DualQuaternion, Quaternion}; + /// let dq1 = UnitDualQuaternion::new_normalize(DualQuaternion::from_real_and_dual( + /// Quaternion::new(0.5, 0.0, 0.5, 0.0), + /// Quaternion::new(0.0, 0.5, 0.0, 0.5) + /// )); + /// let dq2 = UnitDualQuaternion::new_normalize(DualQuaternion::from_real_and_dual( + /// Quaternion::new(0.5, 0.0, 0.0, 0.5), + /// Quaternion::new(0.5, 0.0, 0.5, 0.0) + /// )); + /// assert_relative_eq!(dq1.nlerp(&dq2, 0.2), UnitDualQuaternion::new_normalize( + /// DualQuaternion::from_real_and_dual( + /// Quaternion::new(0.5, 0.0, 0.4, 0.1), + /// Quaternion::new(0.1, 0.4, 0.1, 0.4) + /// ) + /// ), epsilon = 1.0e-6); + /// ``` + #[inline] + pub fn nlerp(&self, other: &Self, t: N) -> Self { + let mut res = self.lerp(other, t); + let _ = res.normalize_mut(); + + Self::new_unchecked(res) + } + + /// Screw linear interpolation between two unit dual quaternions. This creates a + /// smooth arc from one dual quaternion to another. + /// + /// Panics if the angle between both quaternions is 180 degrees (in which case the interpolation + /// is not well-defined). Use `.try_sclerp` instead to avoid the panic. + /// + /// # Example + /// ``` + /// # #[macro_use] extern crate approx; + /// # use nalgebra::{UnitDualQuaternion, DualQuaternion, UnitQuaternion, Vector3}; + /// + /// let dq1 = UnitDualQuaternion::from_parts( + /// Vector3::new(0.0, 3.0, 0.0).into(), + /// UnitQuaternion::from_euler_angles(std::f32::consts::FRAC_PI_4, 0.0, 0.0), + /// ); + /// + /// let dq2 = UnitDualQuaternion::from_parts( + /// Vector3::new(0.0, 0.0, 3.0).into(), + /// UnitQuaternion::from_euler_angles(-std::f32::consts::PI, 0.0, 0.0), + /// ); + /// + /// let dq = dq1.sclerp(&dq2, 1.0 / 3.0); + /// + /// assert_relative_eq!( + /// dq.rotation().euler_angles().0, std::f32::consts::FRAC_PI_2, epsilon = 1.0e-6 + /// ); + /// assert_relative_eq!(dq.translation().vector.y, 3.0, epsilon = 1.0e-6); + /// ``` + #[inline] + pub fn sclerp(&self, other: &Self, t: N) -> Self + where + N: RealField, + { + self.try_sclerp(other, t, N::default_epsilon()) + .expect("DualQuaternion sclerp: ambiguous configuration.") + } + + /// Computes the screw-linear interpolation between two unit dual quaternions or returns `None` + /// if both quaternions are approximately 180 degrees apart (in which case the interpolation is + /// not well-defined). + /// + /// # Arguments + /// * `self`: the first quaternion to interpolate from. + /// * `other`: the second quaternion to interpolate toward. + /// * `t`: the interpolation parameter. Should be between 0 and 1. + /// * `epsilon`: the value below which the sine of the angle separating both quaternions + /// must be to return `None`. + #[inline] + pub fn try_sclerp(&self, other: &Self, t: N, epsilon: N) -> Option + where + N: RealField, + { + let two = N::one() + N::one(); + let half = N::one() / two; + + // Invert one of the quaternions if we've got a longest-path + // interpolation.
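The comment above introduces the shortest-path correction that the next lines implement through the 4-D dot product of the two real parts: by the double cover of rotations, `q` and `-q` encode the same transform, so a negative dot product means `-other` gives the shorter arc. A standalone sketch with plain arrays (not the nalgebra types):

```rust
fn take_shorter_arc(self_real: [f64; 4], other_real: [f64; 4]) -> [f64; 4] {
    let dot: f64 = self_real.iter().zip(&other_real).map(|(a, b)| a * b).sum();
    if dot < 0.0 {
        other_real.map(|x| -x) // flip sign: same transform, shorter path
    } else {
        other_real
    }
}

fn main() {
    let q = [0.6, 0.0, 0.8, 0.0];
    assert_eq!(take_shorter_arc(q, [-0.6, 0.0, -0.8, 0.0]), q);
}
```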
+ let other = { + let dot_product = self.as_ref().real.coords.dot(&other.as_ref().real.coords); + if dot_product < N::zero() { + -other.clone() + } else { + other.clone() + } + }; + + let difference = self.as_ref().conjugate() * other.as_ref(); + let norm_squared = difference.real.vector().norm_squared(); + if relative_eq!(norm_squared, N::zero(), epsilon = epsilon) { + return None; + } + + let inverse_norm_squared = N::one() / norm_squared; + let inverse_norm = inverse_norm_squared.sqrt(); + + let mut angle = two * difference.real.scalar().acos(); + let mut pitch = -two * difference.dual.scalar() * inverse_norm; + let direction = difference.real.vector() * inverse_norm; + let moment = (difference.dual.vector() + - direction * (pitch * difference.real.scalar() * half)) + * inverse_norm; + + angle *= t; + pitch *= t; + + let sin = (half * angle).sin(); + let cos = (half * angle).cos(); + let real = Quaternion::from_parts(cos, direction * sin); + let dual = Quaternion::from_parts( + -pitch * half * sin, + moment * sin + direction * (pitch * half * cos), + ); + + Some( + self * UnitDualQuaternion::new_unchecked(DualQuaternion::from_real_and_dual( + real, dual, + )), + ) + } + + /// Return the rotation part of this unit dual quaternion. + /// + /// ``` + /// # #[macro_use] extern crate approx; + /// # use nalgebra::{UnitDualQuaternion, UnitQuaternion, Vector3}; + /// let dq = UnitDualQuaternion::from_parts( + /// Vector3::new(0.0, 3.0, 0.0).into(), + /// UnitQuaternion::from_euler_angles(std::f32::consts::FRAC_PI_4, 0.0, 0.0) + /// ); + /// + /// assert_relative_eq!( + /// dq.rotation().angle(), std::f32::consts::FRAC_PI_4, epsilon = 1.0e-6 + /// ); + /// ``` + #[inline] + pub fn rotation(&self) -> UnitQuaternion { + Unit::new_unchecked(self.as_ref().real) + } + + /// Return the translation part of this unit dual quaternion. + /// + /// ``` + /// # #[macro_use] extern crate approx; + /// # use nalgebra::{UnitDualQuaternion, UnitQuaternion, Vector3}; + /// let dq = UnitDualQuaternion::from_parts( + /// Vector3::new(0.0, 3.0, 0.0).into(), + /// UnitQuaternion::from_euler_angles(std::f32::consts::FRAC_PI_4, 0.0, 0.0) + /// ); + /// + /// assert_relative_eq!( + /// dq.translation().vector, Vector3::new(0.0, 3.0, 0.0), epsilon = 1.0e-6 + /// ); + /// ``` + #[inline] + pub fn translation(&self) -> Translation3 { + let two = N::one() + N::one(); + Translation3::from( + ((self.as_ref().dual * self.as_ref().real.conjugate()) * two) + .vector() + .into_owned(), + ) + } + + /// Builds an isometry from this unit dual quaternion. + /// + /// ``` + /// # #[macro_use] extern crate approx; + /// # use nalgebra::{UnitDualQuaternion, UnitQuaternion, Vector3}; + /// let rotation = UnitQuaternion::from_euler_angles(std::f32::consts::PI, 0.0, 0.0); + /// let translation = Vector3::new(1.0, 3.0, 2.5); + /// let dq = UnitDualQuaternion::from_parts( + /// translation.into(), + /// rotation + /// ); + /// let iso = dq.to_isometry(); + /// + /// assert_relative_eq!(iso.rotation.angle(), std::f32::consts::PI, epsilon = 1.0e-6); + /// assert_relative_eq!(iso.translation.vector, translation, epsilon = 1.0e-6); + /// ``` + #[inline] + pub fn to_isometry(&self) -> Isometry3 { + Isometry3::from_parts(self.translation(), self.rotation()) + } + + /// Rotate and translate a point by this unit dual quaternion interpreted + /// as an isometry. + /// + /// This is the same as the multiplication `self * pt`. 
+ /// + /// ``` + /// # #[macro_use] extern crate approx; + /// # use nalgebra::{UnitDualQuaternion, UnitQuaternion, Vector3, Point3}; + /// let dq = UnitDualQuaternion::from_parts( + /// Vector3::new(0.0, 3.0, 0.0).into(), + /// UnitQuaternion::from_euler_angles(std::f32::consts::FRAC_PI_2, 0.0, 0.0) + /// ); + /// let point = Point3::new(1.0, 2.0, 3.0); + /// + /// assert_relative_eq!( + /// dq.transform_point(&point), Point3::new(1.0, 0.0, 2.0), epsilon = 1.0e-6 + /// ); + /// ``` + #[inline] + pub fn transform_point(&self, pt: &Point3) -> Point3 { + self * pt + } + + /// Rotate a vector by this unit dual quaternion, ignoring the translational + /// component. + /// + /// This is the same as the multiplication `self * v`. + /// + /// ``` + /// # #[macro_use] extern crate approx; + /// # use nalgebra::{UnitDualQuaternion, UnitQuaternion, Vector3}; + /// let dq = UnitDualQuaternion::from_parts( + /// Vector3::new(0.0, 3.0, 0.0).into(), + /// UnitQuaternion::from_euler_angles(std::f32::consts::FRAC_PI_2, 0.0, 0.0) + /// ); + /// let vector = Vector3::new(1.0, 2.0, 3.0); + /// + /// assert_relative_eq!( + /// dq.transform_vector(&vector), Vector3::new(1.0, -3.0, 2.0), epsilon = 1.0e-6 + /// ); + /// ``` + #[inline] + pub fn transform_vector(&self, v: &Vector3) -> Vector3 { + self * v + } + + /// Rotate and translate a point by the inverse of this unit quaternion. + /// + /// This may be cheaper than inverting the unit dual quaternion and + /// transforming the point. + /// + /// ``` + /// # #[macro_use] extern crate approx; + /// # use nalgebra::{UnitDualQuaternion, UnitQuaternion, Vector3, Point3}; + /// let dq = UnitDualQuaternion::from_parts( + /// Vector3::new(0.0, 3.0, 0.0).into(), + /// UnitQuaternion::from_euler_angles(std::f32::consts::FRAC_PI_2, 0.0, 0.0) + /// ); + /// let point = Point3::new(1.0, 2.0, 3.0); + /// + /// assert_relative_eq!( + /// dq.inverse_transform_point(&point), Point3::new(1.0, 3.0, 1.0), epsilon = 1.0e-6 + /// ); + /// ``` + #[inline] + pub fn inverse_transform_point(&self, pt: &Point3) -> Point3 { + self.inverse() * pt + } + + /// Rotate a vector by the inverse of this unit quaternion, ignoring the + /// translational component. + /// + /// This may be cheaper than inverting the unit dual quaternion and + /// transforming the vector. + /// + /// ``` + /// # #[macro_use] extern crate approx; + /// # use nalgebra::{UnitDualQuaternion, UnitQuaternion, Vector3}; + /// let dq = UnitDualQuaternion::from_parts( + /// Vector3::new(0.0, 3.0, 0.0).into(), + /// UnitQuaternion::from_euler_angles(std::f32::consts::FRAC_PI_2, 0.0, 0.0) + /// ); + /// let vector = Vector3::new(1.0, 2.0, 3.0); + /// + /// assert_relative_eq!( + /// dq.inverse_transform_vector(&vector), Vector3::new(1.0, 3.0, -2.0), epsilon = 1.0e-6 + /// ); + /// ``` + #[inline] + pub fn inverse_transform_vector(&self, v: &Vector3) -> Vector3 { + self.inverse() * v + } + + /// Rotate a unit vector by the inverse of this unit quaternion, ignoring + /// the translational component. This may be + /// cheaper than inverting the unit dual quaternion and transforming the + /// vector. 
+ /// + /// ``` + /// # #[macro_use] extern crate approx; + /// # use nalgebra::{UnitDualQuaternion, UnitQuaternion, Unit, Vector3}; + /// let dq = UnitDualQuaternion::from_parts( + /// Vector3::new(0.0, 3.0, 0.0).into(), + /// UnitQuaternion::from_euler_angles(std::f32::consts::FRAC_PI_2, 0.0, 0.0) + /// ); + /// let vector = Unit::new_unchecked(Vector3::new(0.0, 1.0, 0.0)); + /// + /// assert_relative_eq!( + /// dq.inverse_transform_unit_vector(&vector), + /// Unit::new_unchecked(Vector3::new(0.0, 0.0, -1.0)), + /// epsilon = 1.0e-6 + /// ); + /// ``` + #[inline] + pub fn inverse_transform_unit_vector(&self, v: &Unit>) -> Unit> { + self.inverse() * v + } +} + +impl UnitDualQuaternion +where + N::Element: SimdRealField, +{ + /// Converts this unit dual quaternion interpreted as an isometry + /// into its equivalent homogeneous transformation matrix. + /// + /// ``` + /// # #[macro_use] extern crate approx; + /// # use nalgebra::{Matrix4, UnitDualQuaternion, UnitQuaternion, Vector3}; + /// let dq = UnitDualQuaternion::from_parts( + /// Vector3::new(1.0, 3.0, 2.0).into(), + /// UnitQuaternion::from_axis_angle(&Vector3::z_axis(), std::f32::consts::FRAC_PI_6) + /// ); + /// let expected = Matrix4::new(0.8660254, -0.5, 0.0, 1.0, + /// 0.5, 0.8660254, 0.0, 3.0, + /// 0.0, 0.0, 1.0, 2.0, + /// 0.0, 0.0, 0.0, 1.0); + /// + /// assert_relative_eq!(dq.to_homogeneous(), expected, epsilon = 1.0e-6); + /// ``` + #[inline] + pub fn to_homogeneous(&self) -> Matrix4 { + self.to_isometry().to_homogeneous() + } +} + +impl Default for UnitDualQuaternion { + fn default() -> Self { + Self::identity() + } +} + +impl fmt::Display for UnitDualQuaternion { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + if let Some(axis) = self.rotation().axis() { + let axis = axis.into_inner(); + write!( + f, + "UnitDualQuaternion translation: {} − angle: {} − axis: ({}, {}, {})", + self.translation().vector, + self.rotation().angle(), + axis[0], + axis[1], + axis[2] + ) + } else { + write!( + f, + "UnitDualQuaternion translation: {} − angle: {} − axis: (undefined)", + self.translation().vector, + self.rotation().angle() + ) + } + } +} + +impl> AbsDiffEq for UnitDualQuaternion { + type Epsilon = N; + + #[inline] + fn default_epsilon() -> Self::Epsilon { + N::default_epsilon() + } + + #[inline] + fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool { + self.as_ref().abs_diff_eq(other.as_ref(), epsilon) + } +} + +impl> RelativeEq for UnitDualQuaternion { + #[inline] + fn default_max_relative() -> Self::Epsilon { + N::default_max_relative() + } + + #[inline] + fn relative_eq( + &self, + other: &Self, + epsilon: Self::Epsilon, + max_relative: Self::Epsilon, + ) -> bool { + self.as_ref() + .relative_eq(other.as_ref(), epsilon, max_relative) + } +} + +impl> UlpsEq for UnitDualQuaternion { + #[inline] + fn default_max_ulps() -> u32 { + N::default_max_ulps() + } + + #[inline] + fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool { + self.as_ref().ulps_eq(other.as_ref(), epsilon, max_ulps) + } +} diff --git a/src/geometry/dual_quaternion_alga.rs b/src/geometry/dual_quaternion_alga.rs new file mode 100644 index 00000000..f6ee9e10 --- /dev/null +++ b/src/geometry/dual_quaternion_alga.rs @@ -0,0 +1,324 @@ +use num::Zero; + +use alga::general::{ + AbstractGroup, AbstractGroupAbelian, AbstractLoop, AbstractMagma, AbstractModule, + AbstractMonoid, AbstractQuasigroup, AbstractSemigroup, Additive, Id, Identity, Module, + Multiplicative, RealField, TwoSidedInverse, +}; +use alga::linear::{ + 
+    AffineTransformation, DirectIsometry, FiniteDimVectorSpace, Isometry, NormedSpace,
+    ProjectiveTransformation, Similarity, Transformation, VectorSpace,
+};
+
+use crate::base::Vector3;
+use crate::geometry::{
+    DualQuaternion, Point3, Quaternion, Translation3, UnitDualQuaternion, UnitQuaternion,
+};
+
+impl<N: RealField + simba::scalar::RealField> Identity<Multiplicative> for DualQuaternion<N> {
+    #[inline]
+    fn identity() -> Self {
+        Self::identity()
+    }
+}
+
+impl<N: RealField + simba::scalar::RealField> Identity<Additive> for DualQuaternion<N> {
+    #[inline]
+    fn identity() -> Self {
+        Self::zero()
+    }
+}
+
+impl<N: RealField + simba::scalar::RealField> AbstractMagma<Multiplicative> for DualQuaternion<N> {
+    #[inline]
+    fn operate(&self, rhs: &Self) -> Self {
+        self * rhs
+    }
+}
+
+impl<N: RealField + simba::scalar::RealField> AbstractMagma<Additive> for DualQuaternion<N> {
+    #[inline]
+    fn operate(&self, rhs: &Self) -> Self {
+        self + rhs
+    }
+}
+
+impl<N: RealField + simba::scalar::RealField> TwoSidedInverse<Additive> for DualQuaternion<N> {
+    #[inline]
+    fn two_sided_inverse(&self) -> Self {
+        -self
+    }
+}
+
+macro_rules! impl_structures(
+    ($DualQuaternion: ident; $($marker: ident<$operator: ident>),* $(,)*) => {$(
+        impl<N: RealField + simba::scalar::RealField> $marker<$operator> for $DualQuaternion<N> { }
+    )*}
+);
+
+impl_structures!(
+    DualQuaternion;
+    AbstractSemigroup<Multiplicative>,
+    AbstractMonoid<Multiplicative>,
+
+    AbstractSemigroup<Additive>,
+    AbstractQuasigroup<Additive>,
+    AbstractMonoid<Additive>,
+    AbstractLoop<Additive>,
+    AbstractGroup<Additive>,
+    AbstractGroupAbelian<Additive>
+);
+
+/*
+ *
+ * Vector space.
+ *
+ */
+impl<N: RealField + simba::scalar::RealField> AbstractModule for DualQuaternion<N> {
+    type AbstractRing = N;
+
+    #[inline]
+    fn multiply_by(&self, n: N) -> Self {
+        self * n
+    }
+}
+
+impl<N: RealField + simba::scalar::RealField> Module for DualQuaternion<N> {
+    type Ring = N;
+}
+
+impl<N: RealField + simba::scalar::RealField> VectorSpace for DualQuaternion<N> {
+    type Field = N;
+}
+
+impl<N: RealField + simba::scalar::RealField> FiniteDimVectorSpace for DualQuaternion<N> {
+    #[inline]
+    fn dimension() -> usize {
+        8
+    }
+
+    #[inline]
+    fn canonical_basis_element(i: usize) -> Self {
+        if i < 4 {
+            DualQuaternion::from_real_and_dual(
+                Quaternion::canonical_basis_element(i),
+                Quaternion::zero(),
+            )
+        } else {
+            DualQuaternion::from_real_and_dual(
+                Quaternion::zero(),
+                Quaternion::canonical_basis_element(i - 4),
+            )
+        }
+    }
+
+    #[inline]
+    fn dot(&self, other: &Self) -> N {
+        self.real.dot(&other.real) + self.dual.dot(&other.dual)
+    }
+
+    #[inline]
+    unsafe fn component_unchecked(&self, i: usize) -> &N {
+        self.as_ref().get_unchecked(i)
+    }
+
+    #[inline]
+    unsafe fn component_unchecked_mut(&mut self, i: usize) -> &mut N {
+        self.as_mut().get_unchecked_mut(i)
+    }
+}
+
+impl<N: RealField + simba::scalar::RealField> NormedSpace for DualQuaternion<N> {
+    type RealField = N;
+    type ComplexField = N;
+
+    #[inline]
+    fn norm_squared(&self) -> N {
+        self.real.norm_squared()
+    }
+
+    #[inline]
+    fn norm(&self) -> N {
+        self.real.norm()
+    }
+
+    #[inline]
+    fn normalize(&self) -> Self {
+        self.normalize()
+    }
+
+    #[inline]
+    fn normalize_mut(&mut self) -> N {
+        self.normalize_mut()
+    }
+
+    #[inline]
+    fn try_normalize(&self, min_norm: N) -> Option<Self> {
+        let real_norm = self.real.norm();
+        if real_norm > min_norm {
+            Some(Self::from_real_and_dual(
+                self.real / real_norm,
+                self.dual / real_norm,
+            ))
+        } else {
+            None
+        }
+    }
+
+    #[inline]
+    fn try_normalize_mut(&mut self, min_norm: N) -> Option<N> {
+        let real_norm = self.real.norm();
+        if real_norm > min_norm {
+            self.real /= real_norm;
+            self.dual /= real_norm;
+            Some(real_norm)
+        } else {
+            None
+        }
+    }
+}
+
+/*
+ *
+ * Implementations for UnitDualQuaternion.
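+ *
+ * These impls expose the multiplicative group structure of unit dual
+ * quaternions and their action on 3D points and vectors as direct isometries.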
+ *
+ */
+impl<N: RealField + simba::scalar::RealField> Identity<Multiplicative> for UnitDualQuaternion<N> {
+    #[inline]
+    fn identity() -> Self {
+        Self::identity()
+    }
+}
+
+impl<N: RealField + simba::scalar::RealField> AbstractMagma<Multiplicative>
+    for UnitDualQuaternion<N>
+{
+    #[inline]
+    fn operate(&self, rhs: &Self) -> Self {
+        self * rhs
+    }
+}
+
+impl<N: RealField + simba::scalar::RealField> TwoSidedInverse<Multiplicative>
+    for UnitDualQuaternion<N>
+{
+    #[inline]
+    fn two_sided_inverse(&self) -> Self {
+        self.inverse()
+    }
+
+    #[inline]
+    fn two_sided_inverse_mut(&mut self) {
+        self.inverse_mut()
+    }
+}
+
+impl_structures!(
+    UnitDualQuaternion;
+    AbstractSemigroup<Multiplicative>,
+    AbstractQuasigroup<Multiplicative>,
+    AbstractMonoid<Multiplicative>,
+    AbstractLoop<Multiplicative>,
+    AbstractGroup<Multiplicative>
+);
+
+impl<N: RealField + simba::scalar::RealField> Transformation<Point3<N>> for UnitDualQuaternion<N> {
+    #[inline]
+    fn transform_point(&self, pt: &Point3<N>) -> Point3<N> {
+        self.transform_point(pt)
+    }
+
+    #[inline]
+    fn transform_vector(&self, v: &Vector3<N>) -> Vector3<N> {
+        self.transform_vector(v)
+    }
+}
+
+impl<N: RealField + simba::scalar::RealField> ProjectiveTransformation<Point3<N>>
+    for UnitDualQuaternion<N>
+{
+    #[inline]
+    fn inverse_transform_point(&self, pt: &Point3<N>) -> Point3<N> {
+        self.inverse_transform_point(pt)
+    }
+
+    #[inline]
+    fn inverse_transform_vector(&self, v: &Vector3<N>) -> Vector3<N> {
+        self.inverse_transform_vector(v)
+    }
+}
+
+impl<N: RealField + simba::scalar::RealField> AffineTransformation<Point3<N>>
+    for UnitDualQuaternion<N>
+{
+    type Rotation = UnitQuaternion<N>;
+    type NonUniformScaling = Id;
+    type Translation = Translation3<N>;
+
+    #[inline]
+    fn decompose(&self) -> (Self::Translation, Self::Rotation, Id, Self::Rotation) {
+        (
+            self.translation(),
+            self.rotation(),
+            Id::new(),
+            UnitQuaternion::identity(),
+        )
+    }
+
+    #[inline]
+    fn append_translation(&self, translation: &Self::Translation) -> Self {
+        self * Self::from_parts(translation.clone(), UnitQuaternion::identity())
+    }
+
+    #[inline]
+    fn prepend_translation(&self, translation: &Self::Translation) -> Self {
+        Self::from_parts(translation.clone(), UnitQuaternion::identity()) * self
+    }
+
+    #[inline]
+    fn append_rotation(&self, r: &Self::Rotation) -> Self {
+        r * self
+    }
+
+    #[inline]
+    fn prepend_rotation(&self, r: &Self::Rotation) -> Self {
+        self * r
+    }
+
+    #[inline]
+    fn append_scaling(&self, _: &Self::NonUniformScaling) -> Self {
+        self.clone()
+    }
+
+    #[inline]
+    fn prepend_scaling(&self, _: &Self::NonUniformScaling) -> Self {
+        self.clone()
+    }
+}
+
+impl<N: RealField + simba::scalar::RealField> Similarity<Point3<N>> for UnitDualQuaternion<N> {
+    type Scaling = Id;
+
+    #[inline]
+    fn translation(&self) -> Translation3<N> {
+        self.translation()
+    }
+
+    #[inline]
+    fn rotation(&self) -> UnitQuaternion<N> {
+        self.rotation()
+    }
+
+    #[inline]
+    fn scaling(&self) -> Id {
+        Id::new()
+    }
+}
+
+macro_rules! marker_impl(
+    ($($Trait: ident),*) => {$(
+        impl<N: RealField + simba::scalar::RealField> $Trait<Point3<N>> for UnitDualQuaternion<N> { }
+    )*}
+);
+
+marker_impl!(Isometry, DirectIsometry);
diff --git a/src/geometry/dual_quaternion_construction.rs b/src/geometry/dual_quaternion_construction.rs
index 25a979f7..739972a9 100644
--- a/src/geometry/dual_quaternion_construction.rs
+++ b/src/geometry/dual_quaternion_construction.rs
@@ -1,6 +1,12 @@
-use crate::{DualQuaternion, Quaternion, SimdRealField};
+use crate::{
+    DualQuaternion, Isometry3, Quaternion, Scalar, SimdRealField, Translation3, UnitDualQuaternion,
+    UnitQuaternion,
+};
+use num::{One, Zero};
+#[cfg(feature = "arbitrary")]
+use quickcheck::{Arbitrary, Gen};
 
-impl<N: SimdRealField> DualQuaternion<N> {
+impl<N: Scalar> DualQuaternion<N> {
     /// Creates a dual quaternion from its rotation and translation components.
     ///
     /// # Example
@@ -16,7 +22,11 @@ impl<N: SimdRealField> DualQuaternion<N> {
     pub fn from_real_and_dual(real: Quaternion<N>, dual: Quaternion<N>) -> Self {
         Self { real, dual }
     }
-    /// The dual quaternion multiplicative identity
+
+    /// The dual quaternion multiplicative identity.
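+    ///
+    /// Its real part is the identity quaternion and its dual part is zero, so
+    /// multiplying by it leaves any dual quaternion unchanged.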
     ///
     /// # Example
     ///
@@ -33,10 +43,186 @@ impl<N: SimdRealField> DualQuaternion<N> {
     /// assert_eq!(dq2 * dq1, dq2);
     /// ```
     #[inline]
-    pub fn identity() -> Self {
+    pub fn identity() -> Self
+    where
+        N: SimdRealField,
+    {
         Self::from_real_and_dual(
             Quaternion::from_real(N::one()),
             Quaternion::from_real(N::zero()),
         )
     }
 }
+
+impl<N: SimdRealField> DualQuaternion<N>
+where
+    N::Element: SimdRealField,
+{
+    /// Creates a dual quaternion from only its real part, with no translation
+    /// component.
+    ///
+    /// # Example
+    /// ```
+    /// # use nalgebra::{DualQuaternion, Quaternion};
+    /// let rot = Quaternion::new(1.0, 2.0, 3.0, 4.0);
+    ///
+    /// let dq = DualQuaternion::from_real(rot);
+    /// assert_eq!(dq.real.w, 1.0);
+    /// assert_eq!(dq.dual.w, 0.0);
+    /// ```
+    #[inline]
+    pub fn from_real(real: Quaternion<N>) -> Self {
+        Self {
+            real,
+            dual: Quaternion::zero(),
+        }
+    }
+}
+
+impl<N: SimdRealField> One for DualQuaternion<N>
+where
+    N::Element: SimdRealField,
+{
+    #[inline]
+    fn one() -> Self {
+        Self::identity()
+    }
+}
+
+impl<N: SimdRealField> Zero for DualQuaternion<N>
+where
+    N::Element: SimdRealField,
+{
+    #[inline]
+    fn zero() -> Self {
+        DualQuaternion::from_real_and_dual(Quaternion::zero(), Quaternion::zero())
+    }
+
+    #[inline]
+    fn is_zero(&self) -> bool {
+        self.real.is_zero() && self.dual.is_zero()
+    }
+}
+
+#[cfg(feature = "arbitrary")]
+impl<N> Arbitrary for DualQuaternion<N>
+where
+    N: SimdRealField + Arbitrary + Send,
+    N::Element: SimdRealField,
+{
+    #[inline]
+    fn arbitrary(rng: &mut Gen) -> Self {
+        Self::from_real_and_dual(Arbitrary::arbitrary(rng), Arbitrary::arbitrary(rng))
+    }
+}
+
+impl<N: SimdRealField> UnitDualQuaternion<N> {
+    /// The unit dual quaternion multiplicative identity, which also represents
+    /// the identity transformation as an isometry.
+    ///
+    /// ```
+    /// # use nalgebra::{UnitDualQuaternion, UnitQuaternion, Vector3, Point3};
+    /// let ident = UnitDualQuaternion::identity();
+    /// let point = Point3::new(1.0, -4.3, 3.33);
+    ///
+    /// assert_eq!(ident * point, point);
+    /// assert_eq!(ident, ident.inverse());
+    /// ```
+    #[inline]
+    pub fn identity() -> Self {
+        Self::new_unchecked(DualQuaternion::identity())
+    }
+}
+
+impl<N: SimdRealField> UnitDualQuaternion<N>
+where
+    N::Element: SimdRealField,
+{
+    /// Returns a unit dual quaternion representing the translation and orientation
+    /// given by the provided rotation quaternion and translation vector.
+    ///
+    /// ```
+    /// # #[macro_use] extern crate approx;
+    /// # use nalgebra::{UnitDualQuaternion, UnitQuaternion, Vector3, Point3};
+    /// let dq = UnitDualQuaternion::from_parts(
+    ///     Vector3::new(0.0, 3.0, 0.0).into(),
+    ///     UnitQuaternion::from_euler_angles(std::f32::consts::FRAC_PI_2, 0.0, 0.0)
+    /// );
+    /// let point = Point3::new(1.0, 2.0, 3.0);
+    ///
+    /// assert_relative_eq!(dq * point, Point3::new(1.0, 0.0, 2.0), epsilon = 1.0e-6);
+    /// ```
+    #[inline]
+    pub fn from_parts(translation: Translation3<N>, rotation: UnitQuaternion<N>) -> Self {
+        let half: N = crate::convert(0.5f64);
+        UnitDualQuaternion::new_unchecked(DualQuaternion {
+            real: rotation.clone().into_inner(),
+            dual: Quaternion::from_parts(N::zero(), translation.vector)
+                * rotation.clone().into_inner()
+                * half,
+        })
+    }
+
+    /// Returns a unit dual quaternion representing the translation and orientation
+    /// given by the provided isometry.
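+    ///
+    /// This is equivalent to calling `UnitDualQuaternion::from_parts` with the
+    /// isometry's translation and rotation parts.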
+    ///
+    /// ```
+    /// # #[macro_use] extern crate approx;
+    /// # use nalgebra::{Isometry3, UnitDualQuaternion, UnitQuaternion, Vector3, Point3};
+    /// let iso = Isometry3::from_parts(
+    ///     Vector3::new(0.0, 3.0, 0.0).into(),
+    ///     UnitQuaternion::from_euler_angles(std::f32::consts::FRAC_PI_2, 0.0, 0.0)
+    /// );
+    /// let dq = UnitDualQuaternion::from_isometry(&iso);
+    /// let point = Point3::new(1.0, 2.0, 3.0);
+    ///
+    /// assert_relative_eq!(dq * point, iso * point, epsilon = 1.0e-6);
+    /// ```
+    #[inline]
+    pub fn from_isometry(isometry: &Isometry3<N>) -> Self {
+        UnitDualQuaternion::from_parts(isometry.translation, isometry.rotation)
+    }
+
+    /// Creates a unit dual quaternion from a unit quaternion rotation.
+    ///
+    /// # Example
+    /// ```
+    /// # #[macro_use] extern crate approx;
+    /// # use nalgebra::{UnitQuaternion, UnitDualQuaternion, Quaternion};
+    /// let q = Quaternion::new(1.0, 2.0, 3.0, 4.0);
+    /// let rot = UnitQuaternion::new_normalize(q);
+    ///
+    /// let dq = UnitDualQuaternion::from_rotation(rot);
+    /// assert_relative_eq!(dq.as_ref().real.norm(), 1.0, epsilon = 1.0e-6);
+    /// assert_eq!(dq.as_ref().dual.norm(), 0.0);
+    /// ```
+    #[inline]
+    pub fn from_rotation(rotation: UnitQuaternion<N>) -> Self {
+        Self::new_unchecked(DualQuaternion::from_real(rotation.into_inner()))
+    }
+}
+
+impl<N: SimdRealField> One for UnitDualQuaternion<N>
+where
+    N::Element: SimdRealField,
+{
+    #[inline]
+    fn one() -> Self {
+        Self::identity()
+    }
+}
+
+#[cfg(feature = "arbitrary")]
+impl<N> Arbitrary for UnitDualQuaternion<N>
+where
+    N: SimdRealField + Arbitrary + Send,
+    N::Element: SimdRealField,
+{
+    #[inline]
+    fn arbitrary(rng: &mut Gen) -> Self {
+        Self::new_normalize(Arbitrary::arbitrary(rng))
+    }
+}
diff --git a/src/geometry/dual_quaternion_conversion.rs b/src/geometry/dual_quaternion_conversion.rs
new file mode 100644
index 00000000..20c6895a
--- /dev/null
+++ b/src/geometry/dual_quaternion_conversion.rs
@@ -0,0 +1,188 @@
+use simba::scalar::{RealField, SubsetOf, SupersetOf};
+use simba::simd::SimdRealField;
+
+use crate::base::dimension::U3;
+use crate::base::{Matrix4, Vector4};
+use crate::geometry::{
+    DualQuaternion, Isometry3, Similarity3, SuperTCategoryOf, TAffine, Transform, Translation3,
+    UnitDualQuaternion, UnitQuaternion,
+};
+
+/*
+ * This file provides the following conversions:
+ * =============================================
+ *
+ * DualQuaternion -> DualQuaternion
+ * UnitDualQuaternion -> UnitDualQuaternion
+ * UnitDualQuaternion -> Isometry
+ * UnitDualQuaternion -> Similarity
+ * UnitDualQuaternion -> Transform
+ * UnitDualQuaternion -> Matrix (homogeneous)
+ *
+ * NOTE:
+ * UnitDualQuaternion -> DualQuaternion is already provided by: Unit<T> -> T
+ */
+
+impl<N1, N2> SubsetOf<DualQuaternion<N2>> for DualQuaternion<N1>
+where
+    N1: SimdRealField,
+    N2: SimdRealField + SupersetOf<N1>,
+{
+    #[inline]
+    fn to_superset(&self) -> DualQuaternion<N2> {
+        DualQuaternion::from_real_and_dual(self.real.to_superset(), self.dual.to_superset())
+    }
+
+    #[inline]
+    fn is_in_subset(dq: &DualQuaternion<N2>) -> bool {
+        crate::is_convertible::<_, Vector4<N1>>(&dq.real.coords)
+            && crate::is_convertible::<_, Vector4<N1>>(&dq.dual.coords)
+    }
+
+    #[inline]
+    fn from_superset_unchecked(dq: &DualQuaternion<N2>) -> Self {
+        DualQuaternion::from_real_and_dual(
+            dq.real.to_subset_unchecked(),
+            dq.dual.to_subset_unchecked(),
+        )
+    }
+}
+
+impl<N1, N2> SubsetOf<UnitDualQuaternion<N2>> for UnitDualQuaternion<N1>
+where
+    N1: SimdRealField,
+    N2: SimdRealField + SupersetOf<N1>,
+{
+    #[inline]
+    fn to_superset(&self) -> UnitDualQuaternion<N2> {
+        UnitDualQuaternion::new_unchecked(self.as_ref().to_superset())
+    }
+
+    #[inline]
+    fn is_in_subset(dq: &UnitDualQuaternion<N2>) -> bool {
+        crate::is_convertible::<_, DualQuaternion<N1>>(dq.as_ref())
+    }
+
+    #[inline]
+    fn from_superset_unchecked(dq: &UnitDualQuaternion<N2>) -> Self {
+        Self::new_unchecked(crate::convert_ref_unchecked(dq.as_ref()))
+    }
+}
+
+impl<N1, N2> SubsetOf<Isometry3<N2>> for UnitDualQuaternion<N1>
+where
+    N1: RealField,
+    N2: RealField + SupersetOf<N1>,
+{
+    #[inline]
+    fn to_superset(&self) -> Isometry3<N2> {
+        let dq: UnitDualQuaternion<N2> = self.to_superset();
+        let iso = dq.to_isometry();
+        crate::convert_unchecked(iso)
+    }
+
+    #[inline]
+    fn is_in_subset(iso: &Isometry3<N2>) -> bool {
+        crate::is_convertible::<_, UnitQuaternion<N1>>(&iso.rotation)
+            && crate::is_convertible::<_, Translation3<N1>>(&iso.translation)
+    }
+
+    #[inline]
+    fn from_superset_unchecked(iso: &Isometry3<N2>) -> Self {
+        let dq = UnitDualQuaternion::<N2>::from_isometry(iso);
+        crate::convert_unchecked(dq)
+    }
+}
+
+impl<N1, N2> SubsetOf<Similarity3<N2>> for UnitDualQuaternion<N1>
+where
+    N1: RealField,
+    N2: RealField + SupersetOf<N1>,
+{
+    #[inline]
+    fn to_superset(&self) -> Similarity3<N2> {
+        Similarity3::from_isometry(crate::convert_ref(self), N2::one())
+    }
+
+    #[inline]
+    fn is_in_subset(sim: &Similarity3<N2>) -> bool {
+        sim.scaling() == N2::one()
+    }
+
+    #[inline]
+    fn from_superset_unchecked(sim: &Similarity3<N2>) -> Self {
+        crate::convert_ref_unchecked(&sim.isometry)
+    }
+}
+
+impl<N1, N2, C> SubsetOf<Transform<N2, U3, C>> for UnitDualQuaternion<N1>
+where
+    N1: RealField,
+    N2: RealField + SupersetOf<N1>,
+    C: SuperTCategoryOf<TAffine>,
+{
+    #[inline]
+    fn to_superset(&self) -> Transform<N2, U3, C> {
+        Transform::from_matrix_unchecked(self.to_homogeneous().to_superset())
+    }
+
+    #[inline]
+    fn is_in_subset(t: &Transform<N2, U3, C>) -> bool {
+        <Self as SubsetOf<Matrix4<N2>>>::is_in_subset(t.matrix())
+    }
+
+    #[inline]
+    fn from_superset_unchecked(t: &Transform<N2, U3, C>) -> Self {
+        Self::from_superset_unchecked(t.matrix())
+    }
+}
+
+impl<N1: RealField, N2: RealField + SupersetOf<N1>> SubsetOf<Matrix4<N2>>
+    for UnitDualQuaternion<N1>
+{
+    #[inline]
+    fn to_superset(&self) -> Matrix4<N2> {
+        self.to_homogeneous().to_superset()
+    }
+
+    #[inline]
+    fn is_in_subset(m: &Matrix4<N2>) -> bool {
+        crate::is_convertible::<_, Isometry3<N1>>(m)
+    }
+
+    #[inline]
+    fn from_superset_unchecked(m: &Matrix4<N2>) -> Self {
+        let iso: Isometry3<N1> = crate::convert_ref_unchecked(m);
+        Self::from_isometry(&iso)
+    }
+}
+
+impl<N: SimdRealField> From<UnitDualQuaternion<N>> for Matrix4<N>
+where
+    N::Element: SimdRealField,
+{
+    #[inline]
+    fn from(dq: UnitDualQuaternion<N>) -> Self {
+        dq.to_homogeneous()
+    }
+}
+
+impl<N: SimdRealField> From<UnitDualQuaternion<N>> for Isometry3<N>
+where
+    N::Element: SimdRealField,
+{
+    #[inline]
+    fn from(dq: UnitDualQuaternion<N>) -> Self {
+        dq.to_isometry()
+    }
+}
+
+impl<N: SimdRealField> From<Isometry3<N>> for UnitDualQuaternion<N>
+where
+    N::Element: SimdRealField,
+{
+    #[inline]
+    fn from(iso: Isometry3<N>) -> Self {
+        Self::from_isometry(&iso)
+    }
+}
diff --git a/src/geometry/dual_quaternion_ops.rs b/src/geometry/dual_quaternion_ops.rs
index 0c9f78f4..44d36b97 100644
--- a/src/geometry/dual_quaternion_ops.rs
+++ b/src/geometry/dual_quaternion_ops.rs
@@ -10,10 +10,32 @@
  *
  * (Assignment Operators)
  *
+ * -DualQuaternion
  * DualQuaternion × Scalar
  * DualQuaternion × DualQuaternion
  * DualQuaternion + DualQuaternion
  * DualQuaternion - DualQuaternion
+ * DualQuaternion × UnitDualQuaternion
+ * DualQuaternion ÷ UnitDualQuaternion
+ * -UnitDualQuaternion
+ * UnitDualQuaternion × DualQuaternion
+ * UnitDualQuaternion × UnitDualQuaternion
+ * UnitDualQuaternion ÷ UnitDualQuaternion
+ * UnitDualQuaternion × Translation3
+ * UnitDualQuaternion ÷ Translation3
+ * UnitDualQuaternion × UnitQuaternion
+ * UnitDualQuaternion ÷ UnitQuaternion
+ * Translation3 × UnitDualQuaternion
+ * Translation3 ÷ UnitDualQuaternion
+ * UnitQuaternion × UnitDualQuaternion
+ * UnitQuaternion
÷ UnitDualQuaternion + * UnitDualQuaternion × Isometry3 + * UnitDualQuaternion ÷ Isometry3 + * Isometry3 × UnitDualQuaternion + * Isometry3 ÷ UnitDualQuaternion + * UnitDualQuaternion × Point + * UnitDualQuaternion × Vector + * UnitDualQuaternion × Unit * * --- * @@ -22,9 +44,16 @@ * - https://cs.gmu.edu/~jmlien/teaching/cs451/uploads/Main/dual-quaternion.pdf */ -use crate::{DualQuaternion, SimdRealField}; +use crate::base::storage::Storage; +use crate::{ + Allocator, DefaultAllocator, DualQuaternion, Isometry3, Point, Point3, Quaternion, + SimdRealField, Translation3, Unit, UnitDualQuaternion, UnitQuaternion, Vector, Vector3, U1, U3, + U4, +}; use std::mem; -use std::ops::{Add, Index, IndexMut, Mul, Sub}; +use std::ops::{ + Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub, SubAssign, +}; impl AsRef<[N; 8]> for DualQuaternion { #[inline] @@ -56,49 +85,1175 @@ impl IndexMut for DualQuaternion { } } -impl Mul> for DualQuaternion +impl Neg for DualQuaternion where N::Element: SimdRealField, { type Output = DualQuaternion; - fn mul(self, rhs: Self) -> Self::Output { - Self::from_real_and_dual( - self.real * rhs.real, - self.real * rhs.dual + self.dual * rhs.real, + #[inline] + fn neg(self) -> Self::Output { + DualQuaternion::from_real_and_dual(-self.real, -self.dual) + } +} + +impl<'a, N: SimdRealField> Neg for &'a DualQuaternion +where + N::Element: SimdRealField, +{ + type Output = DualQuaternion; + + #[inline] + fn neg(self) -> Self::Output { + DualQuaternion::from_real_and_dual(-&self.real, -&self.dual) + } +} + +impl Neg for UnitDualQuaternion +where + N::Element: SimdRealField, +{ + type Output = UnitDualQuaternion; + + #[inline] + fn neg(self) -> Self::Output { + UnitDualQuaternion::new_unchecked(-self.into_inner()) + } +} + +impl<'a, N: SimdRealField> Neg for &'a UnitDualQuaternion +where + N::Element: SimdRealField, +{ + type Output = UnitDualQuaternion; + + #[inline] + fn neg(self) -> Self::Output { + UnitDualQuaternion::new_unchecked(-self.as_ref()) + } +} + +macro_rules! 
dual_quaternion_op_impl( + ($Op: ident, $op: ident; + ($LhsRDim: ident, $LhsCDim: ident), ($RhsRDim: ident, $RhsCDim: ident) + $(for $Storage: ident: $StoragesBound: ident $(<$($BoundParam: ty),*>)*),*; + $lhs: ident: $Lhs: ty, $rhs: ident: $Rhs: ty, Output = $Result: ty $(=> $VDimA: ty, $VDimB: ty)*; + $action: expr; $($lives: tt),*) => { + impl<$($lives ,)* N: SimdRealField $(, $Storage: $StoragesBound $(<$($BoundParam),*>)*)*> $Op<$Rhs> for $Lhs + where N::Element: SimdRealField, + DefaultAllocator: Allocator + + Allocator { + type Output = $Result; + + #[inline] + fn $op($lhs, $rhs: $Rhs) -> Self::Output { + $action + } + } + } +); + +// DualQuaternion + DualQuaternion +dual_quaternion_op_impl!( + Add, add; + (U4, U1), (U4, U1); + self: &'a DualQuaternion, rhs: &'b DualQuaternion, Output = DualQuaternion; + DualQuaternion::from_real_and_dual( + &self.real + &rhs.real, + &self.dual + &rhs.dual, + ); + 'a, 'b); + +dual_quaternion_op_impl!( + Add, add; + (U4, U1), (U4, U1); + self: &'a DualQuaternion, rhs: DualQuaternion, Output = DualQuaternion; + DualQuaternion::from_real_and_dual( + &self.real + rhs.real, + &self.dual + rhs.dual, + ); + 'a); + +dual_quaternion_op_impl!( + Add, add; + (U4, U1), (U4, U1); + self: DualQuaternion, rhs: &'b DualQuaternion, Output = DualQuaternion; + DualQuaternion::from_real_and_dual( + self.real + &rhs.real, + self.dual + &rhs.dual, + ); + 'b); + +dual_quaternion_op_impl!( + Add, add; + (U4, U1), (U4, U1); + self: DualQuaternion, rhs: DualQuaternion, Output = DualQuaternion; + DualQuaternion::from_real_and_dual( + self.real + rhs.real, + self.dual + rhs.dual, + ); ); + +// DualQuaternion - DualQuaternion +dual_quaternion_op_impl!( + Sub, sub; + (U4, U1), (U4, U1); + self: &'a DualQuaternion, rhs: &'b DualQuaternion, Output = DualQuaternion; + DualQuaternion::from_real_and_dual( + &self.real - &rhs.real, + &self.dual - &rhs.dual, + ); + 'a, 'b); + +dual_quaternion_op_impl!( + Sub, sub; + (U4, U1), (U4, U1); + self: &'a DualQuaternion, rhs: DualQuaternion, Output = DualQuaternion; + DualQuaternion::from_real_and_dual( + &self.real - rhs.real, + &self.dual - rhs.dual, + ); + 'a); + +dual_quaternion_op_impl!( + Sub, sub; + (U4, U1), (U4, U1); + self: DualQuaternion, rhs: &'b DualQuaternion, Output = DualQuaternion; + DualQuaternion::from_real_and_dual( + self.real - &rhs.real, + self.dual - &rhs.dual, + ); + 'b); + +dual_quaternion_op_impl!( + Sub, sub; + (U4, U1), (U4, U1); + self: DualQuaternion, rhs: DualQuaternion, Output = DualQuaternion; + DualQuaternion::from_real_and_dual( + self.real - rhs.real, + self.dual - rhs.dual, + ); ); + +// DualQuaternion × DualQuaternion +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U4, U1); + self: &'a DualQuaternion, rhs: &'b DualQuaternion, Output = DualQuaternion; + DualQuaternion::from_real_and_dual( + &self.real * &rhs.real, + &self.real * &rhs.dual + &self.dual * &rhs.real, + ); + 'a, 'b); + +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U4, U1); + self: &'a DualQuaternion, rhs: DualQuaternion, Output = DualQuaternion; + self * &rhs; + 'a); + +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U4, U1); + self: DualQuaternion, rhs: &'b DualQuaternion, Output = DualQuaternion; + &self * rhs; + 'b); + +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U4, U1); + self: DualQuaternion, rhs: DualQuaternion, Output = DualQuaternion; + &self * &rhs; ); + +// DualQuaternion × UnitDualQuaternion +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U4, U1); + self: &'a DualQuaternion, rhs: &'b UnitDualQuaternion, 
Output = DualQuaternion; + self * rhs.dual_quaternion(); + 'a, 'b); + +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U4, U1); + self: &'a DualQuaternion, rhs: UnitDualQuaternion, Output = DualQuaternion; + self * rhs.dual_quaternion(); + 'a); + +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U4, U1); + self: DualQuaternion, rhs: &'b UnitDualQuaternion, Output = DualQuaternion; + self * rhs.dual_quaternion(); + 'b); + +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U4, U1); + self: DualQuaternion, rhs: UnitDualQuaternion, Output = DualQuaternion; + self * rhs.dual_quaternion();); + +// DualQuaternion ÷ UnitDualQuaternion +dual_quaternion_op_impl!( + Div, div; + (U4, U1), (U4, U1); + self: &'a DualQuaternion, rhs: &'b UnitDualQuaternion, Output = DualQuaternion; + #[allow(clippy::suspicious_arithmetic_impl)] + { self * rhs.inverse().dual_quaternion() }; + 'a, 'b); + +dual_quaternion_op_impl!( + Div, div; + (U4, U1), (U4, U1); + self: &'a DualQuaternion, rhs: UnitDualQuaternion, Output = DualQuaternion; + #[allow(clippy::suspicious_arithmetic_impl)] + { self * rhs.inverse().dual_quaternion() }; + 'a); + +dual_quaternion_op_impl!( + Div, div; + (U4, U1), (U4, U1); + self: DualQuaternion, rhs: &'b UnitDualQuaternion, Output = DualQuaternion; + #[allow(clippy::suspicious_arithmetic_impl)] + { self * rhs.inverse().dual_quaternion() }; + 'b); + +dual_quaternion_op_impl!( + Div, div; + (U4, U1), (U4, U1); + self: DualQuaternion, rhs: UnitDualQuaternion, Output = DualQuaternion; + #[allow(clippy::suspicious_arithmetic_impl)] + { self * rhs.inverse().dual_quaternion() };); + +// UnitDualQuaternion × UnitDualQuaternion +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U4, U1); + self: &'a UnitDualQuaternion, rhs: &'b UnitDualQuaternion, Output = UnitDualQuaternion; + UnitDualQuaternion::new_unchecked(self.as_ref() * rhs.as_ref()); + 'a, 'b); + +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U4, U1); + self: &'a UnitDualQuaternion, rhs: UnitDualQuaternion, Output = UnitDualQuaternion; + self * &rhs; + 'a); + +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U4, U1); + self: UnitDualQuaternion, rhs: &'b UnitDualQuaternion, Output = UnitDualQuaternion; + &self * rhs; + 'b); + +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U4, U1); + self: UnitDualQuaternion, rhs: UnitDualQuaternion, Output = UnitDualQuaternion; + &self * &rhs; ); + +// UnitDualQuaternion ÷ UnitDualQuaternion +dual_quaternion_op_impl!( + Div, div; + (U4, U1), (U4, U1); + self: &'a UnitDualQuaternion, rhs: &'b UnitDualQuaternion, Output = UnitDualQuaternion; + #[allow(clippy::suspicious_arithmetic_impl)] { self * rhs.inverse() }; + 'a, 'b); + +dual_quaternion_op_impl!( + Div, div; + (U4, U1), (U4, U1); + self: &'a UnitDualQuaternion, rhs: UnitDualQuaternion, Output = UnitDualQuaternion; + self / &rhs; + 'a); + +dual_quaternion_op_impl!( + Div, div; + (U4, U1), (U4, U1); + self: UnitDualQuaternion, rhs: &'b UnitDualQuaternion, Output = UnitDualQuaternion; + &self / rhs; + 'b); + +dual_quaternion_op_impl!( + Div, div; + (U4, U1), (U4, U1); + self: UnitDualQuaternion, rhs: UnitDualQuaternion, Output = UnitDualQuaternion; + &self / &rhs; ); + +// UnitDualQuaternion × DualQuaternion +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U4, U1); + self: &'a UnitDualQuaternion, rhs: &'b DualQuaternion, + Output = DualQuaternion => U1, U4; + self.dual_quaternion() * rhs; + 'a, 'b); + +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U4, U1); + self: &'a UnitDualQuaternion, rhs: DualQuaternion, + Output = DualQuaternion => 
U3, U3; + self.dual_quaternion() * rhs; + 'a); + +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U4, U1); + self: UnitDualQuaternion, rhs: &'b DualQuaternion, + Output = DualQuaternion => U3, U3; + self.dual_quaternion() * rhs; + 'b); + +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U4, U1); + self: UnitDualQuaternion, rhs: DualQuaternion, + Output = DualQuaternion => U3, U3; + self.dual_quaternion() * rhs;); + +// UnitDualQuaternion × UnitQuaternion +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U4, U1); + self: &'a UnitDualQuaternion, rhs: &'b UnitQuaternion, + Output = UnitDualQuaternion => U1, U4; + self * UnitDualQuaternion::::new_unchecked(DualQuaternion::from_real(rhs.into_inner())); + 'a, 'b); + +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U4, U1); + self: &'a UnitDualQuaternion, rhs: UnitQuaternion, + Output = UnitDualQuaternion => U3, U3; + self * UnitDualQuaternion::::new_unchecked(DualQuaternion::from_real(rhs.into_inner())); + 'a); + +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U4, U1); + self: UnitDualQuaternion, rhs: &'b UnitQuaternion, + Output = UnitDualQuaternion => U3, U3; + self * UnitDualQuaternion::::new_unchecked(DualQuaternion::from_real(rhs.into_inner())); + 'b); + +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U4, U1); + self: UnitDualQuaternion, rhs: UnitQuaternion, + Output = UnitDualQuaternion => U3, U3; + self * UnitDualQuaternion::::new_unchecked(DualQuaternion::from_real(rhs.into_inner()));); + +// UnitQuaternion × UnitDualQuaternion +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U4, U1); + self: &'a UnitQuaternion, rhs: &'b UnitDualQuaternion, + Output = UnitDualQuaternion => U1, U4; + UnitDualQuaternion::::new_unchecked(DualQuaternion::from_real(self.into_inner())) * rhs; + 'a, 'b); + +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U4, U1); + self: &'a UnitQuaternion, rhs: UnitDualQuaternion, + Output = UnitDualQuaternion => U3, U3; + UnitDualQuaternion::::new_unchecked(DualQuaternion::from_real(self.into_inner())) * rhs; + 'a); + +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U4, U1); + self: UnitQuaternion, rhs: &'b UnitDualQuaternion, + Output = UnitDualQuaternion => U3, U3; + UnitDualQuaternion::::new_unchecked(DualQuaternion::from_real(self.into_inner())) * rhs; + 'b); + +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U4, U1); + self: UnitQuaternion, rhs: UnitDualQuaternion, + Output = UnitDualQuaternion => U3, U3; + UnitDualQuaternion::::new_unchecked(DualQuaternion::from_real(self.into_inner())) * rhs;); + +// UnitDualQuaternion ÷ UnitQuaternion +dual_quaternion_op_impl!( + Div, div; + (U4, U1), (U4, U1); + self: &'a UnitDualQuaternion, rhs: &'b UnitQuaternion, + Output = UnitDualQuaternion => U1, U4; + #[allow(clippy::suspicious_arithmetic_impl)] + { self * UnitDualQuaternion::::from_rotation(rhs.inverse()) }; + 'a, 'b); + +dual_quaternion_op_impl!( + Div, div; + (U4, U1), (U4, U1); + self: &'a UnitDualQuaternion, rhs: UnitQuaternion, + Output = UnitDualQuaternion => U3, U3; + #[allow(clippy::suspicious_arithmetic_impl)] + { self * UnitDualQuaternion::::from_rotation(rhs.inverse()) }; + 'a); + +dual_quaternion_op_impl!( + Div, div; + (U4, U1), (U4, U1); + self: UnitDualQuaternion, rhs: &'b UnitQuaternion, + Output = UnitDualQuaternion => U3, U3; + #[allow(clippy::suspicious_arithmetic_impl)] + { self * UnitDualQuaternion::::from_rotation(rhs.inverse()) }; + 'b); + +dual_quaternion_op_impl!( + Div, div; + (U4, U1), (U4, U1); + self: UnitDualQuaternion, rhs: UnitQuaternion, + Output = 
UnitDualQuaternion => U3, U3; + #[allow(clippy::suspicious_arithmetic_impl)] + { self * UnitDualQuaternion::::from_rotation(rhs.inverse()) };); + +// UnitQuaternion ÷ UnitDualQuaternion +dual_quaternion_op_impl!( + Div, div; + (U4, U1), (U4, U1); + self: &'a UnitQuaternion, rhs: &'b UnitDualQuaternion, + Output = UnitDualQuaternion => U1, U4; + #[allow(clippy::suspicious_arithmetic_impl)] + { + UnitDualQuaternion::::new_unchecked( + DualQuaternion::from_real(self.into_inner()) + ) * rhs.inverse() + }; 'a, 'b); + +dual_quaternion_op_impl!( + Div, div; + (U4, U1), (U4, U1); + self: &'a UnitQuaternion, rhs: UnitDualQuaternion, + Output = UnitDualQuaternion => U3, U3; + #[allow(clippy::suspicious_arithmetic_impl)] + { + UnitDualQuaternion::::new_unchecked( + DualQuaternion::from_real(self.into_inner()) + ) * rhs.inverse() + }; 'a); + +dual_quaternion_op_impl!( + Div, div; + (U4, U1), (U4, U1); + self: UnitQuaternion, rhs: &'b UnitDualQuaternion, + Output = UnitDualQuaternion => U3, U3; + #[allow(clippy::suspicious_arithmetic_impl)] + { + UnitDualQuaternion::::new_unchecked( + DualQuaternion::from_real(self.into_inner()) + ) * rhs.inverse() + }; 'b); + +dual_quaternion_op_impl!( + Div, div; + (U4, U1), (U4, U1); + self: UnitQuaternion, rhs: UnitDualQuaternion, + Output = UnitDualQuaternion => U3, U3; + #[allow(clippy::suspicious_arithmetic_impl)] + { + UnitDualQuaternion::::new_unchecked( + DualQuaternion::from_real(self.into_inner()) + ) * rhs.inverse() + };); + +// UnitDualQuaternion × Translation3 +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U3, U1); + self: &'a UnitDualQuaternion, rhs: &'b Translation3, + Output = UnitDualQuaternion => U3, U1; + self * UnitDualQuaternion::::from_parts(rhs.clone(), UnitQuaternion::identity()); + 'a, 'b); + +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U3, U3); + self: &'a UnitDualQuaternion, rhs: Translation3, + Output = UnitDualQuaternion => U3, U1; + self * UnitDualQuaternion::::from_parts(rhs, UnitQuaternion::identity()); + 'a); + +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U3, U3); + self: UnitDualQuaternion, rhs: &'b Translation3, + Output = UnitDualQuaternion => U3, U1; + self * UnitDualQuaternion::::from_parts(rhs.clone(), UnitQuaternion::identity()); + 'b); + +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U3, U3); + self: UnitDualQuaternion, rhs: Translation3, + Output = UnitDualQuaternion => U3, U1; + self * UnitDualQuaternion::::from_parts(rhs, UnitQuaternion::identity()); ); + +// UnitDualQuaternion ÷ Translation3 +dual_quaternion_op_impl!( + Div, div; + (U4, U1), (U3, U1); + self: &'a UnitDualQuaternion, rhs: &'b Translation3, + Output = UnitDualQuaternion => U3, U1; + #[allow(clippy::suspicious_arithmetic_impl)] + { self * UnitDualQuaternion::::from_parts(rhs.inverse(), UnitQuaternion::identity()) }; + 'a, 'b); + +dual_quaternion_op_impl!( + Div, div; + (U4, U1), (U3, U3); + self: &'a UnitDualQuaternion, rhs: Translation3, + Output = UnitDualQuaternion => U3, U1; + #[allow(clippy::suspicious_arithmetic_impl)] + { self * UnitDualQuaternion::::from_parts(rhs.inverse(), UnitQuaternion::identity()) }; + 'a); + +dual_quaternion_op_impl!( + Div, div; + (U4, U1), (U3, U3); + self: UnitDualQuaternion, rhs: &'b Translation3, + Output = UnitDualQuaternion => U3, U1; + #[allow(clippy::suspicious_arithmetic_impl)] + { self * UnitDualQuaternion::::from_parts(rhs.inverse(), UnitQuaternion::identity()) }; + 'b); + +dual_quaternion_op_impl!( + Div, div; + (U4, U1), (U3, U3); + self: UnitDualQuaternion, rhs: Translation3, + Output = 
UnitDualQuaternion => U3, U1; + #[allow(clippy::suspicious_arithmetic_impl)] + { self * UnitDualQuaternion::::from_parts(rhs.inverse(), UnitQuaternion::identity()) };); + +// Translation3 × UnitDualQuaternion +dual_quaternion_op_impl!( + Mul, mul; + (U3, U1), (U4, U1); + self: &'b Translation3, rhs: &'a UnitDualQuaternion, + Output = UnitDualQuaternion => U3, U1; + UnitDualQuaternion::::from_parts(self.clone(), UnitQuaternion::identity()) * rhs; + 'a, 'b); + +dual_quaternion_op_impl!( + Mul, mul; + (U3, U1), (U4, U1); + self: &'a Translation3, rhs: UnitDualQuaternion, + Output = UnitDualQuaternion => U3, U1; + UnitDualQuaternion::::from_parts(self.clone(), UnitQuaternion::identity()) * rhs; + 'a); + +dual_quaternion_op_impl!( + Mul, mul; + (U3, U1), (U4, U1); + self: Translation3, rhs: &'b UnitDualQuaternion, + Output = UnitDualQuaternion => U3, U1; + UnitDualQuaternion::::from_parts(self, UnitQuaternion::identity()) * rhs; + 'b); + +dual_quaternion_op_impl!( + Mul, mul; + (U3, U1), (U4, U1); + self: Translation3, rhs: UnitDualQuaternion, + Output = UnitDualQuaternion => U3, U1; + UnitDualQuaternion::::from_parts(self, UnitQuaternion::identity()) * rhs;); + +// Translation3 ÷ UnitDualQuaternion +dual_quaternion_op_impl!( + Div, div; + (U3, U1), (U4, U1); + self: &'b Translation3, rhs: &'a UnitDualQuaternion, + Output = UnitDualQuaternion => U3, U1; + UnitDualQuaternion::::from_parts(self.clone(), UnitQuaternion::identity()) / rhs; + 'a, 'b); + +dual_quaternion_op_impl!( + Div, div; + (U3, U1), (U4, U1); + self: &'a Translation3, rhs: UnitDualQuaternion, + Output = UnitDualQuaternion => U3, U1; + UnitDualQuaternion::::from_parts(self.clone(), UnitQuaternion::identity()) / rhs; + 'a); + +dual_quaternion_op_impl!( + Div, div; + (U3, U1), (U4, U1); + self: Translation3, rhs: &'b UnitDualQuaternion, + Output = UnitDualQuaternion => U3, U1; + UnitDualQuaternion::::from_parts(self, UnitQuaternion::identity()) / rhs; + 'b); + +dual_quaternion_op_impl!( + Div, div; + (U3, U1), (U4, U1); + self: Translation3, rhs: UnitDualQuaternion, + Output = UnitDualQuaternion => U3, U1; + UnitDualQuaternion::::from_parts(self, UnitQuaternion::identity()) / rhs;); + +// UnitDualQuaternion × Isometry3 +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U3, U1); + self: &'a UnitDualQuaternion, rhs: &'b Isometry3, + Output = UnitDualQuaternion => U3, U1; + self * UnitDualQuaternion::::from_isometry(rhs); + 'a, 'b); + +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U3, U3); + self: &'a UnitDualQuaternion, rhs: Isometry3, + Output = UnitDualQuaternion => U3, U1; + self * UnitDualQuaternion::::from_isometry(&rhs); + 'a); + +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U3, U3); + self: UnitDualQuaternion, rhs: &'b Isometry3, + Output = UnitDualQuaternion => U3, U1; + self * UnitDualQuaternion::::from_isometry(rhs); + 'b); + +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U3, U3); + self: UnitDualQuaternion, rhs: Isometry3, + Output = UnitDualQuaternion => U3, U1; + self * UnitDualQuaternion::::from_isometry(&rhs); ); + +// UnitDualQuaternion ÷ Isometry3 +dual_quaternion_op_impl!( + Div, div; + (U4, U1), (U3, U1); + self: &'a UnitDualQuaternion, rhs: &'b Isometry3, + Output = UnitDualQuaternion => U3, U1; + // TODO: can we avoid the conversion to a rotation matrix? 
+ self / UnitDualQuaternion::::from_isometry(rhs); + 'a, 'b); + +dual_quaternion_op_impl!( + Div, div; + (U4, U1), (U3, U3); + self: &'a UnitDualQuaternion, rhs: Isometry3, + Output = UnitDualQuaternion => U3, U1; + self / UnitDualQuaternion::::from_isometry(&rhs); + 'a); + +dual_quaternion_op_impl!( + Div, div; + (U4, U1), (U3, U3); + self: UnitDualQuaternion, rhs: &'b Isometry3, + Output = UnitDualQuaternion => U3, U1; + self / UnitDualQuaternion::::from_isometry(rhs); + 'b); + +dual_quaternion_op_impl!( + Div, div; + (U4, U1), (U3, U3); + self: UnitDualQuaternion, rhs: Isometry3, + Output = UnitDualQuaternion => U3, U1; + self / UnitDualQuaternion::::from_isometry(&rhs); ); + +// Isometry × UnitDualQuaternion +dual_quaternion_op_impl!( + Mul, mul; + (U3, U1), (U4, U1); + self: &'a Isometry3, rhs: &'b UnitDualQuaternion, + Output = UnitDualQuaternion => U3, U1; + UnitDualQuaternion::::from_isometry(self) * rhs; + 'a, 'b); + +dual_quaternion_op_impl!( + Mul, mul; + (U3, U1), (U4, U1); + self: &'a Isometry3, rhs: UnitDualQuaternion, + Output = UnitDualQuaternion => U3, U1; + UnitDualQuaternion::::from_isometry(self) * rhs; + 'a); + +dual_quaternion_op_impl!( + Mul, mul; + (U3, U1), (U4, U1); + self: Isometry3, rhs: &'b UnitDualQuaternion, + Output = UnitDualQuaternion => U3, U1; + UnitDualQuaternion::::from_isometry(&self) * rhs; + 'b); + +dual_quaternion_op_impl!( + Mul, mul; + (U3, U1), (U4, U1); + self: Isometry3, rhs: UnitDualQuaternion, + Output = UnitDualQuaternion => U3, U1; + UnitDualQuaternion::::from_isometry(&self) * rhs; ); + +// Isometry ÷ UnitDualQuaternion +dual_quaternion_op_impl!( + Div, div; + (U3, U1), (U4, U1); + self: &'a Isometry3, rhs: &'b UnitDualQuaternion, + Output = UnitDualQuaternion => U3, U1; + // TODO: can we avoid the conversion from a rotation matrix? 
+ UnitDualQuaternion::::from_isometry(self) / rhs; + 'a, 'b); + +dual_quaternion_op_impl!( + Div, div; + (U3, U1), (U4, U1); + self: &'a Isometry3, rhs: UnitDualQuaternion, + Output = UnitDualQuaternion => U3, U1; + UnitDualQuaternion::::from_isometry(self) / rhs; + 'a); + +dual_quaternion_op_impl!( + Div, div; + (U3, U1), (U4, U1); + self: Isometry3, rhs: &'b UnitDualQuaternion, + Output = UnitDualQuaternion => U3, U1; + UnitDualQuaternion::::from_isometry(&self) / rhs; + 'b); + +dual_quaternion_op_impl!( + Div, div; + (U3, U1), (U4, U1); + self: Isometry3, rhs: UnitDualQuaternion, + Output = UnitDualQuaternion => U3, U1; + UnitDualQuaternion::::from_isometry(&self) / rhs; ); + +// UnitDualQuaternion × Vector +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U3, U1) for SB: Storage ; + self: &'a UnitDualQuaternion, rhs: &'b Vector, + Output = Vector3 => U3, U1; + Unit::new_unchecked(self.as_ref().real) * rhs; + 'a, 'b); + +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U3, U1) for SB: Storage ; + self: &'a UnitDualQuaternion, rhs: Vector, + Output = Vector3 => U3, U1; + self * &rhs; + 'a); + +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U3, U1) for SB: Storage ; + self: UnitDualQuaternion, rhs: &'b Vector, + Output = Vector3 => U3, U1; + &self * rhs; + 'b); + +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U3, U1) for SB: Storage ; + self: UnitDualQuaternion, rhs: Vector, + Output = Vector3 => U3, U1; + &self * &rhs; ); + +// UnitDualQuaternion × Point +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U3, U1); + self: &'a UnitDualQuaternion, rhs: &'b Point3, + Output = Point3 => U3, U1; + { + let two: N = crate::convert(2.0f64); + let q_point = Quaternion::from_parts(N::zero(), rhs.coords.clone()); + Point::from( + ((self.as_ref().real * q_point + self.as_ref().dual * two) * self.as_ref().real.conjugate()) + .vector() + .into_owned(), ) + }; + 'a, 'b); + +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U3, U1); + self: &'a UnitDualQuaternion, rhs: Point3, + Output = Point3 => U3, U1; + self * &rhs; + 'a); + +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U3, U1); + self: UnitDualQuaternion, rhs: &'b Point3, + Output = Point3 => U3, U1; + &self * rhs; + 'b); + +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U3, U1); + self: UnitDualQuaternion, rhs: Point3, + Output = Point3 => U3, U1; + &self * &rhs; ); + +// UnitDualQuaternion × Unit +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U3, U1) for SB: Storage ; + self: &'a UnitDualQuaternion, rhs: &'b Unit>, + Output = Unit> => U3, U4; + Unit::new_unchecked(self * rhs.as_ref()); + 'a, 'b); + +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U3, U1) for SB: Storage ; + self: &'a UnitDualQuaternion, rhs: Unit>, + Output = Unit> => U3, U4; + Unit::new_unchecked(self * rhs.into_inner()); + 'a); + +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U3, U1) for SB: Storage ; + self: UnitDualQuaternion, rhs: &'b Unit>, + Output = Unit> => U3, U4; + Unit::new_unchecked(self * rhs.as_ref()); + 'b); + +dual_quaternion_op_impl!( + Mul, mul; + (U4, U1), (U3, U1) for SB: Storage ; + self: UnitDualQuaternion, rhs: Unit>, + Output = Unit> => U3, U4; + Unit::new_unchecked(self * rhs.into_inner()); ); + +macro_rules! 
left_scalar_mul_impl( + ($($T: ty),* $(,)*) => {$( + impl Mul> for $T { + type Output = DualQuaternion<$T>; + + #[inline] + fn mul(self, right: DualQuaternion<$T>) -> Self::Output { + DualQuaternion::from_real_and_dual( + self * right.real, + self * right.dual + ) + } + } + + impl<'b> Mul<&'b DualQuaternion<$T>> for $T { + type Output = DualQuaternion<$T>; + + #[inline] + fn mul(self, right: &'b DualQuaternion<$T>) -> Self::Output { + DualQuaternion::from_real_and_dual( + self * &right.real, + self * &right.dual + ) + } + } + )*} +); + +left_scalar_mul_impl!(f32, f64); + +macro_rules! dual_quaternion_op_impl( + ($OpAssign: ident, $op_assign: ident; + ($LhsRDim: ident, $LhsCDim: ident), ($RhsRDim: ident, $RhsCDim: ident); + $lhs: ident: $Lhs: ty, $rhs: ident: $Rhs: ty $(=> $VDimA: ty, $VDimB: ty)*; + $action: expr; $($lives: tt),*) => { + impl<$($lives ,)* N: SimdRealField> $OpAssign<$Rhs> for $Lhs + where N::Element: SimdRealField, + DefaultAllocator: Allocator + + Allocator { + + #[inline] + fn $op_assign(&mut $lhs, $rhs: $Rhs) { + $action + } + } } -} +); -impl Mul for DualQuaternion -where - N::Element: SimdRealField, -{ - type Output = DualQuaternion; +// DualQuaternion += DualQuaternion +dual_quaternion_op_impl!( + AddAssign, add_assign; + (U4, U1), (U4, U1); + self: DualQuaternion, rhs: &'b DualQuaternion; + { + self.real += &rhs.real; + self.dual += &rhs.dual; + }; + 'b); - fn mul(self, rhs: N) -> Self::Output { - Self::from_real_and_dual(self.real * rhs, self.dual * rhs) - } -} +dual_quaternion_op_impl!( + AddAssign, add_assign; + (U4, U1), (U4, U1); + self: DualQuaternion, rhs: DualQuaternion; + { + self.real += rhs.real; + self.dual += rhs.dual; + };); -impl Add> for DualQuaternion -where - N::Element: SimdRealField, -{ - type Output = DualQuaternion; +// DualQuaternion -= DualQuaternion +dual_quaternion_op_impl!( + SubAssign, sub_assign; + (U4, U1), (U4, U1); + self: DualQuaternion, rhs: &'b DualQuaternion; + { + self.real -= &rhs.real; + self.dual -= &rhs.dual; + }; + 'b); - fn add(self, rhs: DualQuaternion) -> Self::Output { - Self::from_real_and_dual(self.real + rhs.real, self.dual + rhs.dual) - } -} +dual_quaternion_op_impl!( + SubAssign, sub_assign; + (U4, U1), (U4, U1); + self: DualQuaternion, rhs: DualQuaternion; + { + self.real -= rhs.real; + self.dual -= rhs.dual; + };); -impl Sub> for DualQuaternion -where - N::Element: SimdRealField, -{ - type Output = DualQuaternion; +// DualQuaternion ×= DualQuaternion +dual_quaternion_op_impl!( + MulAssign, mul_assign; + (U4, U1), (U4, U1); + self: DualQuaternion, rhs: &'b DualQuaternion; + { + let res = &*self * rhs; + self.real.coords.copy_from(&res.real.coords); + self.dual.coords.copy_from(&res.dual.coords); + }; + 'b); - fn sub(self, rhs: DualQuaternion) -> Self::Output { - Self::from_real_and_dual(self.real - rhs.real, self.dual - rhs.dual) - } -} +dual_quaternion_op_impl!( + MulAssign, mul_assign; + (U4, U1), (U4, U1); + self: DualQuaternion, rhs: DualQuaternion; + *self *= &rhs;); + +// DualQuaternion ×= UnitDualQuaternion +dual_quaternion_op_impl!( + MulAssign, mul_assign; + (U4, U1), (U4, U1); + self: DualQuaternion, rhs: &'b UnitDualQuaternion; + { + let res = &*self * rhs; + self.real.coords.copy_from(&res.real.coords); + self.dual.coords.copy_from(&res.dual.coords); + }; + 'b); + +dual_quaternion_op_impl!( + MulAssign, mul_assign; + (U4, U1), (U4, U1); + self: DualQuaternion, rhs: UnitDualQuaternion; + *self *= &rhs; ); + +// DualQuaternion ÷= UnitDualQuaternion +dual_quaternion_op_impl!( + DivAssign, div_assign; + 
(U4, U1), (U4, U1); + self: DualQuaternion, rhs: &'b UnitDualQuaternion; + { + let res = &*self / rhs; + self.real.coords.copy_from(&res.real.coords); + self.dual.coords.copy_from(&res.dual.coords); + }; + 'b); + +dual_quaternion_op_impl!( + DivAssign, div_assign; + (U4, U1), (U4, U1); + self: DualQuaternion, rhs: UnitDualQuaternion; + *self /= &rhs; ); + +// UnitDualQuaternion ×= UnitDualQuaternion +dual_quaternion_op_impl!( + MulAssign, mul_assign; + (U4, U1), (U4, U1); + self: UnitDualQuaternion, rhs: &'b UnitDualQuaternion; + { + let res = &*self * rhs; + self.as_mut_unchecked().real.coords.copy_from(&res.as_ref().real.coords); + self.as_mut_unchecked().dual.coords.copy_from(&res.as_ref().dual.coords); + }; + 'b); + +dual_quaternion_op_impl!( + MulAssign, mul_assign; + (U4, U1), (U4, U1); + self: UnitDualQuaternion, rhs: UnitDualQuaternion; + *self *= &rhs; ); + +// UnitDualQuaternion ÷= UnitDualQuaternion +dual_quaternion_op_impl!( + DivAssign, div_assign; + (U4, U1), (U4, U1); + self: UnitDualQuaternion, rhs: &'b UnitDualQuaternion; + { + let res = &*self / rhs; + self.as_mut_unchecked().real.coords.copy_from(&res.as_ref().real.coords); + self.as_mut_unchecked().dual.coords.copy_from(&res.as_ref().dual.coords); + }; + 'b); + +dual_quaternion_op_impl!( + DivAssign, div_assign; + (U4, U1), (U4, U1); + self: UnitDualQuaternion, rhs: UnitDualQuaternion; + *self /= &rhs; ); + +// UnitDualQuaternion ×= UnitQuaternion +dual_quaternion_op_impl!( + MulAssign, mul_assign; + (U4, U1), (U4, U1); + self: UnitDualQuaternion, rhs: UnitQuaternion; + { + let res = &*self * UnitDualQuaternion::from_rotation(rhs); + self.as_mut_unchecked().real.coords.copy_from(&res.as_ref().real.coords); + self.as_mut_unchecked().dual.coords.copy_from(&res.as_ref().dual.coords); + };); + +dual_quaternion_op_impl!( + MulAssign, mul_assign; + (U4, U1), (U4, U1); + self: UnitDualQuaternion, rhs: &'b UnitQuaternion; + *self *= rhs.clone(); 'b); + +// UnitDualQuaternion ÷= UnitQuaternion +dual_quaternion_op_impl!( + DivAssign, div_assign; + (U4, U1), (U4, U1); + self: UnitDualQuaternion, rhs: &'b UnitQuaternion; + #[allow(clippy::suspicious_op_assign_impl)] + { + let res = &*self * UnitDualQuaternion::from_rotation(rhs.inverse()); + self.as_mut_unchecked().real.coords.copy_from(&res.as_ref().real.coords); + self.as_mut_unchecked().dual.coords.copy_from(&res.as_ref().dual.coords); + }; + 'b); + +dual_quaternion_op_impl!( + DivAssign, div_assign; + (U4, U1), (U4, U1); + self: UnitDualQuaternion, rhs: UnitQuaternion; + *self /= &rhs; ); + +// UnitDualQuaternion ×= Translation3 +dual_quaternion_op_impl!( + MulAssign, mul_assign; + (U4, U1), (U4, U1); + self: UnitDualQuaternion, rhs: Translation3; + { + let res = &*self * UnitDualQuaternion::from_parts(rhs, UnitQuaternion::identity()); + self.as_mut_unchecked().real.coords.copy_from(&res.as_ref().real.coords); + self.as_mut_unchecked().dual.coords.copy_from(&res.as_ref().dual.coords); + };); + +dual_quaternion_op_impl!( + MulAssign, mul_assign; + (U4, U1), (U4, U1); + self: UnitDualQuaternion, rhs: &'b Translation3; + *self *= rhs.clone(); 'b); + +// UnitDualQuaternion ÷= Translation3 +dual_quaternion_op_impl!( + DivAssign, div_assign; + (U4, U1), (U4, U1); + self: UnitDualQuaternion, rhs: &'b Translation3; + #[allow(clippy::suspicious_op_assign_impl)] + { + let res = &*self * UnitDualQuaternion::from_parts(rhs.inverse(), UnitQuaternion::identity()); + self.as_mut_unchecked().real.coords.copy_from(&res.as_ref().real.coords); + 
self.as_mut_unchecked().dual.coords.copy_from(&res.as_ref().dual.coords); + }; + 'b); + +dual_quaternion_op_impl!( + DivAssign, div_assign; + (U4, U1), (U4, U1); + self: UnitDualQuaternion, rhs: Translation3; + *self /= &rhs; ); + +// UnitDualQuaternion ×= Isometry3 +dual_quaternion_op_impl!( + MulAssign, mul_assign; + (U4, U1), (U3, U1); + self: UnitDualQuaternion, rhs: &'b Isometry3 => U3, U1; + { + let res = &*self * rhs; + self.as_mut_unchecked().real.coords.copy_from(&res.as_ref().real.coords); + self.as_mut_unchecked().dual.coords.copy_from(&res.as_ref().dual.coords); + }; + 'b); + +dual_quaternion_op_impl!( + MulAssign, mul_assign; + (U4, U1), (U3, U1); + self: UnitDualQuaternion, rhs: Isometry3 => U3, U1; + *self *= &rhs; ); + +// UnitDualQuaternion ÷= Isometry3 +dual_quaternion_op_impl!( + DivAssign, div_assign; + (U4, U1), (U3, U1); + self: UnitDualQuaternion, rhs: &'b Isometry3 => U3, U1; + { + let res = &*self / rhs; + self.as_mut_unchecked().real.coords.copy_from(&res.as_ref().real.coords); + self.as_mut_unchecked().dual.coords.copy_from(&res.as_ref().dual.coords); + }; + 'b); + +dual_quaternion_op_impl!( + DivAssign, div_assign; + (U4, U1), (U3, U1); + self: UnitDualQuaternion, rhs: Isometry3 => U3, U1; + *self /= &rhs; ); + +macro_rules! scalar_op_impl( + ($($Op: ident, $op: ident, $OpAssign: ident, $op_assign: ident);* $(;)*) => {$( + impl $Op for DualQuaternion + where N::Element: SimdRealField { + type Output = DualQuaternion; + + #[inline] + fn $op(self, n: N) -> Self::Output { + DualQuaternion::from_real_and_dual( + self.real.$op(n), + self.dual.$op(n) + ) + } + } + + impl<'a, N: SimdRealField> $Op for &'a DualQuaternion + where N::Element: SimdRealField { + type Output = DualQuaternion; + + #[inline] + fn $op(self, n: N) -> Self::Output { + DualQuaternion::from_real_and_dual( + self.real.$op(n), + self.dual.$op(n) + ) + } + } + + impl $OpAssign for DualQuaternion + where N::Element: SimdRealField { + + #[inline] + fn $op_assign(&mut self, n: N) { + self.real.$op_assign(n); + self.dual.$op_assign(n); + } + } + )*} +); + +scalar_op_impl!( + Mul, mul, MulAssign, mul_assign; + Div, div, DivAssign, div_assign; +); diff --git a/src/geometry/isometry_construction.rs b/src/geometry/isometry_construction.rs index 0f487547..a64f8208 100644 --- a/src/geometry/isometry_construction.rs +++ b/src/geometry/isometry_construction.rs @@ -102,7 +102,7 @@ where DefaultAllocator: Allocator, { #[inline] - fn arbitrary(rng: &mut G) -> Self { + fn arbitrary(rng: &mut Gen) -> Self { Self::from_parts(Arbitrary::arbitrary(rng), Arbitrary::arbitrary(rng)) } } diff --git a/src/geometry/isometry_conversion.rs b/src/geometry/isometry_conversion.rs index e3416cac..0c89958a 100644 --- a/src/geometry/isometry_conversion.rs +++ b/src/geometry/isometry_conversion.rs @@ -6,7 +6,8 @@ use crate::base::dimension::{DimMin, DimName, DimNameAdd, DimNameSum, U1}; use crate::base::{DefaultAllocator, MatrixN, Scalar}; use crate::geometry::{ - AbstractRotation, Isometry, Similarity, SuperTCategoryOf, TAffine, Transform, Translation, + AbstractRotation, Isometry, Isometry3, Similarity, SuperTCategoryOf, TAffine, Transform, + Translation, UnitDualQuaternion, UnitQuaternion, }; /* @@ -14,6 +15,7 @@ use crate::geometry::{ * ============================================= * * Isometry -> Isometry + * Isometry3 -> UnitDualQuaternion * Isometry -> Similarity * Isometry -> Transform * Isometry -> Matrix (homogeneous) @@ -47,6 +49,30 @@ where } } +impl SubsetOf> for Isometry3 +where + N1: RealField, + N2: RealField + 
SupersetOf, +{ + #[inline] + fn to_superset(&self) -> UnitDualQuaternion { + let dq = UnitDualQuaternion::::from_isometry(self); + dq.to_superset() + } + + #[inline] + fn is_in_subset(dq: &UnitDualQuaternion) -> bool { + crate::is_convertible::<_, UnitQuaternion>(&dq.rotation()) + && crate::is_convertible::<_, Translation>(&dq.translation()) + } + + #[inline] + fn from_superset_unchecked(dq: &UnitDualQuaternion) -> Self { + let dq: UnitDualQuaternion = crate::convert_ref_unchecked(dq); + dq.to_isometry() + } +} + impl SubsetOf> for Isometry where N1: RealField, diff --git a/src/geometry/mod.rs b/src/geometry/mod.rs index 19313b65..5fa8c094 100644 --- a/src/geometry/mod.rs +++ b/src/geometry/mod.rs @@ -36,7 +36,10 @@ mod quaternion_ops; mod quaternion_simba; mod dual_quaternion; +#[cfg(feature = "alga")] +mod dual_quaternion_alga; mod dual_quaternion_construction; +mod dual_quaternion_conversion; mod dual_quaternion_ops; mod unit_complex; diff --git a/src/geometry/orthographic.rs b/src/geometry/orthographic.rs index bd1e73c7..bf85a198 100644 --- a/src/geometry/orthographic.rs +++ b/src/geometry/orthographic.rs @@ -705,7 +705,7 @@ impl Arbitrary for Orthographic3 where Matrix4: Send, { - fn arbitrary(g: &mut G) -> Self { + fn arbitrary(g: &mut Gen) -> Self { let left = Arbitrary::arbitrary(g); let right = helper::reject(g, |x: &N| *x > left); let bottom = Arbitrary::arbitrary(g); diff --git a/src/geometry/perspective.rs b/src/geometry/perspective.rs index bd8abac2..066ca57a 100644 --- a/src/geometry/perspective.rs +++ b/src/geometry/perspective.rs @@ -283,7 +283,7 @@ where #[cfg(feature = "arbitrary")] impl Arbitrary for Perspective3 { - fn arbitrary(g: &mut G) -> Self { + fn arbitrary(g: &mut Gen) -> Self { let znear = Arbitrary::arbitrary(g); let zfar = helper::reject(g, |&x: &N| !(x - znear).is_zero()); let aspect = helper::reject(g, |&x: &N| !x.is_zero()); diff --git a/src/geometry/point.rs b/src/geometry/point.rs index 75410ccd..eeda07e3 100644 --- a/src/geometry/point.rs +++ b/src/geometry/point.rs @@ -65,6 +65,24 @@ where { } +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Zeroable for Point +where + VectorN: bytemuck::Zeroable, + DefaultAllocator: Allocator, +{ +} + +#[cfg(feature = "bytemuck")] +unsafe impl bytemuck::Pod for Point +where + N: Copy, + VectorN: bytemuck::Pod, + DefaultAllocator: Allocator, + >::Buffer: Copy, +{ +} + #[cfg(feature = "serde-serialize")] impl Serialize for Point where @@ -181,7 +199,12 @@ where D: DimNameAdd, DefaultAllocator: Allocator>, { - let mut res = unsafe { VectorN::<_, DimNameSum>::new_uninitialized() }; + let mut res = unsafe { + crate::unimplemented_or_uninitialized_generic!( + as DimName>::name(), + U1 + ) + }; res.fixed_slice_mut::(0, 0).copy_from(&self.coords); res[(D::dim(), 0)] = N::one(); diff --git a/src/geometry/point_construction.rs b/src/geometry/point_construction.rs index f567cfac..e132304b 100644 --- a/src/geometry/point_construction.rs +++ b/src/geometry/point_construction.rs @@ -24,7 +24,10 @@ where /// Creates a new point with uninitialized coordinates. #[inline] pub unsafe fn new_uninitialized() -> Self { - Self::from(VectorN::new_uninitialized()) + Self::from(crate::unimplemented_or_uninitialized_generic!( + D::name(), + U1 + )) } /// Creates a new point with all coordinates equal to zero. 
@@ -153,7 +156,7 @@ where
     <DefaultAllocator as Allocator<N, D>>::Buffer: Send,
 {
     #[inline]
-    fn arbitrary<G: Gen>(g: &mut G) -> Self {
+    fn arbitrary(g: &mut Gen) -> Self {
         Self::from(VectorN::arbitrary(g))
     }
 }
diff --git a/src/geometry/quaternion.rs b/src/geometry/quaternion.rs
index a5db1c69..172f2e66 100755
--- a/src/geometry/quaternion.rs
+++ b/src/geometry/quaternion.rs
@@ -40,6 +40,17 @@ impl<N: Scalar + Zero> Default for Quaternion<N> {
     }
 }
 
+#[cfg(feature = "bytemuck")]
+unsafe impl<N: Scalar> bytemuck::Zeroable for Quaternion<N> where Vector4<N>: bytemuck::Zeroable {}
+
+#[cfg(feature = "bytemuck")]
+unsafe impl<N: Scalar> bytemuck::Pod for Quaternion<N>
+where
+    Vector4<N>: bytemuck::Pod,
+    N: Copy,
+{
+}
+
 #[cfg(feature = "abomonation-serialize")]
 impl<N: RealField> Abomonation for Quaternion<N>
 where
@@ -1542,6 +1553,17 @@ where
     pub fn inverse_transform_unit_vector(&self, v: &Unit<Vector3<N>>) -> Unit<Vector3<N>> {
         self.inverse() * v
     }
+
+    /// Appends to `self` a rotation given in axis-angle form, using a linearized formulation.
+    ///
+    /// This is a faster, but approximate, way to compute `UnitQuaternion::new(axisangle) * self`.
+    #[inline]
+    pub fn append_axisangle_linearized(&self, axisangle: &Vector3<N>) -> Self {
+        let half: N = crate::convert(0.5);
+        let q1 = self.into_inner();
+        let q2 = Quaternion::from_imag(axisangle * half);
+        Unit::new_normalize(q1 + q2 * q1)
+    }
 }
 
 impl<N: SimdRealField> Default for UnitQuaternion<N> {
diff --git a/src/geometry/quaternion_construction.rs b/src/geometry/quaternion_construction.rs
index 03d87d39..ec46b68b 100644
--- a/src/geometry/quaternion_construction.rs
+++ b/src/geometry/quaternion_construction.rs
@@ -160,7 +160,7 @@ where
     Owned<N, U4>: Send,
 {
     #[inline]
-    fn arbitrary<G: Gen>(g: &mut G) -> Self {
+    fn arbitrary(g: &mut Gen) -> Self {
         Self::new(
             N::arbitrary(g),
             N::arbitrary(g),
@@ -266,6 +266,17 @@ where
         Self::new_unchecked(q)
     }
 
+    /// Builds a unit quaternion from a basis assumed to be orthonormal.
+    ///
+    /// In order to get a valid unit quaternion, the input must be an
+    /// orthonormal basis, i.e., all vectors are normalized and they are
+    /// all orthogonal to each other. These invariants are not checked
+    /// by this method.
+    pub fn from_basis_unchecked(basis: &[Vector3<N>; 3]) -> Self {
+        let rot = Rotation3::from_basis_unchecked(basis);
+        Self::from_rotation_matrix(&rot)
+    }
+
     /// Builds an unit quaternion from a rotation matrix.
/// /// # Example @@ -834,7 +845,7 @@ where Owned: Send, { #[inline] - fn arbitrary(g: &mut G) -> Self { + fn arbitrary(g: &mut Gen) -> Self { let axisangle = Vector3::arbitrary(g); Self::from_scaled_axis(axisangle) } diff --git a/src/geometry/quaternion_conversion.rs b/src/geometry/quaternion_conversion.rs index b57cc52b..2707419e 100644 --- a/src/geometry/quaternion_conversion.rs +++ b/src/geometry/quaternion_conversion.rs @@ -10,7 +10,7 @@ use crate::base::dimension::U3; use crate::base::{Matrix3, Matrix4, Scalar, Vector4}; use crate::geometry::{ AbstractRotation, Isometry, Quaternion, Rotation, Rotation3, Similarity, SuperTCategoryOf, - TAffine, Transform, Translation, UnitQuaternion, + TAffine, Transform, Translation, UnitDualQuaternion, UnitQuaternion, }; /* @@ -21,6 +21,7 @@ use crate::geometry::{ * UnitQuaternion -> UnitQuaternion * UnitQuaternion -> Rotation * UnitQuaternion -> Isometry + * UnitQuaternion -> UnitDualQuaternion * UnitQuaternion -> Similarity * UnitQuaternion -> Transform * UnitQuaternion -> Matrix (homogeneous) @@ -121,6 +122,28 @@ where } } +impl SubsetOf> for UnitQuaternion +where + N1: RealField, + N2: RealField + SupersetOf, +{ + #[inline] + fn to_superset(&self) -> UnitDualQuaternion { + let q: UnitQuaternion = crate::convert_ref(self); + UnitDualQuaternion::from_rotation(q) + } + + #[inline] + fn is_in_subset(dq: &UnitDualQuaternion) -> bool { + dq.translation().vector.is_zero() + } + + #[inline] + fn from_superset_unchecked(dq: &UnitDualQuaternion) -> Self { + crate::convert_unchecked(dq.rotation()) + } +} + impl SubsetOf> for UnitQuaternion where N1: RealField, diff --git a/src/geometry/rotation_conversion.rs b/src/geometry/rotation_conversion.rs index c49b92c4..accf9a41 100644 --- a/src/geometry/rotation_conversion.rs +++ b/src/geometry/rotation_conversion.rs @@ -12,7 +12,7 @@ use crate::base::{DefaultAllocator, Matrix2, Matrix3, Matrix4, MatrixN, Scalar}; use crate::geometry::{ AbstractRotation, Isometry, Rotation, Rotation2, Rotation3, Similarity, SuperTCategoryOf, - TAffine, Transform, Translation, UnitComplex, UnitQuaternion, + TAffine, Transform, Translation, UnitComplex, UnitDualQuaternion, UnitQuaternion, }; /* @@ -21,6 +21,7 @@ use crate::geometry::{ * * Rotation -> Rotation * Rotation3 -> UnitQuaternion + * Rotation3 -> UnitDualQuaternion * Rotation2 -> UnitComplex * Rotation -> Isometry * Rotation -> Similarity @@ -75,6 +76,31 @@ where } } +impl SubsetOf> for Rotation3 +where + N1: RealField, + N2: RealField + SupersetOf, +{ + #[inline] + fn to_superset(&self) -> UnitDualQuaternion { + let q = UnitQuaternion::::from_rotation_matrix(self); + let dq = UnitDualQuaternion::from_rotation(q); + dq.to_superset() + } + + #[inline] + fn is_in_subset(dq: &UnitDualQuaternion) -> bool { + crate::is_convertible::<_, UnitQuaternion>(&dq.rotation()) + && dq.translation().vector.is_zero() + } + + #[inline] + fn from_superset_unchecked(dq: &UnitDualQuaternion) -> Self { + let dq: UnitDualQuaternion = crate::convert_ref_unchecked(dq); + dq.rotation().to_rotation_matrix() + } +} + impl SubsetOf> for Rotation2 where N1: RealField, diff --git a/src/geometry/rotation_specialization.rs b/src/geometry/rotation_specialization.rs index afc180d8..de87b40b 100644 --- a/src/geometry/rotation_specialization.rs +++ b/src/geometry/rotation_specialization.rs @@ -12,7 +12,7 @@ use std::ops::Neg; use crate::base::dimension::{U1, U2, U3}; use crate::base::storage::Storage; -use crate::base::{Matrix2, Matrix3, MatrixN, Unit, Vector, Vector1, Vector3, VectorN}; +use 
crate::base::{Matrix2, Matrix3, MatrixN, Unit, Vector, Vector1, Vector2, Vector3, VectorN}; use crate::geometry::{Rotation2, Rotation3, UnitComplex, UnitQuaternion}; @@ -53,6 +53,17 @@ impl Rotation2 { /// # Construction from an existing 2D matrix or rotations impl Rotation2 { + /// Builds a rotation from a basis assumed to be orthonormal. + /// + /// In order to get a valid rotation, the input must be an + /// orthonormal basis, i.e., all vectors are normalized, and they are + /// all orthogonal to each other. These invariants are not checked + /// by this method. + pub fn from_basis_unchecked(basis: &[Vector2; 2]) -> Self { + let mat = Matrix2::from_columns(&basis[..]); + Self::from_matrix_unchecked(mat) + } + /// Builds a rotation matrix by extracting the rotation part of the given transformation `m`. /// /// This is an iterative method. See `.from_matrix_eps` to provide more @@ -264,7 +275,7 @@ where Owned: Send, { #[inline] - fn arbitrary(g: &mut G) -> Self { + fn arbitrary(g: &mut Gen) -> Self { Self::new(N::arbitrary(g)) } } @@ -655,6 +666,17 @@ where } } + /// Builds a rotation from a basis assumed to be orthonormal. + /// + /// In order to get a valid rotation, the input must be an + /// orthonormal basis, i.e., all vectors are normalized, and they are + /// all orthogonal to each other. These invariants are not checked + /// by this method. + pub fn from_basis_unchecked(basis: &[Vector3; 3]) -> Self { + let mat = Matrix3::from_columns(&basis[..]); + Self::from_matrix_unchecked(mat) + } + /// Builds a rotation matrix by extracting the rotation part of the given transformation `m`. /// /// This is an iterative method. See `.from_matrix_eps` to provide more @@ -939,7 +961,7 @@ where Owned: Send, { #[inline] - fn arbitrary(g: &mut G) -> Self { + fn arbitrary(g: &mut Gen) -> Self { Self::new(VectorN::arbitrary(g)) } } diff --git a/src/geometry/similarity_construction.rs b/src/geometry/similarity_construction.rs index 510758cf..c228c5d0 100644 --- a/src/geometry/similarity_construction.rs +++ b/src/geometry/similarity_construction.rs @@ -114,7 +114,7 @@ where Owned: Send, { #[inline] - fn arbitrary(rng: &mut G) -> Self { + fn arbitrary(rng: &mut Gen) -> Self { let mut s: N = Arbitrary::arbitrary(rng); while s.is_zero() { s = Arbitrary::arbitrary(rng) } diff --git a/src/geometry/translation_construction.rs b/src/geometry/translation_construction.rs index 9466816d..d9061ba0 100644 --- a/src/geometry/translation_construction.rs +++ b/src/geometry/translation_construction.rs @@ -61,13 +61,13 @@ where } #[cfg(feature = "arbitrary")] -impl Arbitrary for Translation +impl Arbitrary for Translation where DefaultAllocator: Allocator, Owned: Send, { #[inline] - fn arbitrary(rng: &mut G) -> Self { + fn arbitrary(rng: &mut Gen) -> Self { let v: VectorN = Arbitrary::arbitrary(rng); Self::from(v) } diff --git a/src/geometry/translation_conversion.rs b/src/geometry/translation_conversion.rs index 0754a678..9e915073 100644 --- a/src/geometry/translation_conversion.rs +++ b/src/geometry/translation_conversion.rs @@ -9,6 +9,7 @@ use crate::base::{DefaultAllocator, MatrixN, Scalar, VectorN}; use crate::geometry::{ AbstractRotation, Isometry, Similarity, SuperTCategoryOf, TAffine, Transform, Translation, + Translation3, UnitDualQuaternion, UnitQuaternion, }; /* @@ -17,6 +18,7 @@ use crate::geometry::{ * * Translation -> Translation * Translation -> Isometry + * Translation3 -> UnitDualQuaternion * Translation -> Similarity * Translation -> Transform * Translation -> Matrix (homogeneous) @@ 
-69,6 +71,30 @@ where } } +impl SubsetOf> for Translation3 +where + N1: RealField, + N2: RealField + SupersetOf, +{ + #[inline] + fn to_superset(&self) -> UnitDualQuaternion { + let dq = UnitDualQuaternion::::from_parts(self.clone(), UnitQuaternion::identity()); + dq.to_superset() + } + + #[inline] + fn is_in_subset(dq: &UnitDualQuaternion) -> bool { + crate::is_convertible::<_, Translation>(&dq.translation()) + && dq.rotation() == UnitQuaternion::identity() + } + + #[inline] + fn from_superset_unchecked(dq: &UnitDualQuaternion) -> Self { + let dq: UnitDualQuaternion = crate::convert_ref_unchecked(dq); + dq.translation() + } +} + impl SubsetOf> for Translation where N1: RealField, diff --git a/src/geometry/unit_complex_construction.rs b/src/geometry/unit_complex_construction.rs index 65d36888..114fea6e 100644 --- a/src/geometry/unit_complex_construction.rs +++ b/src/geometry/unit_complex_construction.rs @@ -8,7 +8,7 @@ use rand::Rng; use crate::base::dimension::{U1, U2}; use crate::base::storage::Storage; -use crate::base::{Matrix2, Unit, Vector}; +use crate::base::{Matrix2, Unit, Vector, Vector2}; use crate::geometry::{Rotation2, UnitComplex}; use simba::scalar::RealField; use simba::simd::SimdRealField; @@ -164,6 +164,18 @@ where Self::new_unchecked(Complex::new(rotmat[(0, 0)], rotmat[(1, 0)])) } + /// Builds a rotation from a basis assumed to be orthonormal. + /// + /// In order to get a valid unit complex, the input must be an + /// orthonormal basis, i.e., all vectors are normalized, and they are + /// all orthogonal to each other. These invariants are not checked + /// by this method. + pub fn from_basis_unchecked(basis: &[Vector2; 2]) -> Self { + let mat = Matrix2::from_columns(&basis[..]); + let rot = Rotation2::from_matrix_unchecked(mat); + Self::from_rotation_matrix(&rot) + } + /// Builds a unit complex by extracting the rotation part of the given transformation `m`. /// /// This is an iterative method. See `.from_matrix_eps` to provide more @@ -383,7 +395,7 @@ where N::Element: SimdRealField, { #[inline] - fn arbitrary(g: &mut G) -> Self { + fn arbitrary(g: &mut Gen) -> Self { UnitComplex::from_angle(N::arbitrary(g)) } } diff --git a/src/lib.rs b/src/lib.rs index 12a329be..5e23ab32 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -15,7 +15,8 @@ Simply add the following to your `Cargo.toml` file: ```.ignore [dependencies] -nalgebra = "0.23" +// TODO: replace the * by the latest version. +nalgebra = "*" ``` @@ -82,10 +83,11 @@ an optimized set of tools for computer graphics and physics. 
Those features incl #![deny(missing_docs)] #![doc( html_favicon_url = "https://nalgebra.org/img/favicon.ico", - html_root_url = "https://nalgebra.org/rustdoc" + html_root_url = "https://docs.rs/nalgebra/0.25.0" )] #![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr(all(feature = "alloc", not(feature = "std")), feature(alloc))] +#![cfg_attr(feature = "no_unsound_assume_init", allow(unreachable_code))] #[cfg(feature = "arbitrary")] extern crate quickcheck; @@ -127,6 +129,8 @@ pub mod geometry; #[cfg(feature = "io")] pub mod io; pub mod linalg; +#[cfg(feature = "proptest-support")] +pub mod proptest; #[cfg(feature = "sparse")] pub mod sparse; @@ -151,7 +155,7 @@ pub use num_complex::Complex; pub use simba::scalar::{ ClosedAdd, ClosedDiv, ClosedMul, ClosedSub, ComplexField, Field, RealField, }; -pub use simba::simd::{SimdBool, SimdComplexField, SimdPartialOrd, SimdRealField}; +pub use simba::simd::{SimdBool, SimdComplexField, SimdPartialOrd, SimdRealField, SimdValue}; /// Gets the multiplicative identity element. /// diff --git a/src/linalg/bidiagonal.rs b/src/linalg/bidiagonal.rs index 3ae38432..33fc81e6 100644 --- a/src/linalg/bidiagonal.rs +++ b/src/linalg/bidiagonal.rs @@ -81,11 +81,12 @@ where "Cannot compute the bidiagonalization of an empty matrix." ); - let mut diagonal = unsafe { MatrixMN::new_uninitialized_generic(min_nrows_ncols, U1) }; + let mut diagonal = + unsafe { crate::unimplemented_or_uninitialized_generic!(min_nrows_ncols, U1) }; let mut off_diagonal = - unsafe { MatrixMN::new_uninitialized_generic(min_nrows_ncols.sub(U1), U1) }; - let mut axis_packed = unsafe { MatrixMN::new_uninitialized_generic(ncols, U1) }; - let mut work = unsafe { MatrixMN::new_uninitialized_generic(nrows, U1) }; + unsafe { crate::unimplemented_or_uninitialized_generic!(min_nrows_ncols.sub(U1), U1) }; + let mut axis_packed = unsafe { crate::unimplemented_or_uninitialized_generic!(ncols, U1) }; + let mut work = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, U1) }; let upper_diagonal = nrows.value() >= ncols.value(); if upper_diagonal { @@ -239,8 +240,9 @@ where let min_nrows_ncols = nrows.min(ncols); let mut res = Matrix::identity_generic(min_nrows_ncols, ncols); - let mut work = unsafe { MatrixMN::new_uninitialized_generic(min_nrows_ncols, U1) }; - let mut axis_packed = unsafe { MatrixMN::new_uninitialized_generic(ncols, U1) }; + let mut work = + unsafe { crate::unimplemented_or_uninitialized_generic!(min_nrows_ncols, U1) }; + let mut axis_packed = unsafe { crate::unimplemented_or_uninitialized_generic!(ncols, U1) }; let shift = self.axis_shift().1; diff --git a/src/linalg/cholesky.rs b/src/linalg/cholesky.rs index bd2f9281..a6757b08 100644 --- a/src/linalg/cholesky.rs +++ b/src/linalg/cholesky.rs @@ -223,9 +223,9 @@ where // loads the data into a new matrix with an additional jth row/column let mut chol = unsafe { - Matrix::new_uninitialized_generic( + crate::unimplemented_or_uninitialized_generic!( self.chol.data.shape().0.add(U1), - self.chol.data.shape().1.add(U1), + self.chol.data.shape().1.add(U1) ) }; chol.slice_range_mut(..j, ..j) @@ -288,9 +288,9 @@ where // loads the data into a new matrix except for the jth row/column let mut chol = unsafe { - Matrix::new_uninitialized_generic( + crate::unimplemented_or_uninitialized_generic!( self.chol.data.shape().0.sub(U1), - self.chol.data.shape().1.sub(U1), + self.chol.data.shape().1.sub(U1) ) }; chol.slice_range_mut(..j, ..j) diff --git a/src/linalg/col_piv_qr.rs b/src/linalg/col_piv_qr.rs new file mode 100644 index 
00000000..302dcd66 --- /dev/null +++ b/src/linalg/col_piv_qr.rs @@ -0,0 +1,334 @@ +use num::Zero; +#[cfg(feature = "serde-serialize")] +use serde::{Deserialize, Serialize}; + +use crate::allocator::{Allocator, Reallocator}; +use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Unit, VectorN}; +use crate::constraint::{SameNumberOfRows, ShapeConstraint}; +use crate::dimension::{Dim, DimMin, DimMinimum, U1}; +use crate::storage::{Storage, StorageMut}; +use crate::ComplexField; + +use crate::geometry::Reflection; +use crate::linalg::{householder, PermutationSequence}; + +/// The QR decomposition (with column pivoting) of a general matrix. +#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] +#[cfg_attr( + feature = "serde-serialize", + serde(bound(serialize = "DefaultAllocator: Allocator + + Allocator>, + MatrixMN: Serialize, + PermutationSequence>: Serialize, + VectorN>: Serialize")) +)] +#[cfg_attr( + feature = "serde-serialize", + serde(bound(deserialize = "DefaultAllocator: Allocator + + Allocator>, + MatrixMN: Deserialize<'de>, + PermutationSequence>: Deserialize<'de>, + VectorN>: Deserialize<'de>")) +)] +#[derive(Clone, Debug)] +pub struct ColPivQR, C: Dim> +where + DefaultAllocator: Allocator + + Allocator> + + Allocator<(usize, usize), DimMinimum>, +{ + col_piv_qr: MatrixMN, + p: PermutationSequence>, + diag: VectorN>, +} + +impl, C: Dim> Copy for ColPivQR +where + DefaultAllocator: Allocator + + Allocator> + + Allocator<(usize, usize), DimMinimum>, + MatrixMN: Copy, + PermutationSequence>: Copy, + VectorN>: Copy, +{ +} + +impl, C: Dim> ColPivQR +where + DefaultAllocator: Allocator + + Allocator + + Allocator> + + Allocator<(usize, usize), DimMinimum>, +{ + /// Computes the ColPivQR decomposition using householder reflections. + pub fn new(mut matrix: MatrixMN) -> Self { + let (nrows, ncols) = matrix.data.shape(); + let min_nrows_ncols = nrows.min(ncols); + let mut p = PermutationSequence::identity_generic(min_nrows_ncols); + + let mut diag = + unsafe { crate::unimplemented_or_uninitialized_generic!(min_nrows_ncols, U1) }; + + if min_nrows_ncols.value() == 0 { + return ColPivQR { + col_piv_qr: matrix, + p, + diag, + }; + } + + for i in 0..min_nrows_ncols.value() { + let piv = matrix.slice_range(i.., i..).icamax_full(); + let col_piv = piv.1 + i; + matrix.swap_columns(i, col_piv); + p.append_permutation(i, col_piv); + + householder::clear_column_unchecked(&mut matrix, &mut diag[i], i, 0, None); + } + + ColPivQR { + col_piv_qr: matrix, + p, + diag, + } + } + + /// Retrieves the upper trapezoidal submatrix `R` of this decomposition. + #[inline] + pub fn r(&self) -> MatrixMN, C> + where + DefaultAllocator: Allocator, C>, + { + let (nrows, ncols) = self.col_piv_qr.data.shape(); + let mut res = self + .col_piv_qr + .rows_generic(0, nrows.min(ncols)) + .upper_triangle(); + res.set_partial_diagonal(self.diag.iter().map(|e| N::from_real(e.modulus()))); + res + } + + /// Retrieves the upper trapezoidal submatrix `R` of this decomposition. + /// + /// This is usually faster than `r` but consumes `self`. + #[inline] + pub fn unpack_r(self) -> MatrixMN, C> + where + DefaultAllocator: Reallocator, C>, + { + let (nrows, ncols) = self.col_piv_qr.data.shape(); + let mut res = self + .col_piv_qr + .resize_generic(nrows.min(ncols), ncols, N::zero()); + res.fill_lower_triangle(N::zero(), 1); + res.set_partial_diagonal(self.diag.iter().map(|e| N::from_real(e.modulus()))); + res + } + + /// Computes the orthogonal matrix `Q` of this decomposition. 
+ pub fn q(&self) -> MatrixMN> + where + DefaultAllocator: Allocator>, + { + let (nrows, ncols) = self.col_piv_qr.data.shape(); + + // NOTE: we could build the identity matrix and call q_mul on it. + // Instead we don't so that we take into account the matrix sparseness. + let mut res = Matrix::identity_generic(nrows, nrows.min(ncols)); + let dim = self.diag.len(); + + for i in (0..dim).rev() { + let axis = self.col_piv_qr.slice_range(i.., i); + // TODO: sometimes, the axis might have a zero magnitude. + let refl = Reflection::new(Unit::new_unchecked(axis), N::zero()); + + let mut res_rows = res.slice_range_mut(i.., i..); + refl.reflect_with_sign(&mut res_rows, self.diag[i].signum()); + } + + res + } + /// Retrieves the column permutation of this decomposition. + #[inline] + pub fn p(&self) -> &PermutationSequence> { + &self.p + } + + /// Unpacks this decomposition into its three factors: `Q`, `R`, and the column permutation `P`. + pub fn unpack( + self, + ) -> ( + MatrixMN>, + MatrixMN, C>, + PermutationSequence>, + ) + where + DimMinimum: DimMin>, + DefaultAllocator: Allocator> + + Reallocator, C> + + Allocator<(usize, usize), DimMinimum>, + { + (self.q(), self.r(), self.p) + } + + #[doc(hidden)] + pub fn col_piv_qr_internal(&self) -> &MatrixMN { + &self.col_piv_qr + } + + /// Multiplies the provided matrix by the transpose of the `Q` matrix of this decomposition. + pub fn q_tr_mul(&self, rhs: &mut Matrix) + where + S2: StorageMut, + { + let dim = self.diag.len(); + + for i in 0..dim { + let axis = self.col_piv_qr.slice_range(i.., i); + let refl = Reflection::new(Unit::new_unchecked(axis), N::zero()); + + let mut rhs_rows = rhs.rows_range_mut(i..); + refl.reflect_with_sign(&mut rhs_rows, self.diag[i].signum().conjugate()); + } + } +} + +impl> ColPivQR +where + DefaultAllocator: + Allocator + Allocator + Allocator<(usize, usize), DimMinimum>, +{ + /// Solves the linear system `self * x = b`, where `x` is the unknown to be determined. + /// + /// Returns `None` if `self` is not invertible. + pub fn solve( + &self, + b: &Matrix, + ) -> Option> + where + S2: StorageMut, + ShapeConstraint: SameNumberOfRows, + DefaultAllocator: Allocator, + { + let mut res = b.clone_owned(); + + if self.solve_mut(&mut res) { + Some(res) + } else { + None + } + } + + /// Solves the linear system `self * x = b`, where `x` is the unknown to be determined. + /// + /// If the decomposed matrix is not invertible, this returns `false` and its input `b` is + /// overwritten with garbage. + pub fn solve_mut(&self, b: &mut Matrix) -> bool + where + S2: StorageMut, + ShapeConstraint: SameNumberOfRows, + { + assert_eq!( + self.col_piv_qr.nrows(), + b.nrows(), + "ColPivQR solve matrix dimension mismatch." + ); + assert!( + self.col_piv_qr.is_square(), + "ColPivQR solve: unable to solve a non-square system." + ); + + self.q_tr_mul(b); + let solved = self.solve_upper_triangular_mut(b); + self.p.inv_permute_rows(b); + + solved + } + + // TODO: duplicate code from the `solve` module.
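Not part of the diff itself: a small usage sketch of the `ColPivQR` API defined above, on a square invertible system (the numbers are arbitrary):

```rust
use nalgebra::{Matrix3, Vector3};

fn main() {
    let a = Matrix3::new(
        1.0, 2.0, 3.0,
        4.0, 5.0, 6.0,
        7.0, 8.0, 10.0,
    );
    let b = Vector3::new(6.0, 15.0, 25.0);

    // Factor once; `solve` applies Qᵀ, back-substitutes through R,
    // then undoes the column permutation.
    let qr = a.col_piv_qr();
    assert!(qr.is_invertible());

    let x = qr.solve(&b).expect("the matrix is invertible");
    assert!((a * x - b).norm() < 1.0e-10);
}
```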
+ fn solve_upper_triangular_mut( + &self, + b: &mut Matrix, + ) -> bool + where + S2: StorageMut, + ShapeConstraint: SameNumberOfRows, + { + let dim = self.col_piv_qr.nrows(); + + for k in 0..b.ncols() { + let mut b = b.column_mut(k); + for i in (0..dim).rev() { + let coeff; + + unsafe { + let diag = self.diag.vget_unchecked(i).modulus(); + + if diag.is_zero() { + return false; + } + + coeff = b.vget_unchecked(i).unscale(diag); + *b.vget_unchecked_mut(i) = coeff; + } + + b.rows_range_mut(..i) + .axpy(-coeff, &self.col_piv_qr.slice_range(..i, i), N::one()); + } + } + + true + } + + /// Computes the inverse of the decomposed matrix. + /// + /// Returns `None` if the decomposed matrix is not invertible. + pub fn try_inverse(&self) -> Option> { + assert!( + self.col_piv_qr.is_square(), + "ColPivQR inverse: unable to compute the inverse of a non-square matrix." + ); + + // TODO: is there a less naive method? + let (nrows, ncols) = self.col_piv_qr.data.shape(); + let mut res = MatrixN::identity_generic(nrows, ncols); + + if self.solve_mut(&mut res) { + Some(res) + } else { + None + } + } + + /// Indicates if the decomposed matrix is invertible. + pub fn is_invertible(&self) -> bool { + assert!( + self.col_piv_qr.is_square(), + "ColPivQR: unable to test the invertibility of a non-square matrix." + ); + + for i in 0..self.diag.len() { + if self.diag[i].is_zero() { + return false; + } + } + + true + } + + /// Computes the determinant of the decomposed matrix. + pub fn determinant(&self) -> N { + let dim = self.col_piv_qr.nrows(); + assert!( + self.col_piv_qr.is_square(), + "ColPivQR determinant: unable to compute the determinant of a non-square matrix." + ); + + let mut res = N::one(); + for i in 0..dim { + res *= unsafe { *self.diag.vget_unchecked(i) }; + } + + res * self.p.determinant() + } +} diff --git a/src/linalg/decomposition.rs b/src/linalg/decomposition.rs index 67cc4c6a..6428856b 100644 --- a/src/linalg/decomposition.rs +++ b/src/linalg/decomposition.rs @@ -1,8 +1,8 @@ use crate::storage::Storage; use crate::{ - Allocator, Bidiagonal, Cholesky, ComplexField, DefaultAllocator, Dim, DimDiff, DimMin, - DimMinimum, DimSub, FullPivLU, Hessenberg, Matrix, Schur, SymmetricEigen, SymmetricTridiagonal, - LU, QR, SVD, U1, + Allocator, Bidiagonal, Cholesky, ColPivQR, ComplexField, DefaultAllocator, Dim, DimDiff, + DimMin, DimMinimum, DimSub, FullPivLU, Hessenberg, Matrix, RealField, Schur, SymmetricEigen, + SymmetricTridiagonal, LU, QR, SVD, U1, UDU, }; /// # Rectangular matrix decomposition @@ -13,8 +13,9 @@ use crate::{ /// | Decomposition | Factors | Details | /// | -------------------------|---------------------|--------------| /// | QR | `Q * R` | `Q` is a unitary matrix, and `R` is upper-triangular. | +/// | QR with column pivoting | `Q * R * P⁻¹` | `Q` is a unitary matrix, and `R` is upper-triangular. `P` is a permutation matrix. | /// | LU with partial pivoting | `P⁻¹ * L * U` | `L` is lower-triangular with a diagonal filled with `1` and `U` is upper-triangular. `P` is a permutation matrix. | -/// | LU with full pivoting | `P⁻¹ * L * U ~ Q⁻¹` | `L` is lower-triangular with a diagonal filled with `1` and `U` is upper-triangular. `P` and `Q` are permutation matrices. | +/// | LU with full pivoting | `P⁻¹ * L * U * Q⁻¹` | `L` is lower-triangular with a diagonal filled with `1` and `U` is upper-triangular. `P` and `Q` are permutation matrices. | /// | SVD | `U * Σ * Vᵀ` | `U` and `V` are two orthogonal matrices and `Σ` is a diagonal matrix containing the singular values.
| impl> Matrix { /// Computes the bidiagonalization using householder reflections. @@ -60,6 +61,18 @@ impl> Matrix { QR::new(self.into_owned()) } + /// Computes the QR decomposition (with column pivoting) of this matrix. + pub fn col_piv_qr(self) -> ColPivQR + where + R: DimMin, + DefaultAllocator: Allocator + + Allocator + + Allocator> + + Allocator<(usize, usize), DimMinimum>, + { + ColPivQR::new(self.into_owned()) + } + /// Computes the Singular Value Decomposition using implicit shift. pub fn svd(self, compute_u: bool, compute_v: bool) -> SVD where @@ -121,6 +134,7 @@ impl> Matrix { /// | -------------------------|---------------------------|--------------| /// | Hessenberg | `Q * H * Qᵀ` | `Q` is a unitary matrix and `H` an upper-Hessenberg matrix. | /// | Cholesky | `L * Lᵀ` | `L` is a lower-triangular matrix. | +/// | UDU | `U * D * Uᵀ` | `U` is an upper-triangular matrix, and `D` a diagonal matrix. | /// | Schur decomposition | `Q * T * Qᵀ` | `Q` is a unitary matrix and `T` a quasi-upper-triangular matrix. | /// | Symmetric eigendecomposition | `Q * Λ * Qᵀ` | `Q` is a unitary matrix, and `Λ` is a real diagonal matrix. | /// | Symmetric tridiagonalization | `Q * T * Qᵀ` | `Q` is a unitary matrix, and `T` is a tridiagonal matrix. | @@ -136,6 +150,18 @@ impl> Matrix { Cholesky::new(self.into_owned()) } + /// Attempts to compute the UDU decomposition of this matrix. + /// + /// The input matrix `self` is assumed to be symmetric and this decomposition will only read + /// the upper-triangular part of `self`. + pub fn udu(self) -> Option> + where + N: RealField, + DefaultAllocator: Allocator + Allocator, + { + UDU::new(self.into_owned()) + } + /// Computes the Hessenberg decomposition of this matrix using householder reflections. pub fn hessenberg(self) -> Hessenberg where diff --git a/src/linalg/hessenberg.rs b/src/linalg/hessenberg.rs index beff5420..ac3e82b8 100644 --- a/src/linalg/hessenberg.rs +++ b/src/linalg/hessenberg.rs @@ -2,7 +2,7 @@ use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; -use crate::base::{DefaultAllocator, MatrixMN, MatrixN, VectorN}; +use crate::base::{DefaultAllocator, MatrixN, VectorN}; use crate::dimension::{DimDiff, DimSub, U1}; use crate::storage::Storage; use simba::scalar::ComplexField; @@ -48,7 +48,8 @@ where { /// Computes the Hessenberg decomposition using householder reflections. pub fn new(hess: MatrixN) -> Self { - let mut work = unsafe { MatrixMN::new_uninitialized_generic(hess.data.shape().0, U1) }; + let mut work = + unsafe { crate::unimplemented_or_uninitialized_generic!(hess.data.shape().0, U1) }; Self::new_with_workspace(hess, &mut work) } @@ -74,7 +75,8 @@ where "Hessenberg: invalid workspace size." ); - let mut subdiag = unsafe { MatrixMN::new_uninitialized_generic(dim.sub(U1), U1) }; + let mut subdiag = + unsafe { crate::unimplemented_or_uninitialized_generic!(dim.sub(U1), U1) }; if dim.value() == 0 { return Hessenberg { hess, subdiag }; diff --git a/src/linalg/mod.rs b/src/linalg/mod.rs index c602bfa7..075d91c2 100644 --- a/src/linalg/mod.rs +++ b/src/linalg/mod.rs @@ -8,6 +8,7 @@ mod determinant; // TODO: this should not be needed. However, the exp uses // explicit float operations on `f32` and `f64`. We need to // get rid of these to allow exp to be used on a no-std context. +mod col_piv_qr; mod decomposition; #[cfg(feature = "std")] mod exp; @@ -24,6 +25,7 @@ mod solve; mod svd; mod symmetric_eigen; mod symmetric_tridiagonal; +mod udu; //// TODO: Not complete enough for publishing.
//// This handles only cases where each eigenvalue has multiplicity one. @@ -31,6 +33,7 @@ mod symmetric_tridiagonal; pub use self::bidiagonal::*; pub use self::cholesky::*; +pub use self::col_piv_qr::*; pub use self::convolution::*; #[cfg(feature = "std")] pub use self::exp::*; @@ -43,3 +46,4 @@ pub use self::schur::*; pub use self::svd::*; pub use self::symmetric_eigen::*; pub use self::symmetric_tridiagonal::*; +pub use self::udu::*; diff --git a/src/linalg/permutation_sequence.rs b/src/linalg/permutation_sequence.rs index 47255832..dd389188 100644 --- a/src/linalg/permutation_sequence.rs +++ b/src/linalg/permutation_sequence.rs @@ -72,7 +72,7 @@ where unsafe { Self { len: 0, - ipiv: VectorN::new_uninitialized_generic(dim, U1), + ipiv: crate::unimplemented_or_uninitialized_generic!(dim, U1), } } } diff --git a/src/linalg/qr.rs b/src/linalg/qr.rs index f404aa5a..5c231c82 100644 --- a/src/linalg/qr.rs +++ b/src/linalg/qr.rs @@ -54,14 +54,15 @@ where let (nrows, ncols) = matrix.data.shape(); let min_nrows_ncols = nrows.min(ncols); - let mut diag = unsafe { MatrixMN::new_uninitialized_generic(min_nrows_ncols, U1) }; + let mut diag = + unsafe { crate::unimplemented_or_uninitialized_generic!(min_nrows_ncols, U1) }; if min_nrows_ncols.value() == 0 { return QR { qr: matrix, diag }; } - for ite in 0..min_nrows_ncols.value() { - householder::clear_column_unchecked(&mut matrix, &mut diag[ite], ite, 0, None); + for i in 0..min_nrows_ncols.value() { + householder::clear_column_unchecked(&mut matrix, &mut diag[i], i, 0, None); } QR { qr: matrix, diag } diff --git a/src/linalg/schur.rs b/src/linalg/schur.rs index 72c9b5ac..4b89567b 100644 --- a/src/linalg/schur.rs +++ b/src/linalg/schur.rs @@ -71,7 +71,8 @@ where /// number of iteration is exceeded, `None` is returned. If `niter == 0`, then the algorithm /// continues indefinitely until convergence. pub fn try_new(m: MatrixN, eps: N::RealField, max_niter: usize) -> Option { - let mut work = unsafe { VectorN::new_uninitialized_generic(m.data.shape().0, U1) }; + let mut work = + unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, U1) }; Self::do_decompose(m, &mut work, eps, max_niter, true) .map(|(q, t)| Schur { q: q.unwrap(), t }) @@ -378,7 +379,8 @@ where /// /// Return `None` if some eigenvalues are complex. pub fn eigenvalues(&self) -> Option> { - let mut out = unsafe { VectorN::new_uninitialized_generic(self.t.data.shape().0, U1) }; + let mut out = + unsafe { crate::unimplemented_or_uninitialized_generic!(self.t.data.shape().0, U1) }; if Self::do_eigenvalues(&self.t, &mut out) { Some(out) } else { @@ -392,7 +394,8 @@ where N: RealField, DefaultAllocator: Allocator, D>, { - let mut out = unsafe { VectorN::new_uninitialized_generic(self.t.data.shape().0, U1) }; + let mut out = + unsafe { crate::unimplemented_or_uninitialized_generic!(self.t.data.shape().0, U1) }; Self::do_complex_eigenvalues(&self.t, &mut out); out } @@ -503,7 +506,8 @@ where "Unable to compute eigenvalues of a non-square matrix." ); - let mut work = unsafe { VectorN::new_uninitialized_generic(self.data.shape().0, U1) }; + let mut work = + unsafe { crate::unimplemented_or_uninitialized_generic!(self.data.shape().0, U1) }; // Special case for 2x2 matrices. 
if self.nrows() == 2 { @@ -544,7 +548,7 @@ where DefaultAllocator: Allocator, D>, { let dim = self.data.shape().0; - let mut work = unsafe { VectorN::new_uninitialized_generic(dim, U1) }; + let mut work = unsafe { crate::unimplemented_or_uninitialized_generic!(dim, U1) }; let schur = Schur::do_decompose( self.clone_owned(), @@ -554,7 +558,7 @@ where false, ) .unwrap(); - let mut eig = unsafe { VectorN::new_uninitialized_generic(dim, U1) }; + let mut eig = unsafe { crate::unimplemented_or_uninitialized_generic!(dim, U1) }; Schur::do_complex_eigenvalues(&schur.1, &mut eig); eig } diff --git a/src/linalg/symmetric_tridiagonal.rs b/src/linalg/symmetric_tridiagonal.rs index e8d9fb5d..c05b5558 100644 --- a/src/linalg/symmetric_tridiagonal.rs +++ b/src/linalg/symmetric_tridiagonal.rs @@ -2,7 +2,7 @@ use serde::{Deserialize, Serialize}; use crate::allocator::Allocator; -use crate::base::{DefaultAllocator, MatrixMN, MatrixN, VectorN}; +use crate::base::{DefaultAllocator, MatrixN, VectorN}; use crate::dimension::{DimDiff, DimSub, U1}; use crate::storage::Storage; use simba::scalar::ComplexField; @@ -61,8 +61,9 @@ where "Unable to compute the symmetric tridiagonal decomposition of an empty matrix." ); - let mut off_diagonal = unsafe { MatrixMN::new_uninitialized_generic(dim.sub(U1), U1) }; - let mut p = unsafe { MatrixMN::new_uninitialized_generic(dim.sub(U1), U1) }; + let mut off_diagonal = + unsafe { crate::unimplemented_or_uninitialized_generic!(dim.sub(U1), U1) }; + let mut p = unsafe { crate::unimplemented_or_uninitialized_generic!(dim.sub(U1), U1) }; for i in 0..dim.value() - 1 { let mut m = m.rows_range_mut(i + 1..); diff --git a/src/linalg/udu.rs b/src/linalg/udu.rs new file mode 100644 index 00000000..70ff84a7 --- /dev/null +++ b/src/linalg/udu.rs @@ -0,0 +1,98 @@ +#[cfg(feature = "serde-serialize")] +use serde::{Deserialize, Serialize}; + +use crate::allocator::Allocator; +use crate::base::{DefaultAllocator, MatrixN, VectorN, U1}; +use crate::dimension::Dim; +use crate::storage::Storage; +use simba::scalar::RealField; + +/// UDU factorization. +#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] +#[cfg_attr( + feature = "serde-serialize", + serde(bound(serialize = "VectorN: Serialize, MatrixN: Serialize")) +)] +#[cfg_attr( + feature = "serde-serialize", + serde(bound( + deserialize = "VectorN: Deserialize<'de>, MatrixN: Deserialize<'de>" + )) +)] +#[derive(Clone, Debug)] +pub struct UDU +where + DefaultAllocator: Allocator + Allocator, +{ + /// The upper triangular matrix resulting from the factorization + pub u: MatrixN, + /// The diagonal matrix resulting from the factorization + pub d: VectorN, +} + +impl Copy for UDU +where + DefaultAllocator: Allocator + Allocator, + VectorN: Copy, + MatrixN: Copy, +{ +} + +impl UDU +where + DefaultAllocator: Allocator + Allocator, +{ + /// Computes the UDU^T factorization. + /// + /// The input matrix `p` is assumed to be symmetric and this decomposition will only read + /// the upper-triangular part of `p`. + /// + /// Ref.: "Optimal control and estimation-Dover Publications", Robert F. 
Stengel, (1994) page 360 + pub fn new(p: MatrixN) -> Option { + let n = p.ncols(); + let n_dim = p.data.shape().1; + + let mut d = VectorN::zeros_generic(n_dim, U1); + let mut u = MatrixN::zeros_generic(n_dim, n_dim); + + d[n - 1] = p[(n - 1, n - 1)]; + + if d[n - 1].is_zero() { + return None; + } + + u.column_mut(n - 1) + .axpy(N::one() / d[n - 1], &p.column(n - 1), N::zero()); + + for j in (0..n - 1).rev() { + let mut d_j = d[j]; + for k in j + 1..n { + d_j += d[k] * u[(j, k)].powi(2); + } + + d[j] = p[(j, j)] - d_j; + + if d[j].is_zero() { + return None; + } + + for i in (0..=j).rev() { + let mut u_ij = u[(i, j)]; + for k in j + 1..n { + u_ij += d[k] * u[(j, k)] * u[(i, k)]; + } + + u[(i, j)] = (p[(i, j)] - u_ij) / d[j]; + } + + u[(j, j)] = N::one(); + } + + Some(Self { u, d }) + } + + /// Returns the diagonal elements as a matrix. + pub fn d_matrix(&self) -> MatrixN { + MatrixN::from_diagonal(&self.d) + } +} diff --git a/src/proptest/mod.rs b/src/proptest/mod.rs new file mode 100644 index 00000000..ae263956 --- /dev/null +++ b/src/proptest/mod.rs @@ -0,0 +1,476 @@ +//! `proptest`-related features for `nalgebra` data structures. +//! +//! **This module is only available when the `proptest-support` feature is enabled in `nalgebra`**. +//! +//! `proptest` is a library for *property-based testing*. While similar to QuickCheck, +//! which may be more familiar to some users, it has a more sophisticated design that +//! provides users with automatic invariant-preserving shrinking. This means that when using +//! `proptest`, you rarely need to write your own shrinkers - which is usually very difficult - +//! and can instead get this "for free". Moreover, `proptest` does not rely on a canonical +//! `Arbitrary` trait implementation like QuickCheck, though it does also provide this. For +//! more information, check out the [proptest docs](https://docs.rs/proptest/0.10.1/proptest/) +//! and the [proptest book](https://altsysrq.github.io/proptest-book/intro.html). +//! +//! This module provides users of `nalgebra` with tools to work with `nalgebra` types in +//! `proptest` tests. At present, this integration is at an early stage, and only +//! provides tools for generating matrices and vectors, and not any of the geometry types. +//! There are essentially two ways of using this functionality: +//! +//! - Using the [matrix](fn.matrix.html) function to generate matrices with constraints +//! on dimensions and elements. +//! - Relying on the `Arbitrary` implementation of `MatrixMN`. +//! +//! The first variant is almost always preferred in practice. Read on to discover why. +//! +//! ### Using free function strategies +//! +//! In `proptest`, it is usually preferable to have free functions that generate *strategies*. +//! Currently, the [matrix](fn.matrix.html) function fills this role. The analogous function for +//! column vectors is [vector](fn.vector.html). Let's take a quick look at how it may be used: +//! ```rust +//! use nalgebra::proptest::matrix; +//! use proptest::prelude::*; +//! +//! proptest! { +//! # /* +//! #[test] +//! # */ +//! fn my_test(a in matrix(-5 ..= 5, 2 ..= 4, 1..=4)) { +//! // Generates matrices with elements in the range -5 ..= 5, rows in 2..=4 and +//! // columns in 1..=4. +//! } +//! } +//! +//! # fn main() { my_test(); } +//! ``` +//! +//! In the above example, we generate matrices with constraints on the elements, as well as +//! on the allowed dimensions. When a failing example is found, the resulting shrinking process +//! will preserve these invariants.
We can use this to compose more advanced strategies. +//! For example, let's consider a toy example where we need to generate pairs of matrices +//! with exactly 3 rows fixed at compile-time and the same number of columns, but we want the +//! number of columns to vary. One way to do this is to use `proptest` combinators in combination +//! with [matrix](fn.matrix.html) as follows: +//! +//! ```rust +//! use nalgebra::{Dynamic, MatrixMN, U3}; +//! use nalgebra::proptest::matrix; +//! use proptest::prelude::*; +//! +//! type MyMatrix = MatrixMN; +//! +//! /// Returns a strategy for pairs of matrices with `U3` rows and the same number of +//! /// columns. +//! fn matrix_pairs() -> impl Strategy { +//! matrix(-5 ..= 5, U3, 0 ..= 10) +//! // We first generate the initial matrix `a`, and then depending on the concrete +//! // instances of `a`, we pick a second matrix with the same number of columns +//! .prop_flat_map(|a| { +//! let b = matrix(-5 .. 5, U3, a.ncols()); +//! // This returns a new tuple strategy where we keep `a` fixed while +//! // the second item is a strategy that generates instances with the same +//! // dimensions as `a` +//! (Just(a), b) +//! }) +//! } +//! +//! proptest! { +//! # /* +//! #[test] +//! # */ +//! fn my_test((a, b) in matrix_pairs()) { +//! // Let's double-check that the two matrices do indeed have the same number of +//! // columns +//! prop_assert_eq!(a.ncols(), b.ncols()); +//! } +//! } +//! +//! # fn main() { my_test(); } +//! ``` +//! +//! ### The `Arbitrary` implementation +//! +//! If you don't care about the dimensions of matrices, you can write tests like these: +//! +//! ```rust +//! use nalgebra::{DMatrix, DVector, Dynamic, Matrix3, MatrixMN, Vector3, U3}; +//! use proptest::prelude::*; +//! +//! proptest! { +//! # /* +//! #[test] +//! # */ +//! fn test_dynamic(matrix: DMatrix) { +//! // This will generate arbitrary instances of `DMatrix` and also attempt +//! // to shrink/simplify them when test failures are encountered. +//! } +//! +//! # /* +//! #[test] +//! # */ +//! fn test_static_and_mixed(matrix: Matrix3, matrix2: MatrixMN) { +//! // Test some property involving these matrices +//! } +//! +//! # /* +//! #[test] +//! # */ +//! fn test_vectors(fixed_size_vector: Vector3, dyn_vector: DVector) { +//! // Test some property involving these vectors +//! } +//! } +//! +//! # fn main() { test_dynamic(); test_static_and_mixed(); test_vectors(); } +//! ``` +//! +//! While this may be convenient, the default strategies for built-in types in `proptest` can +//! generate *any* number, including integers large enough to easily lead to overflow when used in +//! matrix operations, or even infinity or NaN values for floating-point types. Therefore +//! `Arbitrary` is rarely the method of choice for writing property-based tests. +//! +//! ### Notes on shrinking +//! +//! Due to some limitations of the current implementation, shrinking takes place by first +//! shrinking the matrix elements before trying to shrink the dimensions of the matrix. +//! This unfortunately often leads to the fact that a large number of shrinking iterations +//! are necessary to find a (nearly) minimal failing test case. As a workaround for this, +//! you can increase the maximum number of shrinking iterations when debugging. To do this, +//! simply set the `PROPTEST_MAX_SHRINK_ITERS` variable to a high number. For example: +//! +//! ```text +//! PROPTEST_MAX_SHRINK_ITERS=100000 cargo test my_failing_test +//! 
``` +use crate::allocator::Allocator; +use crate::{DefaultAllocator, Dim, DimName, Dynamic, MatrixMN, Scalar, U1}; +use proptest::arbitrary::Arbitrary; +use proptest::collection::vec; +use proptest::strategy::{BoxedStrategy, Just, NewTree, Strategy, ValueTree}; +use proptest::test_runner::TestRunner; + +use std::ops::RangeInclusive; + +/// Parameters for arbitrary matrix generation. +#[derive(Debug, Clone)] +#[non_exhaustive] +pub struct MatrixParameters { + /// The range of rows that may be generated. + pub rows: DimRange, + /// The range of columns that may be generated. + pub cols: DimRange, + /// Parameters for the `Arbitrary` implementation of the scalar values. + pub value_parameters: NParameters, +} + +/// A range of allowed dimensions for use in generation of matrices. +/// +/// The `DimRange` type is used to encode the range of dimensions that can be used for generation +/// of matrices with `proptest`. In most cases, you do not need to concern yourself with +/// `DimRange` directly, as it supports conversion from other types such as `U3` or inclusive +/// ranges such as `5 ..= 6`. The latter example corresponds to dimensions from (inclusive) +/// `Dynamic::new(5)` to `Dynamic::new(6)` (inclusive). +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct DimRange(RangeInclusive); + +impl DimRange { + /// The lower bound for dimensions generated. + pub fn lower_bound(&self) -> D { + *self.0.start() + } + + /// The upper bound for dimensions generated. + pub fn upper_bound(&self) -> D { + *self.0.end() + } +} + +impl From for DimRange { + fn from(dim: D) -> Self { + DimRange(dim..=dim) + } +} + +impl From> for DimRange { + fn from(range: RangeInclusive) -> Self { + DimRange(range) + } +} + +impl From> for DimRange { + fn from(range: RangeInclusive) -> Self { + DimRange::from(Dynamic::new(*range.start())..=Dynamic::new(*range.end())) + } +} + +impl DimRange { + /// Converts the `DimRange` into an instance of `RangeInclusive`. + pub fn to_range_inclusive(&self) -> RangeInclusive { + self.lower_bound().value()..=self.upper_bound().value() + } +} + +impl From for DimRange { + fn from(dim: usize) -> Self { + DimRange::from(Dynamic::new(dim)) + } +} + +/// The default range used for Dynamic dimensions when generating arbitrary matrices. +fn dynamic_dim_range() -> DimRange { + DimRange::from(0..=6) +} + +/// Create a strategy to generate matrices containing values drawn from the given strategy, +/// with rows and columns in the provided ranges. +/// +/// ## Examples +/// ``` +/// use nalgebra::proptest::matrix; +/// use nalgebra::{MatrixMN, U3, Dynamic}; +/// use proptest::prelude::*; +/// +/// proptest! { +/// # /* +/// #[test] +/// # */ +/// fn my_test(a in matrix(0 .. 5i32, U3, 0 ..= 5)) { +/// // Let's make sure we've got the correct type first +/// let a: MatrixMN<_, U3, Dynamic> = a; +/// prop_assert!(a.nrows() == 3); +/// prop_assert!(a.ncols() <= 5); +/// prop_assert!(a.iter().all(|x_ij| *x_ij >= 0 && *x_ij < 5)); +/// } +/// } +/// +/// # fn main() { my_test(); } +/// ``` +/// +/// ## Limitations +/// The current implementation has some limitations that lead to suboptimal shrinking behavior. +/// See the [module-level documentation](index.html) for more. 
+pub fn matrix( + value_strategy: ScalarStrategy, + rows: impl Into>, + cols: impl Into>, +) -> MatrixStrategy +where + ScalarStrategy: Strategy + Clone + 'static, + ScalarStrategy::Value: Scalar, + R: Dim, + C: Dim, + DefaultAllocator: Allocator, +{ + matrix_(value_strategy, rows.into(), cols.into()) +} + +/// Same as `matrix`, but without the additional anonymous generic types +fn matrix_( + value_strategy: ScalarStrategy, + rows: DimRange, + cols: DimRange, +) -> MatrixStrategy +where + ScalarStrategy: Strategy + Clone + 'static, + ScalarStrategy::Value: Scalar, + R: Dim, + C: Dim, + DefaultAllocator: Allocator, +{ + let nrows = rows.lower_bound().value()..=rows.upper_bound().value(); + let ncols = cols.lower_bound().value()..=cols.upper_bound().value(); + + // Even though we can use this function to generate fixed-size matrices, + // we currently generate all matrices with heap allocated Vec data. + // TODO: Avoid heap allocation for fixed-size matrices. + // Doing this *properly* would probably require us to implement a custom + // strategy and valuetree with custom shrinking logic, which is not trivial + + // Perhaps more problematic, however, is the poor shrinking behavior the current setup leads to. + // Shrinking in proptest basically happens in "reverse" of the combinators, so + // by first generating the dimensions and then the elements, we get shrinking that first + // tries to completely shrink the individual elements before trying to reduce the dimension. + // This is clearly the opposite of what we want. I can't find any good way around this + // short of writing our own custom value tree, which we should probably do at some point. + // TODO: Custom implementation of value tree for better shrinking behavior. + + let strategy = nrows + .prop_flat_map(move |nrows| (Just(nrows), ncols.clone())) + .prop_flat_map(move |(nrows, ncols)| { + ( + Just(nrows), + Just(ncols), + vec(value_strategy.clone(), nrows * ncols), + ) + }) + .prop_map(|(nrows, ncols, values)| { + // Note: R/C::from_usize will panic if nrows/ncols does not fit in the dimension type. + // However, this should never fail, because we should only be generating + // this stuff in the first place + MatrixMN::from_iterator_generic(R::from_usize(nrows), C::from_usize(ncols), values) + }) + .boxed(); + + MatrixStrategy { strategy } +} + +/// Create a strategy to generate column vectors containing values drawn from the given strategy, +/// with length in the provided range. +/// +/// This is a convenience function for calling +/// [matrix(value_strategy, length, U1)](fn.matrix.html) and should +/// be used when you only want to generate column vectors, as it's simpler and makes the intent +/// clear. 
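A usage sketch for the `vector` helper documented above, analogous to the example given for `matrix` (the property tested is trivial; it only demonstrates the types involved):

```rust
use nalgebra::proptest::vector;
use nalgebra::U3;
use proptest::prelude::*;

proptest! {
    #[test]
    fn norms_are_nonnegative(v in vector(-10.0..10.0f64, U3)) {
        // `v` is a `Vector3<f64>` with entries drawn from the given range.
        prop_assert!(v.norm() >= 0.0);
    }
}
```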
+pub fn vector( + value_strategy: ScalarStrategy, + length: impl Into>, +) -> MatrixStrategy +where + ScalarStrategy: Strategy + Clone + 'static, + ScalarStrategy::Value: Scalar, + D: Dim, + DefaultAllocator: Allocator, +{ + matrix_(value_strategy, length.into(), U1.into()) +} + +impl Default for MatrixParameters +where + NParameters: Default, + R: DimName, + C: DimName, +{ + fn default() -> Self { + Self { + rows: DimRange::from(R::name()), + cols: DimRange::from(C::name()), + value_parameters: NParameters::default(), + } + } +} + +impl Default for MatrixParameters +where + NParameters: Default, + R: DimName, +{ + fn default() -> Self { + Self { + rows: DimRange::from(R::name()), + cols: dynamic_dim_range(), + value_parameters: NParameters::default(), + } + } +} + +impl Default for MatrixParameters +where + NParameters: Default, + C: DimName, +{ + fn default() -> Self { + Self { + rows: dynamic_dim_range(), + cols: DimRange::from(C::name()), + value_parameters: NParameters::default(), + } + } +} + +impl Default for MatrixParameters +where + NParameters: Default, +{ + fn default() -> Self { + Self { + rows: dynamic_dim_range(), + cols: dynamic_dim_range(), + value_parameters: NParameters::default(), + } + } +} + +impl Arbitrary for MatrixMN +where + N: Scalar + Arbitrary, + ::Strategy: Clone, + R: Dim, + C: Dim, + MatrixParameters: Default, + DefaultAllocator: Allocator, +{ + type Parameters = MatrixParameters; + + fn arbitrary_with(args: Self::Parameters) -> Self::Strategy { + let value_strategy = N::arbitrary_with(args.value_parameters); + matrix(value_strategy, args.rows, args.cols) + } + + type Strategy = MatrixStrategy; +} + +/// A strategy for generating matrices. +#[derive(Debug, Clone)] +pub struct MatrixStrategy +where + NStrategy: Strategy, + NStrategy::Value: Scalar, + DefaultAllocator: Allocator, +{ + // For now we only internally hold a boxed strategy. The reason for introducing this + // separate wrapper struct is so that we can replace the strategy logic with custom logic + // later down the road without introducing significant breaking changes + strategy: BoxedStrategy>, +} + +impl Strategy for MatrixStrategy +where + NStrategy: Strategy, + NStrategy::Value: Scalar, + R: Dim, + C: Dim, + DefaultAllocator: Allocator, +{ + type Tree = MatrixValueTree; + type Value = MatrixMN; + + fn new_tree(&self, runner: &mut TestRunner) -> NewTree { + let underlying_tree = self.strategy.new_tree(runner)?; + Ok(MatrixValueTree { + value_tree: underlying_tree, + }) + } +} + +/// A value tree for matrices. +pub struct MatrixValueTree +where + N: Scalar, + R: Dim, + C: Dim, + DefaultAllocator: Allocator, +{ + // For now we only wrap a boxed value tree. The reason for wrapping is that this allows us + // to swap out the value tree logic down the road without significant breaking changes. 
+ value_tree: Box>>, +} + +impl ValueTree for MatrixValueTree +where + N: Scalar, + R: Dim, + C: Dim, + DefaultAllocator: Allocator, +{ + type Value = MatrixMN; + + fn current(&self) -> Self::Value { + self.value_tree.current() + } + + fn simplify(&mut self) -> bool { + self.value_tree.simplify() + } + + fn complicate(&mut self) -> bool { + self.value_tree.complicate() + } +} diff --git a/src/sparse/cs_matrix.rs b/src/sparse/cs_matrix.rs index 45a2bbf7..3b056ab7 100644 --- a/src/sparse/cs_matrix.rs +++ b/src/sparse/cs_matrix.rs @@ -460,7 +460,7 @@ where { // Size = R let nrows = self.data.shape().0; - let mut workspace = unsafe { VectorN::new_uninitialized_generic(nrows, U1) }; + let mut workspace = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows, U1) }; self.sort_with_workspace(workspace.as_mut_slice()); } diff --git a/src/sparse/cs_matrix_cholesky.rs b/src/sparse/cs_matrix_cholesky.rs index 277f9316..1a0c15dc 100644 --- a/src/sparse/cs_matrix_cholesky.rs +++ b/src/sparse/cs_matrix_cholesky.rs @@ -48,8 +48,10 @@ where let (l, u) = Self::nonzero_pattern(m); // Workspaces. - let work_x = unsafe { VectorN::new_uninitialized_generic(m.data.shape().0, U1) }; - let work_c = unsafe { VectorN::new_uninitialized_generic(m.data.shape().1, U1) }; + let work_x = + unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, U1) }; + let work_c = + unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().1, U1) }; let mut original_p = m.data.p.as_slice().to_vec(); original_p.push(m.data.i.len()); @@ -291,7 +293,8 @@ where let etree = Self::elimination_tree(m); let (nrows, ncols) = m.data.shape(); let mut rows = Vec::with_capacity(m.len()); - let mut cols = unsafe { VectorN::new_uninitialized_generic(m.data.shape().0, U1) }; + let mut cols = + unsafe { crate::unimplemented_or_uninitialized_generic!(m.data.shape().0, U1) }; let mut marks = Vec::new(); // NOTE: the following will actually compute the non-zero pattern of diff --git a/src/sparse/cs_matrix_ops.rs b/src/sparse/cs_matrix_ops.rs index 803bc61f..a440882c 100644 --- a/src/sparse/cs_matrix_ops.rs +++ b/src/sparse/cs_matrix_ops.rs @@ -242,7 +242,7 @@ where let mut res = CsMatrix::new_uninitialized_generic(nrows1, ncols2, self.len() + rhs.len()); let mut timestamps = VectorN::zeros_generic(nrows1, U1); - let mut workspace = unsafe { VectorN::new_uninitialized_generic(nrows1, U1) }; + let mut workspace = unsafe { crate::unimplemented_or_uninitialized_generic!(nrows1, U1) }; let mut nz = 0; for j in 0..ncols2.value() { diff --git a/src/sparse/cs_matrix_solve.rs b/src/sparse/cs_matrix_solve.rs index 73b50db3..bc7c5bc7 100644 --- a/src/sparse/cs_matrix_solve.rs +++ b/src/sparse/cs_matrix_solve.rs @@ -149,7 +149,8 @@ impl> CsMatrix { self.lower_triangular_reach(b, &mut reach); // We sort the reach so the result matrix has sorted indices. 
reach.sort(); - let mut workspace = unsafe { VectorN::new_uninitialized_generic(b.data.shape().0, U1) }; + let mut workspace = + unsafe { crate::unimplemented_or_uninitialized_generic!(b.data.shape().0, U1) }; for i in reach.iter().cloned() { workspace[i] = N::zero(); diff --git a/tests/core/blas.rs b/tests/core/blas.rs index 9b7be4af..ef5c1c2f 100644 --- a/tests/core/blas.rs +++ b/tests/core/blas.rs @@ -21,19 +21,20 @@ fn gemm_noncommutative() { assert_eq!(res, Matrix2::zero()); } -#[cfg(feature = "arbitrary")] -mod blas_quickcheck { +#[cfg(feature = "proptest-support")] +mod blas_proptest { + use crate::proptest::{PROPTEST_F64, PROPTEST_MATRIX_DIM}; use na::{DMatrix, DVector}; - use std::cmp; + use proptest::{prop_assert, proptest}; - quickcheck! { + proptest! { /* * * Symmetric operators. * */ - fn gemv_symm(n: usize, alpha: f64, beta: f64) -> bool { - let n = cmp::max(1, cmp::min(n, 50)); + #[test] + fn gemv_symm(n in PROPTEST_MATRIX_DIM, alpha in PROPTEST_F64, beta in PROPTEST_F64) { let a = DMatrix::::new_random(n, n); let a = &a * a.transpose(); @@ -44,18 +45,16 @@ mod blas_quickcheck { y1.gemv(alpha, &a, &x, beta); y2.sygemv(alpha, &a.lower_triangle(), &x, beta); - if !relative_eq!(y1, y2, epsilon = 1.0e-10) { - return false; - } + prop_assert!(relative_eq!(y1, y2, epsilon = 1.0e-10)); y1.gemv(alpha, &a, &x, 0.0); y2.sygemv(alpha, &a.lower_triangle(), &x, 0.0); - relative_eq!(y1, y2, epsilon = 1.0e-10) + prop_assert!(relative_eq!(y1, y2, epsilon = 1.0e-10)) } - fn gemv_tr(n: usize, alpha: f64, beta: f64) -> bool { - let n = cmp::max(1, cmp::min(n, 50)); + #[test] + fn gemv_tr(n in PROPTEST_MATRIX_DIM, alpha in PROPTEST_F64, beta in PROPTEST_F64) { let a = DMatrix::::new_random(n, n); let x = DVector::new_random(n); let mut y1 = DVector::new_random(n); @@ -64,18 +63,16 @@ mod blas_quickcheck { y1.gemv(alpha, &a, &x, beta); y2.gemv_tr(alpha, &a.transpose(), &x, beta); - if !relative_eq!(y1, y2, epsilon = 1.0e-10) { - return false; - } + prop_assert!(relative_eq!(y1, y2, epsilon = 1.0e-10)); y1.gemv(alpha, &a, &x, 0.0); y2.gemv_tr(alpha, &a.transpose(), &x, 0.0); - relative_eq!(y1, y2, epsilon = 1.0e-10) + prop_assert!(relative_eq!(y1, y2, epsilon = 1.0e-10)) } - fn ger_symm(n: usize, alpha: f64, beta: f64) -> bool { - let n = cmp::max(1, cmp::min(n, 50)); + #[test] + fn ger_symm(n in PROPTEST_MATRIX_DIM, alpha in PROPTEST_F64, beta in PROPTEST_F64) { let a = DMatrix::::new_random(n, n); let mut a1 = &a * a.transpose(); let mut a2 = a1.lower_triangle(); @@ -86,18 +83,16 @@ mod blas_quickcheck { a1.ger(alpha, &x, &y, beta); a2.syger(alpha, &x, &y, beta); - if !relative_eq!(a1.lower_triangle(), a2) { - return false; - } + prop_assert!(relative_eq!(a1.lower_triangle(), a2)); a1.ger(alpha, &x, &y, 0.0); a2.syger(alpha, &x, &y, 0.0); - relative_eq!(a1.lower_triangle(), a2) + prop_assert!(relative_eq!(a1.lower_triangle(), a2)) } - fn quadform(n: usize, alpha: f64, beta: f64) -> bool { - let n = cmp::max(1, cmp::min(n, 50)); + #[test] + fn quadform(n in PROPTEST_MATRIX_DIM, alpha in PROPTEST_F64, beta in PROPTEST_F64) { let rhs = DMatrix::::new_random(6, n); let mid = DMatrix::::new_random(6, 6); let mut res = DMatrix::new_random(n, n); @@ -106,13 +101,11 @@ mod blas_quickcheck { res.quadform(alpha, &mid, &rhs, beta); - println!("{}{}", res, expected); - - relative_eq!(res, expected, epsilon = 1.0e-7) + prop_assert!(relative_eq!(res, expected, epsilon = 1.0e-7)) } - fn quadform_tr(n: usize, alpha: f64, beta: f64) -> bool { - let n = cmp::max(1, cmp::min(n, 50)); + #[test] + fn quadform_tr(n 
in PROPTEST_MATRIX_DIM, alpha in PROPTEST_F64, beta in PROPTEST_F64) { let lhs = DMatrix::::new_random(6, n); let mid = DMatrix::::new_random(n, n); let mut res = DMatrix::new_random(6, 6); @@ -121,9 +114,7 @@ mod blas_quickcheck { res.quadform_tr(alpha, &lhs, &mid , beta); - println!("{}{}", res, expected); - - relative_eq!(res, expected, epsilon = 1.0e-7) + prop_assert!(relative_eq!(res, expected, epsilon = 1.0e-7)) } } } diff --git a/tests/core/conversion.rs b/tests/core/conversion.rs index b7a8c5f8..93545004 100644 --- a/tests/core/conversion.rs +++ b/tests/core/conversion.rs @@ -1,44 +1,49 @@ -#![cfg(all(feature = "arbitrary", feature = "alga"))] +#![cfg(all(feature = "proptest-support", feature = "alga"))] use alga::linear::Transformation; use na::{ self, Affine3, Isometry3, Matrix2, Matrix2x3, Matrix2x4, Matrix2x5, Matrix2x6, Matrix3, Matrix3x2, Matrix3x4, Matrix3x5, Matrix3x6, Matrix4, Matrix4x2, Matrix4x3, Matrix4x5, Matrix4x6, Matrix5, Matrix5x2, Matrix5x3, Matrix5x4, Matrix5x6, Matrix6, Matrix6x2, Matrix6x3, - Matrix6x4, Matrix6x5, Point3, Projective3, Rotation3, RowVector1, RowVector2, RowVector3, - RowVector4, RowVector5, RowVector6, Similarity3, Transform3, Translation3, UnitQuaternion, - Vector1, Vector2, Vector3, Vector4, Vector5, Vector6, + Matrix6x4, Matrix6x5, Projective3, Rotation3, RowVector1, RowVector2, RowVector3, RowVector4, + RowVector5, RowVector6, Similarity3, Transform3, UnitQuaternion, Vector1, Vector2, Vector3, + Vector4, Vector5, Vector6, }; use na::{DMatrix, DMatrixSlice, DMatrixSliceMut, MatrixSlice, MatrixSliceMut}; use na::{U1, U3, U4}; -quickcheck! { - fn translation_conversion(t: Translation3, v: Vector3, p: Point3) -> bool { +use crate::proptest::*; +use proptest::{prop_assert, prop_assert_eq, proptest}; + +proptest! 
{ + #[test] + fn translation_conversion(t in translation3(), v in vector3(), p in point3()) { let iso: Isometry3 = na::convert(t); let sim: Similarity3 = na::convert(t); let aff: Affine3 = na::convert(t); let prj: Projective3 = na::convert(t); let tr: Transform3 = na::convert(t); - t == na::try_convert(iso).unwrap() && - t == na::try_convert(sim).unwrap() && - t == na::try_convert(aff).unwrap() && - t == na::try_convert(prj).unwrap() && - t == na::try_convert(tr).unwrap() && + prop_assert_eq!(t, na::try_convert(iso).unwrap()); + prop_assert_eq!(t, na::try_convert(sim).unwrap()); + prop_assert_eq!(t, na::try_convert(aff).unwrap()); + prop_assert_eq!(t, na::try_convert(prj).unwrap()); + prop_assert_eq!(t, na::try_convert(tr).unwrap() ); - t.transform_vector(&v) == iso * v && - t.transform_vector(&v) == sim * v && - t.transform_vector(&v) == aff * v && - t.transform_vector(&v) == prj * v && - t.transform_vector(&v) == tr * v && + prop_assert_eq!(t.transform_vector(&v), iso * v); + prop_assert_eq!(t.transform_vector(&v), sim * v); + prop_assert_eq!(t.transform_vector(&v), aff * v); + prop_assert_eq!(t.transform_vector(&v), prj * v); + prop_assert_eq!(t.transform_vector(&v), tr * v); - t * p == iso * p && - t * p == sim * p && - t * p == aff * p && - t * p == prj * p && - t * p == tr * p + prop_assert_eq!(t * p, iso * p); + prop_assert_eq!(t * p, sim * p); + prop_assert_eq!(t * p, aff * p); + prop_assert_eq!(t * p, prj * p); + prop_assert_eq!(t * p, tr * p); } - fn rotation_conversion(r: Rotation3, v: Vector3, p: Point3) -> bool { + #[test] + fn rotation_conversion(r in rotation3(), v in vector3(), p in point3()) { let uq: UnitQuaternion = na::convert(r); let iso: Isometry3 = na::convert(r); let sim: Similarity3 = na::convert(r); @@ -46,30 +51,31 @@ quickcheck! { let prj: Projective3 = na::convert(r); let tr: Transform3 = na::convert(r); - relative_eq!(r, na::try_convert(uq).unwrap(), epsilon = 1.0e-7) && - relative_eq!(r, na::try_convert(iso).unwrap(), epsilon = 1.0e-7) && - relative_eq!(r, na::try_convert(sim).unwrap(), epsilon = 1.0e-7) && - r == na::try_convert(aff).unwrap() && - r == na::try_convert(prj).unwrap() && - r == na::try_convert(tr).unwrap() && + prop_assert!(relative_eq!(r, na::try_convert(uq).unwrap(), epsilon = 1.0e-7)); + prop_assert!(relative_eq!(r, na::try_convert(iso).unwrap(), epsilon = 1.0e-7)); + prop_assert!(relative_eq!(r, na::try_convert(sim).unwrap(), epsilon = 1.0e-7)); + prop_assert_eq!(r, na::try_convert(aff).unwrap()); + prop_assert_eq!(r, na::try_convert(prj).unwrap()); + prop_assert_eq!(r, na::try_convert(tr).unwrap() ); // NOTE: we need relative_eq because Isometry and Similarity use quaternions. 
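// Rotation3 -> UnitQuaternion conversion is lossy at the last-ulp level, which is why the uq/iso/sim products below are compared with an epsilon while the matrix-backed aff/prj/tr products are asserted exactly.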
- relative_eq!(r * v, uq * v, epsilon = 1.0e-7) && - relative_eq!(r * v, iso * v, epsilon = 1.0e-7) && - relative_eq!(r * v, sim * v, epsilon = 1.0e-7) && - r * v == aff * v && - r * v == prj * v && - r * v == tr * v && + prop_assert!(relative_eq!(r * v, uq * v, epsilon = 1.0e-7)); + prop_assert!(relative_eq!(r * v, iso * v, epsilon = 1.0e-7)); + prop_assert!(relative_eq!(r * v, sim * v, epsilon = 1.0e-7)); + prop_assert_eq!(r * v, aff * v); + prop_assert_eq!(r * v, prj * v); + prop_assert_eq!(r * v, tr * v); - relative_eq!(r * p, uq * p, epsilon = 1.0e-7) && - relative_eq!(r * p, iso * p, epsilon = 1.0e-7) && - relative_eq!(r * p, sim * p, epsilon = 1.0e-7) && - r * p == aff * p && - r * p == prj * p && - r * p == tr * p + prop_assert!(relative_eq!(r * p, uq * p, epsilon = 1.0e-7)); + prop_assert!(relative_eq!(r * p, iso * p, epsilon = 1.0e-7)); + prop_assert!(relative_eq!(r * p, sim * p, epsilon = 1.0e-7)); + prop_assert_eq!(r * p, aff * p); + prop_assert_eq!(r * p, prj * p); + prop_assert_eq!(r * p, tr * p); } - fn unit_quaternion_conversion(uq: UnitQuaternion, v: Vector3, p: Point3) -> bool { + #[test] + fn unit_quaternion_conversion(uq in unit_quaternion(), v in vector3(), p in point3()) { let rot: Rotation3 = na::convert(uq); let iso: Isometry3 = na::convert(uq); let sim: Similarity3 = na::convert(uq); @@ -77,68 +83,70 @@ quickcheck! { let prj: Projective3 = na::convert(uq); let tr: Transform3 = na::convert(uq); - uq == na::try_convert(iso).unwrap() && - uq == na::try_convert(sim).unwrap() && - relative_eq!(uq, na::try_convert(rot).unwrap(), epsilon = 1.0e-7) && - relative_eq!(uq, na::try_convert(aff).unwrap(), epsilon = 1.0e-7) && - relative_eq!(uq, na::try_convert(prj).unwrap(), epsilon = 1.0e-7) && - relative_eq!(uq, na::try_convert(tr).unwrap(), epsilon = 1.0e-7) && + prop_assert_eq!(uq, na::try_convert(iso).unwrap()); + prop_assert_eq!(uq, na::try_convert(sim).unwrap()); + prop_assert!(relative_eq!(uq, na::try_convert(rot).unwrap(), epsilon = 1.0e-7)); + prop_assert!(relative_eq!(uq, na::try_convert(aff).unwrap(), epsilon = 1.0e-7)); + prop_assert!(relative_eq!(uq, na::try_convert(prj).unwrap(), epsilon = 1.0e-7)); + prop_assert!(relative_eq!(uq, na::try_convert(tr).unwrap(), epsilon = 1.0e-7) ); // NOTE: iso and sim use unit quaternions for the rotation so conversions to them are exact. 
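// Conversely, no rotation matrix needs to be re-extracted here, so uq * v == iso * v and uq * v == sim * v hold exactly below, and only the matrix-based rot/aff/prj/tr comparisons need an epsilon.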
- relative_eq!(uq * v, rot * v, epsilon = 1.0e-7) && - uq * v == iso * v && - uq * v == sim * v && - relative_eq!(uq * v, aff * v, epsilon = 1.0e-7) && - relative_eq!(uq * v, prj * v, epsilon = 1.0e-7) && - relative_eq!(uq * v, tr * v, epsilon = 1.0e-7) && + prop_assert!(relative_eq!(uq * v, rot * v, epsilon = 1.0e-7)); + prop_assert_eq!(uq * v, iso * v); + prop_assert_eq!(uq * v, sim * v); + prop_assert!(relative_eq!(uq * v, aff * v, epsilon = 1.0e-7)); + prop_assert!(relative_eq!(uq * v, prj * v, epsilon = 1.0e-7)); + prop_assert!(relative_eq!(uq * v, tr * v, epsilon = 1.0e-7)); - relative_eq!(uq * p, rot * p, epsilon = 1.0e-7) && - uq * p == iso * p && - uq * p == sim * p && - relative_eq!(uq * p, aff * p, epsilon = 1.0e-7) && - relative_eq!(uq * p, prj * p, epsilon = 1.0e-7) && - relative_eq!(uq * p, tr * p, epsilon = 1.0e-7) + prop_assert!(relative_eq!(uq * p, rot * p, epsilon = 1.0e-7)); + prop_assert_eq!(uq * p, iso * p); + prop_assert_eq!(uq * p, sim * p); + prop_assert!(relative_eq!(uq * p, aff * p, epsilon = 1.0e-7)); + prop_assert!(relative_eq!(uq * p, prj * p, epsilon = 1.0e-7)); + prop_assert!(relative_eq!(uq * p, tr * p, epsilon = 1.0e-7)); } - fn isometry_conversion(iso: Isometry3, v: Vector3, p: Point3) -> bool { + #[test] + fn isometry_conversion(iso in isometry3(), v in vector3(), p in point3()) { let sim: Similarity3 = na::convert(iso); let aff: Affine3 = na::convert(iso); let prj: Projective3 = na::convert(iso); let tr: Transform3 = na::convert(iso); - iso == na::try_convert(sim).unwrap() && - relative_eq!(iso, na::try_convert(aff).unwrap(), epsilon = 1.0e-7) && - relative_eq!(iso, na::try_convert(prj).unwrap(), epsilon = 1.0e-7) && - relative_eq!(iso, na::try_convert(tr).unwrap(), epsilon = 1.0e-7) && + prop_assert_eq!(iso, na::try_convert(sim).unwrap()); + prop_assert!(relative_eq!(iso, na::try_convert(aff).unwrap(), epsilon = 1.0e-7)); + prop_assert!(relative_eq!(iso, na::try_convert(prj).unwrap(), epsilon = 1.0e-7)); + prop_assert!(relative_eq!(iso, na::try_convert(tr).unwrap(), epsilon = 1.0e-7) ); - iso * v == sim * v && - relative_eq!(iso * v, aff * v, epsilon = 1.0e-7) && - relative_eq!(iso * v, prj * v, epsilon = 1.0e-7) && - relative_eq!(iso * v, tr * v, epsilon = 1.0e-7) && + prop_assert_eq!(iso * v, sim * v); + prop_assert!(relative_eq!(iso * v, aff * v, epsilon = 1.0e-7)); + prop_assert!(relative_eq!(iso * v, prj * v, epsilon = 1.0e-7)); + prop_assert!(relative_eq!(iso * v, tr * v, epsilon = 1.0e-7)); - iso * p == sim * p && - relative_eq!(iso * p, aff * p, epsilon = 1.0e-7) && - relative_eq!(iso * p, prj * p, epsilon = 1.0e-7) && - relative_eq!(iso * p, tr * p, epsilon = 1.0e-7) + prop_assert_eq!(iso * p, sim * p); + prop_assert!(relative_eq!(iso * p, aff * p, epsilon = 1.0e-7)); + prop_assert!(relative_eq!(iso * p, prj * p, epsilon = 1.0e-7)); + prop_assert!(relative_eq!(iso * p, tr * p, epsilon = 1.0e-7)); } - fn similarity_conversion(sim: Similarity3, v: Vector3, p: Point3) -> bool { + #[test] + fn similarity_conversion(sim in similarity3(), v in vector3(), p in point3()) { let aff: Affine3 = na::convert(sim); let prj: Projective3 = na::convert(sim); let tr: Transform3 = na::convert(sim); - relative_eq!(sim, na::try_convert(aff).unwrap(), epsilon = 1.0e-7) && - relative_eq!(sim, na::try_convert(prj).unwrap(), epsilon = 1.0e-7) && - relative_eq!(sim, na::try_convert(tr).unwrap(), epsilon = 1.0e-7) && + prop_assert!(relative_eq!(sim, na::try_convert(aff).unwrap(), epsilon = 1.0e-7)); + prop_assert!(relative_eq!(sim, na::try_convert(prj).unwrap(), epsilon 
= 1.0e-7)); + prop_assert!(relative_eq!(sim, na::try_convert(tr).unwrap(), epsilon = 1.0e-7)); - relative_eq!(sim * v, aff * v, epsilon = 1.0e-7) && - relative_eq!(sim * v, prj * v, epsilon = 1.0e-7) && - relative_eq!(sim * v, tr * v, epsilon = 1.0e-7) && + prop_assert!(relative_eq!(sim * v, aff * v, epsilon = 1.0e-7)); + prop_assert!(relative_eq!(sim * v, prj * v, epsilon = 1.0e-7)); + prop_assert!(relative_eq!(sim * v, tr * v, epsilon = 1.0e-7)); - relative_eq!(sim * p, aff * p, epsilon = 1.0e-7) && - relative_eq!(sim * p, prj * p, epsilon = 1.0e-7) && - relative_eq!(sim * p, tr * p, epsilon = 1.0e-7) + prop_assert!(relative_eq!(sim * p, aff * p, epsilon = 1.0e-7)); + prop_assert!(relative_eq!(sim * p, prj * p, epsilon = 1.0e-7)); + prop_assert!(relative_eq!(sim * p, tr * p, epsilon = 1.0e-7)); } // XXX test Transform diff --git a/tests/core/helper.rs b/tests/core/helper.rs index 42938580..ef749da6 100644 --- a/tests/core/helper.rs +++ b/tests/core/helper.rs @@ -12,7 +12,7 @@ pub struct RandComplex<N>(pub Complex<N>); impl<N: Arbitrary + RealField> Arbitrary for RandComplex<N> { #[inline] - fn arbitrary<G: Gen>(rng: &mut G) -> Self { + fn arbitrary(rng: &mut Gen) -> Self { let im = Arbitrary::arbitrary(rng); let re = Arbitrary::arbitrary(rng); RandComplex(Complex::new(re, im)) @@ -38,7 +38,7 @@ pub struct RandScalar<N>(pub N); impl<N: Arbitrary + RealField> Arbitrary for RandScalar<N> { #[inline] - fn arbitrary<G: Gen>(rng: &mut G) -> Self { + fn arbitrary(rng: &mut Gen) -> Self { RandScalar(Arbitrary::arbitrary(rng)) } } diff --git a/tests/core/matrix.rs b/tests/core/matrix.rs index ed7b26d2..daa8b72f 100644 --- a/tests/core/matrix.rs +++ b/tests/core/matrix.rs @@ -21,6 +21,24 @@ fn iter() { assert_eq!(*it.next().unwrap(), 6.0); assert!(it.next().is_none()); + let mut it = a.iter(); + assert_eq!(*it.next().unwrap(), 1.0); + assert_eq!(*it.next_back().unwrap(), 6.0); + assert_eq!(*it.next_back().unwrap(), 3.0); + assert_eq!(*it.next_back().unwrap(), 5.0); + assert_eq!(*it.next().unwrap(), 4.0); + assert_eq!(*it.next().unwrap(), 2.0); + assert!(it.next().is_none()); + + let mut it = a.iter().rev(); + assert_eq!(*it.next().unwrap(), 6.0); + assert_eq!(*it.next().unwrap(), 3.0); + assert_eq!(*it.next().unwrap(), 5.0); + assert_eq!(*it.next().unwrap(), 2.0); + assert_eq!(*it.next().unwrap(), 4.0); + assert_eq!(*it.next().unwrap(), 1.0); + assert!(it.next().is_none()); + let row = a.row(0); let mut it = row.iter(); assert_eq!(*it.next().unwrap(), 1.0); @@ -811,151 +829,145 @@ fn swizzle() { assert_eq!(c.zyz(), Vector3::new(3.0, 2.0, 3.0)); } -#[cfg(feature = "arbitrary")] +#[cfg(feature = "proptest-support")] mod transposition_tests { use super::*; - use na::Matrix4x6; + use crate::proptest::{dmatrix, matrix, vector4, PROPTEST_F64}; + use na::{U2, U3, U4, U6}; + use proptest::{prop_assert, prop_assert_eq, proptest}; - quickcheck! { - fn transpose_transpose_is_self(m: Matrix2x3<f64>) -> bool { - m.transpose().transpose() == m + proptest!
{ + #[test] + fn transpose_transpose_is_self(m in matrix(PROPTEST_F64, U2, U3)) { + prop_assert_eq!(m.transpose().transpose(), m) } - fn transpose_mut_transpose_mut_is_self(m: Matrix3) -> bool { + #[test] + fn transpose_mut_transpose_mut_is_self(m in matrix(PROPTEST_F64, U3, U3)) { let mut mm = m; mm.transpose_mut(); mm.transpose_mut(); - m == mm + prop_assert_eq!(m, mm) } - fn transpose_transpose_is_id_dyn(m: DMatrix) -> bool { - m.transpose().transpose() == m + #[test] + fn transpose_transpose_is_id_dyn(m in dmatrix()) { + prop_assert_eq!(m.transpose().transpose(), m) } - fn check_transpose_components_dyn(m: DMatrix) -> bool { + #[test] + fn check_transpose_components_dyn(m in dmatrix()) { let tr = m.transpose(); let (nrows, ncols) = m.shape(); - if nrows != tr.shape().1 || ncols != tr.shape().0 { - return false - } + prop_assert!(nrows == tr.shape().1 && ncols == tr.shape().0); for i in 0 .. nrows { for j in 0 .. ncols { - if m[(i, j)] != tr[(j, i)] { - return false - } + prop_assert_eq!(m[(i, j)], tr[(j, i)]); } } - - true } - fn tr_mul_is_transpose_then_mul(m: Matrix4x6, v: Vector4) -> bool { - relative_eq!(m.transpose() * v, m.tr_mul(&v), epsilon = 1.0e-7) + #[test] + fn tr_mul_is_transpose_then_mul(m in matrix(PROPTEST_F64, U4, U6), v in vector4()) { + prop_assert!(relative_eq!(m.transpose() * v, m.tr_mul(&v), epsilon = 1.0e-7)) } } } -#[cfg(feature = "arbitrary")] +#[cfg(feature = "proptest-support")] mod inversion_tests { use super::*; + use crate::proptest::*; use na::Matrix1; + use proptest::{prop_assert, proptest}; - quickcheck! { - fn self_mul_inv_is_id_dim1(m: Matrix1) -> bool { + proptest! { + #[test] + fn self_mul_inv_is_id_dim1(m in matrix1()) { if let Some(im) = m.try_inverse() { let id = Matrix1::one(); - relative_eq!(im * m, id, epsilon = 1.0e-7) && - relative_eq!(m * im, id, epsilon = 1.0e-7) - } - else { - true + prop_assert!(relative_eq!(im * m, id, epsilon = 1.0e-7)); + prop_assert!(relative_eq!(m * im, id, epsilon = 1.0e-7)); } } - fn self_mul_inv_is_id_dim2(m: Matrix2) -> bool { + #[test] + fn self_mul_inv_is_id_dim2(m in matrix2()) { if let Some(im) = m.try_inverse() { let id = Matrix2::one(); - relative_eq!(im * m, id, epsilon = 1.0e-7) && - relative_eq!(m * im, id, epsilon = 1.0e-7) - } - else { - true + prop_assert!(relative_eq!(im * m, id, epsilon = 1.0e-7)); + prop_assert!(relative_eq!(m * im, id, epsilon = 1.0e-7)); } } - fn self_mul_inv_is_id_dim3(m: Matrix3) -> bool { + #[test] + fn self_mul_inv_is_id_dim3(m in matrix3()) { if let Some(im) = m.try_inverse() { let id = Matrix3::one(); - relative_eq!(im * m, id, epsilon = 1.0e-7) && - relative_eq!(m * im, id, epsilon = 1.0e-7) - } - else { - true + prop_assert!(relative_eq!(im * m, id, epsilon = 1.0e-7)); + prop_assert!(relative_eq!(m * im, id, epsilon = 1.0e-7)); } } - fn self_mul_inv_is_id_dim4(m: Matrix4) -> bool { + #[test] + fn self_mul_inv_is_id_dim4(m in matrix4()) { if let Some(im) = m.try_inverse() { let id = Matrix4::one(); - relative_eq!(im * m, id, epsilon = 1.0e-7) && - relative_eq!(m * im, id, epsilon = 1.0e-7) - } - else { - true + prop_assert!(relative_eq!(im * m, id, epsilon = 1.0e-7)); + prop_assert!(relative_eq!(m * im, id, epsilon = 1.0e-7)); } } - fn self_mul_inv_is_id_dim6(m: Matrix6) -> bool { + #[test] + fn self_mul_inv_is_id_dim6(m in matrix6()) { if let Some(im) = m.try_inverse() { let id = Matrix6::one(); - relative_eq!(im * m, id, epsilon = 1.0e-7) && - relative_eq!(m * im, id, epsilon = 1.0e-7) - } - else { - true + prop_assert!(relative_eq!(im * m, id, epsilon = 1.0e-7)); + 
prop_assert!(relative_eq!(m * im, id, epsilon = 1.0e-7)); } } } } -#[cfg(feature = "arbitrary")] +#[cfg(feature = "proptest-support")] mod normalization_tests { - use super::*; + use crate::proptest::*; + use proptest::{prop_assert, proptest}; - quickcheck! { - fn normalized_vec_norm_is_one(v: Vector3) -> bool { + proptest! { + #[test] + fn normalized_vec_norm_is_one(v in vector3()) { if let Some(nv) = v.try_normalize(1.0e-10) { - relative_eq!(nv.norm(), 1.0, epsilon = 1.0e-7) - } - else { - true + prop_assert!(relative_eq!(nv.norm(), 1.0, epsilon = 1.0e-7)); } } - fn normalized_vec_norm_is_one_dyn(v: DVector) -> bool { + #[test] + fn normalized_vec_norm_is_one_dyn(v in dvector()) { if let Some(nv) = v.try_normalize(1.0e-10) { - relative_eq!(nv.norm(), 1.0, epsilon = 1.0e-7) - } - else { - true + prop_assert!(relative_eq!(nv.norm(), 1.0, epsilon = 1.0e-7)); } } } } -#[cfg(all(feature = "arbitrary", feature = "alga"))] -// TODO: move this to alga ? +#[cfg(all(feature = "proptest-support", feature = "alga"))] +// TODO: move this to alga ? mod finite_dim_inner_space_tests { use super::*; + use crate::proptest::*; use alga::linear::FiniteDimInnerSpace; + use proptest::collection::vec; + use proptest::{prop_assert, proptest}; use std::fmt::Display; macro_rules! finite_dim_inner_space_test( - ($($Vector: ident, $orthonormal_subspace: ident, $orthonormalization: ident);* $(;)*) => {$( - quickcheck!{ - fn $orthonormal_subspace(vs: Vec<$Vector>) -> bool { + ($($Vector: ident, $vstrategy: ident, $orthonormal_subspace: ident, $orthonormalization: ident);* $(;)*) => {$( + proptest! { + #[test] + fn $orthonormal_subspace(vs in vec($vstrategy(), 0..10)) { let mut given_basis = vs.clone(); let given_basis_dim = $Vector::orthonormalize(&mut given_basis[..]); let mut ortho_basis = Vec::new(); @@ -964,29 +976,21 @@ mod finite_dim_inner_space_tests { |e| { ortho_basis.push(*e); true } ); - if !is_subspace_basis(&ortho_basis[..]) { - return false; - } + prop_assert!(is_subspace_basis(&ortho_basis[..])); for v in vs { for b in &ortho_basis { - if !relative_eq!(v.dot(b), 0.0, epsilon = 1.0e-7) { - println!("Found dot product: {} · {} = {}", v, b, v.dot(b)); - return false; - } + prop_assert!(relative_eq!(v.dot(b), 0.0, epsilon = 1.0e-7)); } } - - true } - fn $orthonormalization(vs: Vec<$Vector>) -> bool { + #[test] + fn $orthonormalization(vs in vec($vstrategy(), 0..10)) { let mut basis = vs.clone(); let subdim = $Vector::orthonormalize(&mut basis[..]); - if !is_subspace_basis(&basis[.. subdim]) { - return false; - } + prop_assert!(is_subspace_basis(&basis[.. subdim])); for mut e in vs { for b in &basis[.. subdim] { @@ -994,26 +998,20 @@ mod finite_dim_inner_space_tests { } // Any element of `e` must be a linear combination of the basis elements. - if !relative_eq!(e.norm(), 0.0, epsilon = 1.0e-7) { - println!("Orthonormalization; element decomposition failure: {}", e); - println!("... 
the non-zero norm is: {}", e.norm()); - return false; - } + prop_assert!(relative_eq!(e.norm(), 0.0, epsilon = 1.0e-7)); } - - true } } )*} ); finite_dim_inner_space_test!( - Vector1, orthonormal_subspace_basis1, orthonormalize1; - Vector2, orthonormal_subspace_basis2, orthonormalize2; - Vector3, orthonormal_subspace_basis3, orthonormalize3; - Vector4, orthonormal_subspace_basis4, orthonormalize4; - Vector5, orthonormal_subspace_basis5, orthonormalize5; - Vector6, orthonormal_subspace_basis6, orthonormalize6; + Vector1, vector1, orthonormal_subspace_basis1, orthonormalize1; + Vector2, vector2, orthonormal_subspace_basis2, orthonormalize2; + Vector3, vector3, orthonormal_subspace_basis3, orthonormalize3; + Vector4, vector4, orthonormal_subspace_basis4, orthonormalize4; + Vector5, vector5, orthonormal_subspace_basis5, orthonormalize5; + Vector6, vector6, orthonormal_subspace_basis6, orthonormalize6; ); /* @@ -1021,7 +1019,6 @@ mod finite_dim_inner_space_tests { * Helper functions. * */ - #[cfg(feature = "arbitrary")] fn is_subspace_basis<T: FiniteDimInnerSpace<RealField = f64, ComplexField = f64> + Display>( vs: &[T], ) -> bool { diff --git a/tests/core/matrixcompare.rs b/tests/core/matrixcompare.rs index cdd93ea3..ab3ecc2c 100644 --- a/tests/core/matrixcompare.rs +++ b/tests/core/matrixcompare.rs @@ -4,34 +4,32 @@ //! The tests here only check that the necessary trait implementations are correctly implemented, //! in addition to some sanity checks with example input. +use matrixcompare::assert_matrix_eq; use nalgebra::{MatrixMN, U4, U5}; -#[cfg(feature = "arbitrary")] -use nalgebra::DMatrix; +#[cfg(feature = "proptest-support")] +use { + crate::proptest::*, + matrixcompare::DenseAccess, + nalgebra::DMatrix, + proptest::{prop_assert_eq, proptest}, +}; -use matrixcompare::assert_matrix_eq; - -#[cfg(feature = "arbitrary")] -use matrixcompare::DenseAccess; - -#[cfg(feature = "arbitrary")] -quickcheck! { - fn fetch_single_is_equivalent_to_index_f64(matrix: DMatrix<f64>) -> bool { +#[cfg(feature = "proptest-support")] +proptest! { + #[test] + fn fetch_single_is_equivalent_to_index_f64(matrix in dmatrix()) { for i in 0 .. matrix.nrows() { for j in 0 ..
matrix.ncols() { - if matrix.fetch_single(i, j) != *matrix.index((i, j)) { - return false; - } + prop_assert_eq!(matrix.fetch_single(i, j), *matrix.index((i, j))); } } - - true } - fn matrixcompare_shape_agrees_with_matrix(matrix: DMatrix<f64>) -> bool { - matrix.nrows() == <DMatrix<f64> as matrixcompare::Matrix<f64>>::rows(&matrix) - && - matrix.ncols() == <DMatrix<f64> as matrixcompare::Matrix<f64>>::cols(&matrix) + #[test] + fn matrixcompare_shape_agrees_with_matrix(matrix in dmatrix()) { + prop_assert_eq!(matrix.nrows(), <DMatrix<f64> as matrixcompare::Matrix<f64>>::rows(&matrix)); + prop_assert_eq!(matrix.ncols(), <DMatrix<f64> as matrixcompare::Matrix<f64>>::cols(&matrix)); } } diff --git a/tests/geometry/dual_quaternion.rs b/tests/geometry/dual_quaternion.rs new file mode 100644 index 00000000..6cc975a5 --- /dev/null +++ b/tests/geometry/dual_quaternion.rs @@ -0,0 +1,210 @@ +#![cfg(feature = "proptest-support")] +#![allow(non_snake_case)] + +use na::{DualQuaternion, Point3, UnitDualQuaternion, Vector3}; + +use crate::proptest::*; +use proptest::{prop_assert, proptest}; + +proptest!( + #[test] + fn isometry_equivalence(iso in isometry3(), p in point3(), v in vector3()) { + let dq = UnitDualQuaternion::from_isometry(&iso); + + prop_assert!(relative_eq!(iso * p, dq * p, epsilon = 1.0e-7)); + prop_assert!(relative_eq!(iso * v, dq * v, epsilon = 1.0e-7)); + } + + #[test] + fn inverse_is_identity(i in unit_dual_quaternion(), p in point3(), v in vector3()) { + let ii = i.inverse(); + + prop_assert!(relative_eq!(i * ii, UnitDualQuaternion::identity(), epsilon = 1.0e-7) + && relative_eq!(ii * i, UnitDualQuaternion::identity(), epsilon = 1.0e-7) + && relative_eq!((i * ii) * p, p, epsilon = 1.0e-7) + && relative_eq!((ii * i) * p, p, epsilon = 1.0e-7) + && relative_eq!((i * ii) * v, v, epsilon = 1.0e-7) + && relative_eq!((ii * i) * v, v, epsilon = 1.0e-7)); + } + + #[cfg_attr(rustfmt, rustfmt_skip)] + #[test] + fn multiply_equals_alga_transform( + dq in unit_dual_quaternion(), + v in vector3(), + p in point3() + ) { + prop_assert!(dq * v == dq.transform_vector(&v) + && dq * p == dq.transform_point(&p) + && relative_eq!( + dq.inverse() * v, + dq.inverse_transform_vector(&v), + epsilon = 1.0e-7 + ) + && relative_eq!( + dq.inverse() * p, + dq.inverse_transform_point(&p), + epsilon = 1.0e-7 + )); + } + + #[cfg_attr(rustfmt, rustfmt_skip)] + #[test] + fn composition( + dq in unit_dual_quaternion(), + uq in unit_quaternion(), + t in translation3(), + v in vector3(), + p in point3() + ) { + // (rotation × dual quaternion) * point = rotation × (dual quaternion * point) + prop_assert!(relative_eq!((uq * dq) * v, uq * (dq * v), epsilon = 1.0e-7)); + prop_assert!(relative_eq!((uq * dq) * p, uq * (dq * p), epsilon = 1.0e-7)); + + // (dual quaternion × rotation) * point = dual quaternion × (rotation * point) + prop_assert!(relative_eq!((dq * uq) * v, dq * (uq * v), epsilon = 1.0e-7)); + prop_assert!(relative_eq!((dq * uq) * p, dq * (uq * p), epsilon = 1.0e-7)); + + // (translation × dual quaternion) * point = translation × (dual quaternion * point) + prop_assert!(relative_eq!((t * dq) * v, (dq * v), epsilon = 1.0e-7)); + prop_assert!(relative_eq!((t * dq) * p, t * (dq * p), epsilon = 1.0e-7)); + + // (dual quaternion × translation) * point = dual quaternion × (translation * point) + prop_assert!(relative_eq!((dq * t) * v, dq * v, epsilon = 1.0e-7)); + prop_assert!(relative_eq!((dq * t) * p, dq * (t * p), epsilon = 1.0e-7)); + } + + #[cfg_attr(rustfmt, rustfmt_skip)] + #[test] + fn all_op_exist( + dq in dual_quaternion(), + udq in unit_dual_quaternion(), + uq in unit_quaternion(), + s in
PROPTEST_F64, + t in translation3(), + v in vector3(), + p in point3() + ) { + let dqMs: DualQuaternion<_> = dq * s; + + let dqMdq: DualQuaternion<_> = dq * dq; + let dqMudq: DualQuaternion<_> = dq * udq; + let udqMdq: DualQuaternion<_> = udq * dq; + + let iMi: UnitDualQuaternion<_> = udq * udq; + let iMuq: UnitDualQuaternion<_> = udq * uq; + let iDi: UnitDualQuaternion<_> = udq / udq; + let iDuq: UnitDualQuaternion<_> = udq / uq; + + let iMp: Point3<_> = udq * p; + let iMv: Vector3<_> = udq * v; + + let iMt: UnitDualQuaternion<_> = udq * t; + let tMi: UnitDualQuaternion<_> = t * udq; + + let uqMi: UnitDualQuaternion<_> = uq * udq; + let uqDi: UnitDualQuaternion<_> = uq / udq; + + let mut dqMs1 = dq; + + let mut dqMdq1 = dq; + let mut dqMdq2 = dq; + + let mut dqMudq1 = dq; + let mut dqMudq2 = dq; + + let mut iMt1 = udq; + let mut iMt2 = udq; + + let mut iMi1 = udq; + let mut iMi2 = udq; + + let mut iMuq1 = udq; + let mut iMuq2 = udq; + + let mut iDi1 = udq; + let mut iDi2 = udq; + + let mut iDuq1 = udq; + let mut iDuq2 = udq; + + dqMs1 *= s; + + dqMdq1 *= dq; + dqMdq2 *= &dq; + + dqMudq1 *= udq; + dqMudq2 *= &udq; + + iMt1 *= t; + iMt2 *= &t; + + iMi1 *= udq; + iMi2 *= &udq; + + iMuq1 *= uq; + iMuq2 *= &uq; + + iDi1 /= udq; + iDi2 /= &udq; + + iDuq1 /= uq; + iDuq2 /= &uq; + + prop_assert!(dqMs == dqMs1 + && dqMdq == dqMdq1 + && dqMdq == dqMdq2 + && dqMudq == dqMudq1 + && dqMudq == dqMudq2 + && iMt == iMt1 + && iMt == iMt2 + && iMi == iMi1 + && iMi == iMi2 + && iMuq == iMuq1 + && iMuq == iMuq2 + && iDi == iDi1 + && iDi == iDi2 + && iDuq == iDuq1 + && iDuq == iDuq2 + && dqMs == &dq * s + && dqMdq == &dq * &dq + && dqMdq == dq * &dq + && dqMdq == &dq * dq + && dqMudq == &dq * &udq + && dqMudq == dq * &udq + && dqMudq == &dq * udq + && udqMdq == &udq * &dq + && udqMdq == udq * &dq + && udqMdq == &udq * dq + && iMi == &udq * &udq + && iMi == udq * &udq + && iMi == &udq * udq + && iMuq == &udq * &uq + && iMuq == udq * &uq + && iMuq == &udq * uq + && iDi == &udq / &udq + && iDi == udq / &udq + && iDi == &udq / udq + && iDuq == &udq / &uq + && iDuq == udq / &uq + && iDuq == &udq / uq + && iMp == &udq * &p + && iMp == udq * &p + && iMp == &udq * p + && iMv == &udq * &v + && iMv == udq * &v + && iMv == &udq * v + && iMt == &udq * &t + && iMt == udq * &t + && iMt == &udq * t + && tMi == &t * &udq + && tMi == t * &udq + && tMi == &t * udq + && uqMi == &uq * &udq + && uqMi == uq * &udq + && uqMi == &uq * udq + && uqDi == &uq / &udq + && uqDi == uq / &udq + && uqDi == &uq / udq) + } +); diff --git a/tests/geometry/isometry.rs b/tests/geometry/isometry.rs index 6d48c6bf..cfacaffd 100644 --- a/tests/geometry/isometry.rs +++ b/tests/geometry/isometry.rs @@ -1,67 +1,74 @@ -#![cfg(feature = "arbitrary")] +#![cfg(feature = "proptest-support")] #![allow(non_snake_case)] -use na::{ - Isometry2, Isometry3, Point2, Point3, Rotation2, Rotation3, Translation2, Translation3, - UnitComplex, UnitQuaternion, Vector2, Vector3, -}; +use na::{Isometry3, Point3, Vector3}; -quickcheck!( - fn append_rotation_wrt_point_to_id(r: UnitQuaternion, p: Point3) -> bool { +use crate::proptest::*; +use proptest::{prop_assert, prop_assert_eq, proptest}; + +proptest!( + #[test] + fn append_rotation_wrt_point_to_id(r in unit_quaternion(), p in point3()) { let mut iso = Isometry3::identity(); iso.append_rotation_wrt_point_mut(&r, &p); - iso == Isometry3::rotation_wrt_point(r, p) + prop_assert_eq!(iso, Isometry3::rotation_wrt_point(r, p)) } - fn rotation_wrt_point_invariance(r: UnitQuaternion, p: Point3) -> bool { + #[test] + fn 
rotation_wrt_point_invariance(r in unit_quaternion(), p in point3()) { let iso = Isometry3::rotation_wrt_point(r, p); - relative_eq!(iso * p, p, epsilon = 1.0e-7) + prop_assert!(relative_eq!(iso * p, p, epsilon = 1.0e-7)) } - fn look_at_rh_3(eye: Point3, target: Point3, up: Vector3) -> bool { + #[test] + fn look_at_rh_3(eye in point3(), target in point3(), up in vector3()) { let viewmatrix = Isometry3::look_at_rh(&eye, &target, &up); - let origin = Point3::origin(); - relative_eq!(viewmatrix * eye, origin, epsilon = 1.0e-7) + + prop_assert!(relative_eq!(viewmatrix * eye, origin, epsilon = 1.0e-7) && relative_eq!( (viewmatrix * (target - eye)).normalize(), -Vector3::z(), epsilon = 1.0e-7 - ) + )) } - fn observer_frame_3(eye: Point3, target: Point3, up: Vector3) -> bool { + #[test] + fn observer_frame_3(eye in point3(), target in point3(), up in vector3()) { let observer = Isometry3::face_towards(&eye, &target, &up); - let origin = Point3::origin(); - relative_eq!(observer * origin, eye, epsilon = 1.0e-7) + + prop_assert!(relative_eq!(observer * origin, eye, epsilon = 1.0e-7) && relative_eq!( observer * Vector3::z(), (target - eye).normalize(), epsilon = 1.0e-7 - ) + )) } - fn inverse_is_identity(i: Isometry3, p: Point3, v: Vector3) -> bool { + #[test] + fn inverse_is_identity(i in isometry3(), p in point3(), v in vector3()) { let ii = i.inverse(); - relative_eq!(i * ii, Isometry3::identity(), epsilon = 1.0e-7) + prop_assert!(relative_eq!(i * ii, Isometry3::identity(), epsilon = 1.0e-7) && relative_eq!(ii * i, Isometry3::identity(), epsilon = 1.0e-7) && relative_eq!((i * ii) * p, p, epsilon = 1.0e-7) && relative_eq!((ii * i) * p, p, epsilon = 1.0e-7) && relative_eq!((i * ii) * v, v, epsilon = 1.0e-7) - && relative_eq!((ii * i) * v, v, epsilon = 1.0e-7) + && relative_eq!((ii * i) * v, v, epsilon = 1.0e-7)) } - fn inverse_is_parts_inversion(t: Translation3, r: UnitQuaternion) -> bool { + #[test] + fn inverse_is_parts_inversion(t in translation3(), r in unit_quaternion()) { let i = t * r; - i.inverse() == r.inverse() * t.inverse() + prop_assert!(i.inverse() == r.inverse() * t.inverse()) } - fn multiply_equals_alga_transform(i: Isometry3, v: Vector3, p: Point3) -> bool { - i * v == i.transform_vector(&v) + #[test] + fn multiply_equals_alga_transform(i in isometry3(), v in vector3(), p in point3()) { + prop_assert!(i * v == i.transform_vector(&v) && i * p == i.transform_point(&p) && relative_eq!( i.inverse() * v, @@ -72,94 +79,97 @@ quickcheck!( i.inverse() * p, i.inverse_transform_point(&p), epsilon = 1.0e-7 - ) + )) } + #[test] #[cfg_attr(rustfmt, rustfmt_skip)] fn composition2( - i: Isometry2, - uc: UnitComplex, - r: Rotation2, - t: Translation2, - v: Vector2, - p: Point2 - ) -> bool { + i in isometry2(), + uc in unit_complex(), + r in rotation2(), + t in translation2(), + v in vector2(), + p in point2() + ) { // (rotation × translation) * point = rotation × (translation * point) - relative_eq!((uc * t) * v, uc * v, epsilon = 1.0e-7) && - relative_eq!((r * t) * v, r * v, epsilon = 1.0e-7) && - relative_eq!((uc * t) * p, uc * (t * p), epsilon = 1.0e-7) && - relative_eq!((r * t) * p, r * (t * p), epsilon = 1.0e-7) && + prop_assert!(relative_eq!((uc * t) * v, uc * v, epsilon = 1.0e-7)); + prop_assert!(relative_eq!((r * t) * v, r * v, epsilon = 1.0e-7)); + prop_assert!(relative_eq!((uc * t) * p, uc * (t * p), epsilon = 1.0e-7)); + prop_assert!(relative_eq!((r * t) * p, r * (t * p), epsilon = 1.0e-7)); // (translation × rotation) * point = translation × (rotation * point) - (t * uc) * v == uc * v 
&& - (t * r) * v == r * v && - (t * uc) * p == t * (uc * p) && - (t * r) * p == t * (r * p) && + prop_assert_eq!((t * uc) * v, uc * v); + prop_assert_eq!((t * r) * v, r * v); + prop_assert_eq!((t * uc) * p, t * (uc * p)); + prop_assert_eq!((t * r) * p, t * (r * p)); // (rotation × isometry) * point = rotation × (isometry * point) - relative_eq!((uc * i) * v, uc * (i * v), epsilon = 1.0e-7) && - relative_eq!((uc * i) * p, uc * (i * p), epsilon = 1.0e-7) && + prop_assert!(relative_eq!((uc * i) * v, uc * (i * v), epsilon = 1.0e-7)); + prop_assert!(relative_eq!((uc * i) * p, uc * (i * p), epsilon = 1.0e-7)); // (isometry × rotation) * point = isometry × (rotation * point) - relative_eq!((i * uc) * v, i * (uc * v), epsilon = 1.0e-7) && - relative_eq!((i * uc) * p, i * (uc * p), epsilon = 1.0e-7) && + prop_assert!(relative_eq!((i * uc) * v, i * (uc * v), epsilon = 1.0e-7)); + prop_assert!(relative_eq!((i * uc) * p, i * (uc * p), epsilon = 1.0e-7)); // (translation × isometry) * point = translation × (isometry * point) - relative_eq!((t * i) * v, (i * v), epsilon = 1.0e-7) && - relative_eq!((t * i) * p, t * (i * p), epsilon = 1.0e-7) && + prop_assert!(relative_eq!((t * i) * v, (i * v), epsilon = 1.0e-7)); + prop_assert!(relative_eq!((t * i) * p, t * (i * p), epsilon = 1.0e-7)); // (isometry × translation) * point = isometry × (translation * point) - relative_eq!((i * t) * v, i * v, epsilon = 1.0e-7) && - relative_eq!((i * t) * p, i * (t * p), epsilon = 1.0e-7) + prop_assert!(relative_eq!((i * t) * v, i * v, epsilon = 1.0e-7)); + prop_assert!(relative_eq!((i * t) * p, i * (t * p), epsilon = 1.0e-7)); } + #[test] #[cfg_attr(rustfmt, rustfmt_skip)] fn composition3( - i: Isometry3, - uq: UnitQuaternion, - r: Rotation3, - t: Translation3, - v: Vector3, - p: Point3 - ) -> bool { + i in isometry3(), + uq in unit_quaternion(), + r in rotation3(), + t in translation3(), + v in vector3(), + p in point3() + ) { // (rotation × translation) * point = rotation × (translation * point) - relative_eq!((uq * t) * v, uq * v, epsilon = 1.0e-7) && - relative_eq!((r * t) * v, r * v, epsilon = 1.0e-7) && - relative_eq!((uq * t) * p, uq * (t * p), epsilon = 1.0e-7) && - relative_eq!((r * t) * p, r * (t * p), epsilon = 1.0e-7) && + prop_assert!(relative_eq!((uq * t) * v, uq * v, epsilon = 1.0e-7)); + prop_assert!(relative_eq!((r * t) * v, r * v, epsilon = 1.0e-7)); + prop_assert!(relative_eq!((uq * t) * p, uq * (t * p), epsilon = 1.0e-7)); + prop_assert!(relative_eq!((r * t) * p, r * (t * p), epsilon = 1.0e-7)); // (translation × rotation) * point = translation × (rotation * point) - (t * uq) * v == uq * v && - (t * r) * v == r * v && - (t * uq) * p == t * (uq * p) && - (t * r) * p == t * (r * p) && + prop_assert_eq!((t * uq) * v, uq * v); + prop_assert_eq!((t * r) * v, r * v); + prop_assert_eq!((t * uq) * p, t * (uq * p)); + prop_assert_eq!((t * r) * p, t * (r * p)); // (rotation × isometry) * point = rotation × (isometry * point) - relative_eq!((uq * i) * v, uq * (i * v), epsilon = 1.0e-7) && - relative_eq!((uq * i) * p, uq * (i * p), epsilon = 1.0e-7) && + prop_assert!(relative_eq!((uq * i) * v, uq * (i * v), epsilon = 1.0e-7)); + prop_assert!(relative_eq!((uq * i) * p, uq * (i * p), epsilon = 1.0e-7)); // (isometry × rotation) * point = isometry × (rotation * point) - relative_eq!((i * uq) * v, i * (uq * v), epsilon = 1.0e-7) && - relative_eq!((i * uq) * p, i * (uq * p), epsilon = 1.0e-7) && + prop_assert!(relative_eq!((i * uq) * v, i * (uq * v), epsilon = 1.0e-7)); + prop_assert!(relative_eq!((i * uq) * p, i * (uq * 
p), epsilon = 1.0e-7)); // (translation × isometry) * point = translation × (isometry * point) - relative_eq!((t * i) * v, (i * v), epsilon = 1.0e-7) && - relative_eq!((t * i) * p, t * (i * p), epsilon = 1.0e-7) && + prop_assert!(relative_eq!((t * i) * v, (i * v), epsilon = 1.0e-7)); + prop_assert!(relative_eq!((t * i) * p, t * (i * p), epsilon = 1.0e-7)); // (isometry × translation) * point = isometry × (translation * point) - relative_eq!((i * t) * v, i * v, epsilon = 1.0e-7) && - relative_eq!((i * t) * p, i * (t * p), epsilon = 1.0e-7) + prop_assert!(relative_eq!((i * t) * v, i * v, epsilon = 1.0e-7)); + prop_assert!(relative_eq!((i * t) * p, i * (t * p), epsilon = 1.0e-7)); } + #[test] #[cfg_attr(rustfmt, rustfmt_skip)] fn all_op_exist( - i: Isometry3, - uq: UnitQuaternion, - t: Translation3, - v: Vector3, - p: Point3, - r: Rotation3 - ) -> bool { + i in isometry3(), + uq in unit_quaternion(), + t in translation3(), + v in vector3(), + p in point3(), + r in rotation3() + ) { let iMi = i * i; let iMuq = i * uq; let iDi = i / i; @@ -210,7 +220,7 @@ quickcheck!( iDuq1 /= uq; iDuq2 /= &uq; - iMt == iMt1 + prop_assert!(iMt == iMt1 && iMt == iMt2 && iMi == iMi1 && iMi == iMi2 @@ -261,6 +271,6 @@ quickcheck!( && rMt == &r * t && uqMt == &uq * &t && uqMt == uq * &t - && uqMt == &uq * t + && uqMt == &uq * t) } ); diff --git a/tests/geometry/mod.rs b/tests/geometry/mod.rs index ec9755a0..2363d411 100644 --- a/tests/geometry/mod.rs +++ b/tests/geometry/mod.rs @@ -1,3 +1,4 @@ +mod dual_quaternion; mod isometry; mod point; mod projection; diff --git a/tests/geometry/point.rs b/tests/geometry/point.rs index 896a09d6..22b0f598 100644 --- a/tests/geometry/point.rs +++ b/tests/geometry/point.rs @@ -92,11 +92,3 @@ fn to_homogeneous() { assert_eq!(a.to_homogeneous(), expected); } - -#[cfg(feature = "arbitrary")] -quickcheck!( - fn point_sub(pt1: Point3, pt2: Point3) -> bool { - let dpt = &pt2 - &pt1; - relative_eq!(pt2, pt1 + dpt, epsilon = 1.0e-7) - } -); diff --git a/tests/geometry/projection.rs b/tests/geometry/projection.rs index 626c4ffb..1e0c9fd5 100644 --- a/tests/geometry/projection.rs +++ b/tests/geometry/projection.rs @@ -22,7 +22,7 @@ fn orthographic_inverse() { #[test] fn perspective_matrix_point_transformation() { - // https://github.com/rustsim/nalgebra/issues/640 + // https://github.com/dimforge/nalgebra/issues/640 let proj = Perspective3::new(4.0 / 3.0, 90.0, 0.1, 100.0); let perspective_inv = proj.as_matrix().try_inverse().unwrap(); let some_point = Point3::new(1.0, 2.0, 0.0); @@ -33,27 +33,32 @@ fn perspective_matrix_point_transformation() { ); } -#[cfg(feature = "arbitrary")] -mod quickcheck_tests { - use na::{Orthographic3, Perspective3, Point3}; +#[cfg(feature = "proptest-support")] +mod proptest_tests { + use na::{Orthographic3, Perspective3}; - quickcheck! { - fn perspective_project_unproject(pt: Point3) -> bool { + use crate::proptest::*; + use proptest::{prop_assert, proptest}; + + proptest! 
{ + #[test] + fn perspective_project_unproject(pt in point3()) { let proj = Perspective3::new(800.0 / 600.0, 3.14 / 2.0, 1.0, 1000.0); let projected = proj.project_point(&pt); let unprojected = proj.unproject_point(&projected); - relative_eq!(pt, unprojected, epsilon = 1.0e-7) + prop_assert!(relative_eq!(pt, unprojected, epsilon = 1.0e-7)) } - fn orthographic_project_unproject(pt: Point3) -> bool { + #[test] + fn orthographic_project_unproject(pt in point3()) { let proj = Orthographic3::new(1.0, 2.0, -3.0, -2.5, 10.0, 900.0); let projected = proj.project_point(&pt); let unprojected = proj.unproject_point(&projected); - relative_eq!(pt, unprojected, epsilon = 1.0e-7) + prop_assert!(relative_eq!(pt, unprojected, epsilon = 1.0e-7)) } } } diff --git a/tests/geometry/quaternion.rs b/tests/geometry/quaternion.rs index 5ff20a0e..75d8b870 100644 --- a/tests/geometry/quaternion.rs +++ b/tests/geometry/quaternion.rs @@ -1,15 +1,19 @@ -#![cfg(feature = "arbitrary")] +#![cfg(feature = "proptest-support")] #![allow(non_snake_case)] -use na::{Point3, Quaternion, Rotation3, Unit, UnitQuaternion, Vector3}; +use na::{Unit, UnitQuaternion}; -quickcheck!( +use crate::proptest::*; +use proptest::{prop_assert, proptest}; + +proptest!( /* * * Euler angles. * */ - fn from_euler_angles(r: f64, p: f64, y: f64) -> bool { + #[test] + fn from_euler_angles(r in PROPTEST_F64, p in PROPTEST_F64, y in PROPTEST_F64) { let roll = UnitQuaternion::from_euler_angles(r, 0.0, 0.0); let pitch = UnitQuaternion::from_euler_angles(0.0, p, 0.0); let yaw = UnitQuaternion::from_euler_angles(0.0, 0.0, y); @@ -20,20 +24,21 @@ quickcheck!( let rpitch = pitch.to_rotation_matrix(); let ryaw = yaw.to_rotation_matrix(); - relative_eq!(rroll[(0, 0)], 1.0, epsilon = 1.0e-7) && // rotation wrt. x axis. - relative_eq!(rpitch[(1, 1)], 1.0, epsilon = 1.0e-7) && // rotation wrt. y axis. - relative_eq!(ryaw[(2, 2)], 1.0, epsilon = 1.0e-7) && // rotation wrt. z axis. - relative_eq!(yaw * pitch * roll, rpy, epsilon = 1.0e-7) + prop_assert!(relative_eq!(rroll[(0, 0)], 1.0, epsilon = 1.0e-7)); // rotation wrt. x axis. + prop_assert!(relative_eq!(rpitch[(1, 1)], 1.0, epsilon = 1.0e-7)); // rotation wrt. y axis. + prop_assert!(relative_eq!(ryaw[(2, 2)], 1.0, epsilon = 1.0e-7)); // rotation wrt. z axis. + prop_assert!(relative_eq!(yaw * pitch * roll, rpy, epsilon = 1.0e-7)); } - fn euler_angles(r: f64, p: f64, y: f64) -> bool { + #[test] + fn euler_angles(r in PROPTEST_F64, p in PROPTEST_F64, y in PROPTEST_F64) { let rpy = UnitQuaternion::from_euler_angles(r, p, y); let (roll, pitch, yaw) = rpy.euler_angles(); - relative_eq!( + prop_assert!(relative_eq!( UnitQuaternion::from_euler_angles(roll, pitch, yaw), rpy, epsilon = 1.0e-7 - ) + )) } /* @@ -41,12 +46,13 @@ quickcheck!( * From/to rotation matrix. 
* */ - fn unit_quaternion_rotation_conversion(q: UnitQuaternion) -> bool { + #[test] + fn unit_quaternion_rotation_conversion(q in unit_quaternion()) { let r = q.to_rotation_matrix(); let qq = UnitQuaternion::from_rotation_matrix(&r); let rr = qq.to_rotation_matrix(); - relative_eq!(q, qq, epsilon = 1.0e-7) && relative_eq!(r, rr, epsilon = 1.0e-7) + prop_assert!(relative_eq!(q, qq, epsilon = 1.0e-7) && relative_eq!(r, rr, epsilon = 1.0e-7)) } /* @@ -55,24 +61,25 @@ quickcheck!( * */ + #[test] #[cfg_attr(rustfmt, rustfmt_skip)] fn unit_quaternion_transformation( - q: UnitQuaternion, - v: Vector3, - p: Point3 - ) -> bool { + q in unit_quaternion(), + v in vector3(), + p in point3() + ) { let r = q.to_rotation_matrix(); let rv = r * v; let rp = r * p; - relative_eq!(q * v, rv, epsilon = 1.0e-7) + prop_assert!(relative_eq!(q * v, rv, epsilon = 1.0e-7) && relative_eq!(q * &v, rv, epsilon = 1.0e-7) && relative_eq!(&q * v, rv, epsilon = 1.0e-7) && relative_eq!(&q * &v, rv, epsilon = 1.0e-7) && relative_eq!(q * p, rp, epsilon = 1.0e-7) && relative_eq!(q * &p, rp, epsilon = 1.0e-7) && relative_eq!(&q * p, rp, epsilon = 1.0e-7) - && relative_eq!(&q * &p, rp, epsilon = 1.0e-7) + && relative_eq!(&q * &p, rp, epsilon = 1.0e-7)) } /* @@ -80,16 +87,17 @@ quickcheck!( * Inversion. * */ - fn unit_quaternion_inv(q: UnitQuaternion) -> bool { + #[test] + fn unit_quaternion_inv(q in unit_quaternion()) { let iq = q.inverse(); - relative_eq!(&iq * &q, UnitQuaternion::identity(), epsilon = 1.0e-7) + prop_assert!(relative_eq!(&iq * &q, UnitQuaternion::identity(), epsilon = 1.0e-7) && relative_eq!(iq * &q, UnitQuaternion::identity(), epsilon = 1.0e-7) && relative_eq!(&iq * q, UnitQuaternion::identity(), epsilon = 1.0e-7) && relative_eq!(iq * q, UnitQuaternion::identity(), epsilon = 1.0e-7) && relative_eq!(&q * &iq, UnitQuaternion::identity(), epsilon = 1.0e-7) && relative_eq!(q * &iq, UnitQuaternion::identity(), epsilon = 1.0e-7) && relative_eq!(&q * iq, UnitQuaternion::identity(), epsilon = 1.0e-7) - && relative_eq!(q * iq, UnitQuaternion::identity(), epsilon = 1.0e-7) + && relative_eq!(q * iq, UnitQuaternion::identity(), epsilon = 1.0e-7)) } /* @@ -97,14 +105,15 @@ quickcheck!( * Quaterion * Vector == Rotation * Vector * */ - fn unit_quaternion_mul_vector(q: UnitQuaternion, v: Vector3, p: Point3) -> bool { + #[test] + fn unit_quaternion_mul_vector(q in unit_quaternion(), v in vector3(), p in point3()) { let r = q.to_rotation_matrix(); - relative_eq!(q * v, r * v, epsilon = 1.0e-7) && - relative_eq!(q * p, r * p, epsilon = 1.0e-7) && + prop_assert!(relative_eq!(q * v, r * v, epsilon = 1.0e-7)); + prop_assert!(relative_eq!(q * p, r * p, epsilon = 1.0e-7)); // Equivalence q = -q - relative_eq!(UnitQuaternion::new_unchecked(-q.into_inner()) * v, r * v, epsilon = 1.0e-7) && - relative_eq!(UnitQuaternion::new_unchecked(-q.into_inner()) * p, r * p, epsilon = 1.0e-7) + prop_assert!(relative_eq!(UnitQuaternion::new_unchecked(-q.into_inner()) * v, r * v, epsilon = 1.0e-7)); + prop_assert!(relative_eq!(UnitQuaternion::new_unchecked(-q.into_inner()) * p, r * p, epsilon = 1.0e-7)); } /* @@ -112,23 +121,25 @@ quickcheck!( * Unit quaternion double-covering. * */ - fn unit_quaternion_double_covering(q: UnitQuaternion) -> bool { + #[test] + fn unit_quaternion_double_covering(q in unit_quaternion()) { let mq = UnitQuaternion::new_unchecked(-q.into_inner()); - mq == q && mq.angle() == q.angle() && mq.axis() == q.axis() + prop_assert!(mq == q && mq.angle() == q.angle() && mq.axis() == q.axis()) } // Test that all operators (incl. 
all combinations of references) work. // See the top comment on `geometry/quaternion_ops.rs` for details on which operations are // supported. + #[test] #[cfg_attr(rustfmt, rustfmt_skip)] fn all_op_exist( - q: Quaternion, - uq: UnitQuaternion, - v: Vector3, - p: Point3, - r: Rotation3, - s: f64 - ) -> bool { + q in quaternion(), + uq in unit_quaternion(), + v in vector3(), + p in point3(), + r in rotation3(), + s in PROPTEST_F64 + ) { let uv = Unit::new_normalize(v); let qpq = q + q; @@ -196,7 +207,7 @@ quickcheck!( uqDr1 /= r; uqDr2 /= &r; - qMs1 == qMs + prop_assert!(qMs1 == qMs && qMq1 == qMq && qMq1 == qMq2 && qpq1 == qpq @@ -250,6 +261,6 @@ quickcheck!( && uqMv == &uq * v && uqMuv == &uq * &uv && uqMuv == uq * &uv - && uqMuv == &uq * uv + && uqMuv == &uq * uv) } ); diff --git a/tests/geometry/rotation.rs b/tests/geometry/rotation.rs index 2ada2939..9a29772e 100644 --- a/tests/geometry/rotation.rs +++ b/tests/geometry/rotation.rs @@ -30,44 +30,50 @@ fn quaternion_euler_angles_issue_494() { assert_eq!(angs.2, 0.0); } -#[cfg(feature = "arbitrary")] -mod quickcheck_tests { - use na::{self, Rotation2, Rotation3, Unit, Vector2, Vector3}; +#[cfg(feature = "proptest-support")] +mod proptest_tests { + use na::{self, Rotation2, Rotation3, Unit}; use simba::scalar::RealField; use std::f64; - quickcheck! { + use crate::proptest::*; + use proptest::{prop_assert, prop_assert_eq, proptest}; + + proptest! { /* * * Euler angles. * */ - fn from_euler_angles(r: f64, p: f64, y: f64) -> bool { + #[test] + fn from_euler_angles(r in PROPTEST_F64, p in PROPTEST_F64, y in PROPTEST_F64) { let roll = Rotation3::from_euler_angles(r, 0.0, 0.0); let pitch = Rotation3::from_euler_angles(0.0, p, 0.0); let yaw = Rotation3::from_euler_angles(0.0, 0.0, y); let rpy = Rotation3::from_euler_angles(r, p, y); - roll[(0, 0)] == 1.0 && // rotation wrt. x axis. - pitch[(1, 1)] == 1.0 && // rotation wrt. y axis. - yaw[(2, 2)] == 1.0 && // rotation wrt. z axis. - yaw * pitch * roll == rpy + prop_assert_eq!(roll[(0, 0)], 1.0); // rotation wrt. x axis. + prop_assert_eq!(pitch[(1, 1)], 1.0); // rotation wrt. y axis. + prop_assert_eq!(yaw[(2, 2)], 1.0); // rotation wrt. z axis. + prop_assert_eq!(yaw * pitch * roll, rpy); } - fn euler_angles(r: f64, p: f64, y: f64) -> bool { + #[test] + fn euler_angles(r in PROPTEST_F64, p in PROPTEST_F64, y in PROPTEST_F64) { let rpy = Rotation3::from_euler_angles(r, p, y); let (roll, pitch, yaw) = rpy.euler_angles(); - relative_eq!(Rotation3::from_euler_angles(roll, pitch, yaw), rpy, epsilon = 1.0e-7) + prop_assert!(relative_eq!(Rotation3::from_euler_angles(roll, pitch, yaw), rpy, epsilon = 1.0e-7)); } - fn euler_angles_gimble_lock(r: f64, y: f64) -> bool { + #[test] + fn euler_angles_gimble_lock(r in PROPTEST_F64, y in PROPTEST_F64) { let pos = Rotation3::from_euler_angles(r, f64::frac_pi_2(), y); let neg = Rotation3::from_euler_angles(r, -f64::frac_pi_2(), y); let (pos_r, pos_p, pos_y) = pos.euler_angles(); let (neg_r, neg_p, neg_y) = neg.euler_angles(); - relative_eq!(Rotation3::from_euler_angles(pos_r, pos_p, pos_y), pos, epsilon = 1.0e-7) && - relative_eq!(Rotation3::from_euler_angles(neg_r, neg_p, neg_y), neg, epsilon = 1.0e-7) + prop_assert!(relative_eq!(Rotation3::from_euler_angles(pos_r, pos_p, pos_y), pos, epsilon = 1.0e-7)); + prop_assert!(relative_eq!(Rotation3::from_euler_angles(neg_r, neg_p, neg_y), neg, epsilon = 1.0e-7)); } /* @@ -75,26 +81,28 @@ mod quickcheck_tests { * Inversion is transposition. 
* */ - fn rotation_inv_3(a: Rotation3) -> bool { + #[test] + fn rotation_inv_3(a in rotation3()) { let ta = a.transpose(); let ia = a.inverse(); - ta == ia && - relative_eq!(&ta * &a, Rotation3::identity(), epsilon = 1.0e-7) && - relative_eq!(&ia * a, Rotation3::identity(), epsilon = 1.0e-7) && - relative_eq!( a * &ta, Rotation3::identity(), epsilon = 1.0e-7) && - relative_eq!( a * ia, Rotation3::identity(), epsilon = 1.0e-7) + prop_assert_eq!(ta, ia); + prop_assert!(relative_eq!(&ta * &a, Rotation3::identity(), epsilon = 1.0e-7)); + prop_assert!(relative_eq!(&ia * a, Rotation3::identity(), epsilon = 1.0e-7)); + prop_assert!(relative_eq!( a * &ta, Rotation3::identity(), epsilon = 1.0e-7)); + prop_assert!(relative_eq!( a * ia, Rotation3::identity(), epsilon = 1.0e-7)); } - fn rotation_inv_2(a: Rotation2) -> bool { + #[test] + fn rotation_inv_2(a in rotation2()) { let ta = a.transpose(); let ia = a.inverse(); - ta == ia && - relative_eq!(&ta * &a, Rotation2::identity(), epsilon = 1.0e-7) && - relative_eq!(&ia * a, Rotation2::identity(), epsilon = 1.0e-7) && - relative_eq!( a * &ta, Rotation2::identity(), epsilon = 1.0e-7) && - relative_eq!( a * ia, Rotation2::identity(), epsilon = 1.0e-7) + prop_assert_eq!(ta, ia); + prop_assert!(relative_eq!(&ta * &a, Rotation2::identity(), epsilon = 1.0e-7)); + prop_assert!(relative_eq!(&ia * a, Rotation2::identity(), epsilon = 1.0e-7)); + prop_assert!(relative_eq!( a * &ta, Rotation2::identity(), epsilon = 1.0e-7)); + prop_assert!(relative_eq!( a * ia, Rotation2::identity(), epsilon = 1.0e-7)); } /* @@ -102,12 +110,14 @@ mod quickcheck_tests { * Angle between vectors. * */ - fn angle_is_commutative_2(a: Vector2, b: Vector2) -> bool { - a.angle(&b) == b.angle(&a) + #[test] + fn angle_is_commutative_2(a in vector2(), b in vector2()) { + prop_assert_eq!(a.angle(&b), b.angle(&a)) } - fn angle_is_commutative_3(a: Vector3, b: Vector3) -> bool { - a.angle(&b) == b.angle(&a) + #[test] + fn angle_is_commutative_3(a in vector3(), b in vector3()) { + prop_assert_eq!(a.angle(&b), b.angle(&a)) } /* @@ -115,50 +125,46 @@ mod quickcheck_tests { * Rotation matrix between vectors. 
* */ - fn rotation_between_is_anticommutative_2(a: Vector2, b: Vector2) -> bool { + #[test] + fn rotation_between_is_anticommutative_2(a in vector2(), b in vector2()) { let rab = Rotation2::rotation_between(&a, &b); let rba = Rotation2::rotation_between(&b, &a); - relative_eq!(rab * rba, Rotation2::identity()) + prop_assert!(relative_eq!(rab * rba, Rotation2::identity())); } - fn rotation_between_is_anticommutative_3(a: Vector3, b: Vector3) -> bool { + #[test] + fn rotation_between_is_anticommutative_3(a in vector3(), b in vector3()) { let rots = (Rotation3::rotation_between(&a, &b), Rotation3::rotation_between(&b, &a)); if let (Some(rab), Some(rba)) = rots { - relative_eq!(rab * rba, Rotation3::identity(), epsilon = 1.0e-7) - } - else { - true + prop_assert!(relative_eq!(rab * rba, Rotation3::identity(), epsilon = 1.0e-7)); } } - fn rotation_between_is_identity(v2: Vector2, v3: Vector3) -> bool { + #[test] + fn rotation_between_is_identity(v2 in vector2(), v3 in vector3()) { let vv2 = 3.42 * v2; let vv3 = 4.23 * v3; - relative_eq!(v2.angle(&vv2), 0.0, epsilon = 1.0e-7) && - relative_eq!(v3.angle(&vv3), 0.0, epsilon = 1.0e-7) && - relative_eq!(Rotation2::rotation_between(&v2, &vv2), Rotation2::identity()) && - Rotation3::rotation_between(&v3, &vv3).unwrap() == Rotation3::identity() + prop_assert!(relative_eq!(v2.angle(&vv2), 0.0, epsilon = 1.0e-7)); + prop_assert!(relative_eq!(v3.angle(&vv3), 0.0, epsilon = 1.0e-7)); + prop_assert!(relative_eq!(Rotation2::rotation_between(&v2, &vv2), Rotation2::identity())); + prop_assert_eq!(Rotation3::rotation_between(&v3, &vv3).unwrap(), Rotation3::identity()); } - fn rotation_between_2(a: Vector2, b: Vector2) -> bool { + #[test] + fn rotation_between_2(a in vector2(), b in vector2()) { if !relative_eq!(a.angle(&b), 0.0, epsilon = 1.0e-7) { let r = Rotation2::rotation_between(&a, &b); - relative_eq!((r * a).angle(&b), 0.0, epsilon = 1.0e-7) - } - else { - true + prop_assert!(relative_eq!((r * a).angle(&b), 0.0, epsilon = 1.0e-7)) } } - fn rotation_between_3(a: Vector3, b: Vector3) -> bool { + #[test] + fn rotation_between_3(a in vector3(), b in vector3()) { if !relative_eq!(a.angle(&b), 0.0, epsilon = 1.0e-7) { let r = Rotation3::rotation_between(&a, &b).unwrap(); - relative_eq!((r * a).angle(&b), 0.0, epsilon = 1.0e-7) - } - else { - true + prop_assert!(relative_eq!((r * a).angle(&b), 0.0, epsilon = 1.0e-7)) } } @@ -168,25 +174,27 @@ mod quickcheck_tests { * Rotation construction. * */ - fn new_rotation_2(angle: f64) -> bool { + #[test] + fn new_rotation_2(angle in PROPTEST_F64) { let r = Rotation2::new(angle); let angle = na::wrap(angle, -f64::pi(), f64::pi()); - relative_eq!(r.angle(), angle, epsilon = 1.0e-7) + prop_assert!(relative_eq!(r.angle(), angle, epsilon = 1.0e-7)) } - fn new_rotation_3(axisangle: Vector3) -> bool { + #[test] + fn new_rotation_3(axisangle in vector3()) { let r = Rotation3::new(axisangle); if let Some((axis, angle)) = Unit::try_new_and_get(axisangle, 0.0) { let angle = na::wrap(angle, -f64::pi(), f64::pi()); - (relative_eq!(r.angle(), angle, epsilon = 1.0e-7) && + prop_assert!((relative_eq!(r.angle(), angle, epsilon = 1.0e-7) && relative_eq!(r.axis().unwrap(), axis, epsilon = 1.0e-7)) || (relative_eq!(r.angle(), -angle, epsilon = 1.0e-7) && - relative_eq!(r.axis().unwrap(), -axis, epsilon = 1.0e-7)) + relative_eq!(r.axis().unwrap(), -axis, epsilon = 1.0e-7))) } else { - r == Rotation3::identity() + prop_assert_eq!(r, Rotation3::identity()) } } @@ -195,28 +203,30 @@ mod quickcheck_tests { * Rotation pow. 
* */ - fn powf_rotation_2(angle: f64, pow: f64) -> bool { + #[test] + fn powf_rotation_2(angle in PROPTEST_F64, pow in PROPTEST_F64) { let r = Rotation2::new(angle).powf(pow); let angle = na::wrap(angle, -f64::pi(), f64::pi()); let pangle = na::wrap(angle * pow, -f64::pi(), f64::pi()); - relative_eq!(r.angle(), pangle, epsilon = 1.0e-7) + prop_assert!(relative_eq!(r.angle(), pangle, epsilon = 1.0e-7)); } - fn powf_rotation_3(axisangle: Vector3, pow: f64) -> bool { + #[test] + fn powf_rotation_3(axisangle in vector3(), pow in PROPTEST_F64) { let r = Rotation3::new(axisangle).powf(pow); if let Some((axis, angle)) = Unit::try_new_and_get(axisangle, 0.0) { let angle = na::wrap(angle, -f64::pi(), f64::pi()); let pangle = na::wrap(angle * pow, -f64::pi(), f64::pi()); - (relative_eq!(r.angle(), pangle, epsilon = 1.0e-7) && + prop_assert!((relative_eq!(r.angle(), pangle, epsilon = 1.0e-7) && relative_eq!(r.axis().unwrap(), axis, epsilon = 1.0e-7)) || (relative_eq!(r.angle(), -pangle, epsilon = 1.0e-7) && - relative_eq!(r.axis().unwrap(), -axis, epsilon = 1.0e-7)) + relative_eq!(r.axis().unwrap(), -axis, epsilon = 1.0e-7))); } else { - r == Rotation3::identity() + prop_assert_eq!(r, Rotation3::identity()) } } } diff --git a/tests/geometry/similarity.rs b/tests/geometry/similarity.rs index b93d2e51..bcd430e9 100644 --- a/tests/geometry/similarity.rs +++ b/tests/geometry/similarity.rs @@ -1,41 +1,45 @@ -#![cfg(feature = "arbitrary")] +#![cfg(feature = "proptest-support")] #![allow(non_snake_case)] -use na::{Isometry3, Point3, Similarity3, Translation3, UnitQuaternion, Vector3}; +use na::Similarity3; -quickcheck!( - fn inverse_is_identity(i: Similarity3, p: Point3, v: Vector3) -> bool { +use crate::proptest::*; +use proptest::{prop_assert, prop_assert_eq, proptest}; + +proptest!( + #[test] + fn inverse_is_identity(i in similarity3(), p in point3(), v in vector3()) { let ii = i.inverse(); - relative_eq!(i * ii, Similarity3::identity(), epsilon = 1.0e-7) + prop_assert!(relative_eq!(i * ii, Similarity3::identity(), epsilon = 1.0e-7) && relative_eq!(ii * i, Similarity3::identity(), epsilon = 1.0e-7) && relative_eq!((i * ii) * p, p, epsilon = 1.0e-7) && relative_eq!((ii * i) * p, p, epsilon = 1.0e-7) && relative_eq!((i * ii) * v, v, epsilon = 1.0e-7) - && relative_eq!((ii * i) * v, v, epsilon = 1.0e-7) + && relative_eq!((ii * i) * v, v, epsilon = 1.0e-7)) } + #[test] #[cfg_attr(rustfmt, rustfmt_skip)] fn inverse_is_parts_inversion( - t: Translation3, - r: UnitQuaternion, - scaling: f64 - ) -> bool { - if relative_eq!(scaling, 0.0) { - true - } else { + t in translation3(), + r in unit_quaternion(), + scaling in PROPTEST_F64 + ) { + if !relative_eq!(scaling, 0.0) { let s = Similarity3::from_isometry(t * r, scaling); - s.inverse() == Similarity3::from_scaling(1.0 / scaling) * r.inverse() * t.inverse() + prop_assert_eq!(s.inverse(), Similarity3::from_scaling(1.0 / scaling) * r.inverse() * t.inverse()) } } + #[test] #[cfg_attr(rustfmt, rustfmt_skip)] fn multiply_equals_alga_transform( - s: Similarity3, - v: Vector3, - p: Point3 - ) -> bool { - s * v == s.transform_vector(&v) + s in similarity3(), + v in vector3(), + p in point3() + ) { + prop_assert!(s * v == s.transform_vector(&v) && s * p == s.transform_point(&p) && relative_eq!( s.inverse() * v, @@ -46,114 +50,114 @@ quickcheck!( s.inverse() * p, s.inverse_transform_point(&p), epsilon = 1.0e-7 - ) + )) } + #[test] #[cfg_attr(rustfmt, rustfmt_skip)] fn composition( - i: Isometry3, - uq: UnitQuaternion, - t: Translation3, - v: Vector3, - p: Point3, - scaling: 
f64 - ) -> bool { - if relative_eq!(scaling, 0.0) { - return true; + i in isometry3(), + uq in unit_quaternion(), + t in translation3(), + v in vector3(), + p in point3(), + scaling in PROPTEST_F64 + ) { + if !relative_eq!(scaling, 0.0) { + let s = Similarity3::from_scaling(scaling); + + // (rotation × translation × scaling) × point = rotation × (translation × (scaling × point)) + prop_assert!(relative_eq!((uq * t * s) * v, uq * (scaling * v), epsilon = 1.0e-7)); + prop_assert!(relative_eq!((uq * t * s) * p, uq * (t * (scaling * p)), epsilon = 1.0e-7)); + + // (translation × rotation × scaling) × point = translation × (rotation × (scaling × point)) + prop_assert!(relative_eq!((t * uq * s) * v, uq * (scaling * v), epsilon = 1.0e-7)); + prop_assert!(relative_eq!((t * uq * s) * p, t * (uq * (scaling * p)), epsilon = 1.0e-7)); + + // (rotation × isometry × scaling) × point = rotation × (isometry × (scaling × point)) + prop_assert!(relative_eq!((uq * i * s) * v, uq * (i * (scaling * v)), epsilon = 1.0e-7)); + prop_assert!(relative_eq!((uq * i * s) * p, uq * (i * (scaling * p)), epsilon = 1.0e-7)); + + // (isometry × rotation × scaling) × point = isometry × (rotation × (scaling × point)) + prop_assert!(relative_eq!((i * uq * s) * v, i * (uq * (scaling * v)), epsilon = 1.0e-7)); + prop_assert!(relative_eq!((i * uq * s) * p, i * (uq * (scaling * p)), epsilon = 1.0e-7)); + + // (translation × isometry × scaling) × point = translation × (isometry × (scaling × point)) + prop_assert!(relative_eq!((t * i * s) * v, (i * (scaling * v)), epsilon = 1.0e-7)); + prop_assert!(relative_eq!((t * i * s) * p, t * (i * (scaling * p)), epsilon = 1.0e-7)); + + // (isometry × translation × scaling) × point = isometry × (translation × (scaling × point)) + prop_assert!(relative_eq!((i * t * s) * v, i * (scaling * v), epsilon = 1.0e-7)); + prop_assert!(relative_eq!((i * t * s) * p, i * (t * (scaling * p)), epsilon = 1.0e-7)); + + + /* + * Same as before but with scaling on the middle. 
+ */ + // (rotation × scaling × translation) × point = rotation × (scaling × (translation × point)) + prop_assert!(relative_eq!((uq * s * t) * v, uq * (scaling * v), epsilon = 1.0e-7)); + prop_assert!(relative_eq!((uq * s * t) * p, uq * (scaling * (t * p)), epsilon = 1.0e-7)); + + // (translation × scaling × rotation) × point = translation × (scaling × (rotation × point)) + prop_assert!(relative_eq!((t * s * uq) * v, scaling * (uq * v), epsilon = 1.0e-7)); + prop_assert!(relative_eq!((t * s * uq) * p, t * (scaling * (uq * p)), epsilon = 1.0e-7)); + + // (rotation × scaling × isometry) × point = rotation × (scaling × (isometry × point)) + prop_assert!(relative_eq!((uq * s * i) * v, uq * (scaling * (i * v)), epsilon = 1.0e-7)); + prop_assert!(relative_eq!((uq * s * i) * p, uq * (scaling * (i * p)), epsilon = 1.0e-7)); + + // (isometry × scaling × rotation) × point = isometry × (scaling × (rotation × point)) + prop_assert!(relative_eq!((i * s * uq) * v, i * (scaling * (uq * v)), epsilon = 1.0e-7)); + prop_assert!(relative_eq!((i * s * uq) * p, i * (scaling * (uq * p)), epsilon = 1.0e-7)); + + // (translation × scaling × isometry) × point = translation × (scaling × (isometry × point)) + prop_assert!(relative_eq!((t * s * i) * v, (scaling * (i * v)), epsilon = 1.0e-7)); + prop_assert!(relative_eq!((t * s * i) * p, t * (scaling * (i * p)), epsilon = 1.0e-7)); + + // (isometry × scaling × translation) × point = isometry × (scaling × (translation × point)) + prop_assert!(relative_eq!((i * s * t) * v, i * (scaling * v), epsilon = 1.0e-7)); + prop_assert!(relative_eq!((i * s * t) * p, i * (scaling * (t * p)), epsilon = 1.0e-7)); + + + /* + * Same as before but with scaling on the left. + */ + // (scaling × rotation × translation) × point = scaling × (rotation × (translation × point)) + prop_assert!(relative_eq!((s * uq * t) * v, scaling * (uq * v), epsilon = 1.0e-7)); + prop_assert!(relative_eq!((s * uq * t) * p, scaling * (uq * (t * p)), epsilon = 1.0e-7)); + + // (scaling × translation × rotation) × point = scaling × (translation × (rotation × point)) + prop_assert!(relative_eq!((s * t * uq) * v, scaling * (uq * v), epsilon = 1.0e-7)); + prop_assert!(relative_eq!((s * t * uq) * p, scaling * (t * (uq * p)), epsilon = 1.0e-7)); + + // (scaling × rotation × isometry) × point = scaling × (rotation × (isometry × point)) + prop_assert!(relative_eq!((s * uq * i) * v, scaling * (uq * (i * v)), epsilon = 1.0e-7)); + prop_assert!(relative_eq!((s * uq * i) * p, scaling * (uq * (i * p)), epsilon = 1.0e-7)); + + // (scaling × isometry × rotation) × point = scaling × (isometry × (rotation × point)) + prop_assert!(relative_eq!((s * i * uq) * v, scaling * (i * (uq * v)), epsilon = 1.0e-7)); + prop_assert!(relative_eq!((s * i * uq) * p, scaling * (i * (uq * p)), epsilon = 1.0e-7)); + + // (scaling × translation × isometry) × point = scaling × (translation × (isometry × point)) + prop_assert!(relative_eq!((s * t * i) * v, (scaling * (i * v)), epsilon = 1.0e-7)); + prop_assert!(relative_eq!((s * t * i) * p, scaling * (t * (i * p)), epsilon = 1.0e-7)); + + // (scaling × isometry × translation) × point = scaling × (isometry × (translation × point)) + prop_assert!(relative_eq!((s * i * t) * v, scaling * (i * v), epsilon = 1.0e-7)); + prop_assert!(relative_eq!((s * i * t) * p, scaling * (i * (t * p)), epsilon = 1.0e-7)); } - - let s = Similarity3::from_scaling(scaling); - - // (rotation × translation × scaling) × point = rotation × (translation × (scaling × point)) - relative_eq!((uq * t * s) * v, uq * (scaling * v), 
epsilon = 1.0e-7) && - relative_eq!((uq * t * s) * p, uq * (t * (scaling * p)), epsilon = 1.0e-7) && - - // (translation × rotation × scaling) × point = translation × (rotation × (scaling × point)) - relative_eq!((t * uq * s) * v, uq * (scaling * v), epsilon = 1.0e-7) && - relative_eq!((t * uq * s) * p, t * (uq * (scaling * p)), epsilon = 1.0e-7) && - - // (rotation × isometry × scaling) × point = rotation × (isometry × (scaling × point)) - relative_eq!((uq * i * s) * v, uq * (i * (scaling * v)), epsilon = 1.0e-7) && - relative_eq!((uq * i * s) * p, uq * (i * (scaling * p)), epsilon = 1.0e-7) && - - // (isometry × rotation × scaling) × point = isometry × (rotation × (scaling × point)) - relative_eq!((i * uq * s) * v, i * (uq * (scaling * v)), epsilon = 1.0e-7) && - relative_eq!((i * uq * s) * p, i * (uq * (scaling * p)), epsilon = 1.0e-7) && - - // (translation × isometry × scaling) × point = translation × (isometry × (scaling × point)) - relative_eq!((t * i * s) * v, (i * (scaling * v)), epsilon = 1.0e-7) && - relative_eq!((t * i * s) * p, t * (i * (scaling * p)), epsilon = 1.0e-7) && - - // (isometry × translation × scaling) × point = isometry × (translation × (scaling × point)) - relative_eq!((i * t * s) * v, i * (scaling * v), epsilon = 1.0e-7) && - relative_eq!((i * t * s) * p, i * (t * (scaling * p)), epsilon = 1.0e-7) && - - - /* - * Same as before but with scaling on the middle. - */ - // (rotation × scaling × translation) × point = rotation × (scaling × (translation × point)) - relative_eq!((uq * s * t) * v, uq * (scaling * v), epsilon = 1.0e-7) && - relative_eq!((uq * s * t) * p, uq * (scaling * (t * p)), epsilon = 1.0e-7) && - - // (translation × scaling × rotation) × point = translation × (scaling × (rotation × point)) - relative_eq!((t * s * uq) * v, scaling * (uq * v), epsilon = 1.0e-7) && - relative_eq!((t * s * uq) * p, t * (scaling * (uq * p)), epsilon = 1.0e-7) && - - // (rotation × scaling × isometry) × point = rotation × (scaling × (isometry × point)) - relative_eq!((uq * s * i) * v, uq * (scaling * (i * v)), epsilon = 1.0e-7) && - relative_eq!((uq * s * i) * p, uq * (scaling * (i * p)), epsilon = 1.0e-7) && - - // (isometry × scaling × rotation) × point = isometry × (scaling × (rotation × point)) - relative_eq!((i * s * uq) * v, i * (scaling * (uq * v)), epsilon = 1.0e-7) && - relative_eq!((i * s * uq) * p, i * (scaling * (uq * p)), epsilon = 1.0e-7) && - - // (translation × scaling × isometry) × point = translation × (scaling × (isometry × point)) - relative_eq!((t * s * i) * v, (scaling * (i * v)), epsilon = 1.0e-7) && - relative_eq!((t * s * i) * p, t * (scaling * (i * p)), epsilon = 1.0e-7) && - - // (isometry × scaling × translation) × point = isometry × (scaling × (translation × point)) - relative_eq!((i * s * t) * v, i * (scaling * v), epsilon = 1.0e-7) && - relative_eq!((i * s * t) * p, i * (scaling * (t * p)), epsilon = 1.0e-7) && - - - /* - * Same as before but with scaling on the left. 
- */ - // (scaling × rotation × translation) × point = scaling × (rotation × (translation × point)) - relative_eq!((s * uq * t) * v, scaling * (uq * v), epsilon = 1.0e-7) && - relative_eq!((s * uq * t) * p, scaling * (uq * (t * p)), epsilon = 1.0e-7) && - - // (scaling × translation × rotation) × point = scaling × (translation × (rotation × point)) - relative_eq!((s * t * uq) * v, scaling * (uq * v), epsilon = 1.0e-7) && - relative_eq!((s * t * uq) * p, scaling * (t * (uq * p)), epsilon = 1.0e-7) && - - // (scaling × rotation × isometry) × point = scaling × (rotation × (isometry × point)) - relative_eq!((s * uq * i) * v, scaling * (uq * (i * v)), epsilon = 1.0e-7) && - relative_eq!((s * uq * i) * p, scaling * (uq * (i * p)), epsilon = 1.0e-7) && - - // (scaling × isometry × rotation) × point = scaling × (isometry × (rotation × point)) - relative_eq!((s * i * uq) * v, scaling * (i * (uq * v)), epsilon = 1.0e-7) && - relative_eq!((s * i * uq) * p, scaling * (i * (uq * p)), epsilon = 1.0e-7) && - - // (scaling × translation × isometry) × point = scaling × (translation × (isometry × point)) - relative_eq!((s * t * i) * v, (scaling * (i * v)), epsilon = 1.0e-7) && - relative_eq!((s * t * i) * p, scaling * (t * (i * p)), epsilon = 1.0e-7) && - - // (scaling × isometry × translation) × point = scaling × (isometry × (translation × point)) - relative_eq!((s * i * t) * v, scaling * (i * v), epsilon = 1.0e-7) && - relative_eq!((s * i * t) * p, scaling * (i * (t * p)), epsilon = 1.0e-7) } + #[test] #[cfg_attr(rustfmt, rustfmt_skip)] fn all_op_exist( - s: Similarity3<f64>, - i: Isometry3<f64>, - uq: UnitQuaternion<f64>, - t: Translation3<f64>, - v: Vector3<f64>, - p: Point3<f64> - ) -> bool { + s in similarity3(), + i in isometry3(), + uq in unit_quaternion(), + t in translation3(), + v in vector3(), + p in point3() + ) { let sMs = s * s; let sMuq = s * uq; let sDs = s / s; @@ -216,7 +220,7 @@ quickcheck!( sDi1 /= i; sDi2 /= &i; - sMt == sMt1 + prop_assert!(sMt == sMt1 && sMt == sMt2 && sMs == sMs1 && sMs == sMs2 @@ -271,6 +275,6 @@ quickcheck!( && iMs == &i * s && iDs == &i / &s && iDs == i / &s - && iDs == &i / s + && iDs == &i / s) } ); diff --git a/tests/geometry/unit_complex.rs b/tests/geometry/unit_complex.rs index 263ecd33..a24a80e2 100644 --- a/tests/geometry/unit_complex.rs +++ b/tests/geometry/unit_complex.rs @@ -1,20 +1,25 @@ -#![cfg(feature = "arbitrary")] +#![cfg(feature = "proptest-support")] #![allow(non_snake_case)] -use na::{Point2, Rotation2, Unit, UnitComplex, Vector2}; +use na::{Unit, UnitComplex}; -quickcheck!( +use crate::proptest::*; +use proptest::{prop_assert, proptest}; + +proptest!( /* * * From/to rotation matrix. * */ - fn unit_complex_rotation_conversion(c: UnitComplex<f64>) -> bool { + #[test] + fn unit_complex_rotation_conversion(c in unit_complex()) { let r = c.to_rotation_matrix(); let cc = UnitComplex::from_rotation_matrix(&r); let rr = cc.to_rotation_matrix(); - relative_eq!(c, cc, epsilon = 1.0e-7) && relative_eq!(r, rr, epsilon = 1.0e-7) + prop_assert!(relative_eq!(c, cc, epsilon = 1.0e-7)); + prop_assert!(relative_eq!(r, rr, epsilon = 1.0e-7)); } /* @@ -22,19 +27,20 @@ quickcheck!( * Point/Vector transformation.
* */ - fn unit_complex_transformation(c: UnitComplex<f64>, v: Vector2<f64>, p: Point2<f64>) -> bool { + #[test] + fn unit_complex_transformation(c in unit_complex(), v in vector2(), p in point2()) { let r = c.to_rotation_matrix(); let rv = r * v; let rp = r * p; - relative_eq!(c * v, rv, epsilon = 1.0e-7) + prop_assert!(relative_eq!(c * v, rv, epsilon = 1.0e-7) && relative_eq!(c * &v, rv, epsilon = 1.0e-7) && relative_eq!(&c * v, rv, epsilon = 1.0e-7) && relative_eq!(&c * &v, rv, epsilon = 1.0e-7) && relative_eq!(c * p, rp, epsilon = 1.0e-7) && relative_eq!(c * &p, rp, epsilon = 1.0e-7) && relative_eq!(&c * p, rp, epsilon = 1.0e-7) - && relative_eq!(&c * &p, rp, epsilon = 1.0e-7) + && relative_eq!(&c * &p, rp, epsilon = 1.0e-7)) } /* @@ -42,39 +48,43 @@ quickcheck!( * Inversion. * */ - fn unit_complex_inv(c: UnitComplex<f64>) -> bool { + #[test] + fn unit_complex_inv(c in unit_complex()) { let iq = c.inverse(); - relative_eq!(&iq * &c, UnitComplex::identity(), epsilon = 1.0e-7) + prop_assert!(relative_eq!(&iq * &c, UnitComplex::identity(), epsilon = 1.0e-7) && relative_eq!(iq * &c, UnitComplex::identity(), epsilon = 1.0e-7) && relative_eq!(&iq * c, UnitComplex::identity(), epsilon = 1.0e-7) && relative_eq!(iq * c, UnitComplex::identity(), epsilon = 1.0e-7) && relative_eq!(&c * &iq, UnitComplex::identity(), epsilon = 1.0e-7) && relative_eq!(c * &iq, UnitComplex::identity(), epsilon = 1.0e-7) && relative_eq!(&c * iq, UnitComplex::identity(), epsilon = 1.0e-7) - && relative_eq!(c * iq, UnitComplex::identity(), epsilon = 1.0e-7) + && relative_eq!(c * iq, UnitComplex::identity(), epsilon = 1.0e-7)) } /* * - * Quaterion * Vector == Rotation * Vector + * UnitComplex * Vector == Rotation * Vector * */ - fn unit_complex_mul_vector(c: UnitComplex<f64>, v: Vector2<f64>, p: Point2<f64>) -> bool { + #[test] + fn unit_complex_mul_vector(c in unit_complex(), v in vector2(), p in point2()) { let r = c.to_rotation_matrix(); - relative_eq!(c * v, r * v, epsilon = 1.0e-7) && relative_eq!(c * p, r * p, epsilon = 1.0e-7) + prop_assert!(relative_eq!(c * v, r * v, epsilon = 1.0e-7)); + prop_assert!(relative_eq!(c * p, r * p, epsilon = 1.0e-7)); } // Test that all operators (incl. all combinations of references) work. // See the top comment on `geometry/quaternion_ops.rs` for details on which operations are // supported.
+ #[test] #[cfg_attr(rustfmt, rustfmt_skip)] fn all_op_exist( - uc: UnitComplex<f64>, - v: Vector2<f64>, - p: Point2<f64>, - r: Rotation2<f64> - ) -> bool { + uc in unit_complex(), + v in vector2(), + p in point2(), + r in rotation2() + ) { let uv = Unit::new_normalize(v); let ucMuc = uc * uc; @@ -112,7 +122,7 @@ quickcheck!( ucDr1 /= r; ucDr2 /= &r; - ucMuc1 == ucMuc + prop_assert!(ucMuc1 == ucMuc && ucMuc1 == ucMuc2 && ucMr1 == ucMr && ucMr1 == ucMr2 @@ -146,6 +156,6 @@ quickcheck!( && ucMv == &uc * v && ucMuv == &uc * &uv && ucMuv == uc * &uv - && ucMuv == &uc * uv + && ucMuv == &uc * uv) } ); diff --git a/tests/lib.rs b/tests/lib.rs index 02044b97..ca7faa74 100644 --- a/tests/lib.rs +++ b/tests/lib.rs @@ -12,12 +12,13 @@ extern crate approx; extern crate mint; extern crate nalgebra as na; extern crate num_traits as num; -#[cfg(feature = "arbitrary")] -#[macro_use] -extern crate quickcheck; mod core; mod geometry; mod linalg; + +#[cfg(feature = "proptest-support")] +mod proptest; + //#[cfg(feature = "sparse")] //mod sparse; diff --git a/tests/linalg/balancing.rs b/tests/linalg/balancing.rs index 401f672a..5d250b2c 100644 --- a/tests/linalg/balancing.rs +++ b/tests/linalg/balancing.rs @@ -1,26 +1,28 @@ -#![cfg(feature = "arbitrary")] - -use std::cmp; +#![cfg(feature = "proptest-support")] use na::balancing; -use na::{DMatrix, Matrix4}; +use na::DMatrix; -quickcheck! { - fn balancing_parlett_reinsch(n: usize) -> bool { - let n = cmp::min(n, 10); +use crate::proptest::*; +use proptest::{prop_assert_eq, proptest}; + +proptest! { + #[test] + fn balancing_parlett_reinsch(n in PROPTEST_MATRIX_DIM) { let m = DMatrix::<f64>::new_random(n, n); let mut balanced = m.clone(); let d = balancing::balance_parlett_reinsch(&mut balanced); balancing::unbalance(&mut balanced, &d); - balanced == m + prop_assert_eq!(balanced, m); } - fn balancing_parlett_reinsch_static(m: Matrix4<f64>) -> bool { + #[test] + fn balancing_parlett_reinsch_static(m in matrix4()) { let mut balanced = m; let d = balancing::balance_parlett_reinsch(&mut balanced); balancing::unbalance(&mut balanced, &d); - balanced == m + prop_assert_eq!(balanced, m); } } diff --git a/tests/linalg/bidiagonal.rs b/tests/linalg/bidiagonal.rs index 8fefb4a2..aaee393f 100644 --- a/tests/linalg/bidiagonal.rs +++ b/tests/linalg/bidiagonal.rs @@ -1,63 +1,61 @@ -#![cfg(feature = "arbitrary")] +#![cfg(feature = "proptest-support")] macro_rules! gen_tests( - ($module: ident, $scalar: ty) => { + ($module: ident, $scalar: expr) => { mod $module { - use na::{DMatrix, Matrix2, Matrix3x5, Matrix4, Matrix5x3}; #[allow(unused_imports)] use crate::core::helper::{RandScalar, RandComplex}; - quickcheck! { - fn bidiagonal(m: DMatrix<$scalar>) -> bool { - let m = m.map(|e| e.0); - if m.len() == 0 { - return true; - } + use crate::proptest::*; + use proptest::{prop_assert, proptest}; + proptest!
{ + #[test] + fn bidiagonal(m in dmatrix_($scalar)) { let bidiagonal = m.clone().bidiagonalize(); let (u, d, v_t) = bidiagonal.unpack(); - relative_eq!(m, &u * d * &v_t, epsilon = 1.0e-7) + prop_assert!(relative_eq!(m, &u * d * &v_t, epsilon = 1.0e-7)) } - fn bidiagonal_static_5_3(m: Matrix5x3<$scalar>) -> bool { - let m = m.map(|e| e.0); + #[test] + fn bidiagonal_static_5_3(m in matrix5x3_($scalar)) { let bidiagonal = m.bidiagonalize(); let (u, d, v_t) = bidiagonal.unpack(); - relative_eq!(m, &u * d * &v_t, epsilon = 1.0e-7) + prop_assert!(relative_eq!(m, &u * d * &v_t, epsilon = 1.0e-7)) } - fn bidiagonal_static_3_5(m: Matrix3x5<$scalar>) -> bool { - let m = m.map(|e| e.0); + #[test] + fn bidiagonal_static_3_5(m in matrix3x5_($scalar)) { let bidiagonal = m.bidiagonalize(); let (u, d, v_t) = bidiagonal.unpack(); - relative_eq!(m, &u * d * &v_t, epsilon = 1.0e-7) + prop_assert!(relative_eq!(m, &u * d * &v_t, epsilon = 1.0e-7)) } - fn bidiagonal_static_square(m: Matrix4<$scalar>) -> bool { - let m = m.map(|e| e.0); + #[test] + fn bidiagonal_static_square(m in matrix4_($scalar)) { let bidiagonal = m.bidiagonalize(); let (u, d, v_t) = bidiagonal.unpack(); - relative_eq!(m, &u * d * &v_t, epsilon = 1.0e-7) + prop_assert!(relative_eq!(m, &u * d * &v_t, epsilon = 1.0e-7)) } - fn bidiagonal_static_square_2x2(m: Matrix2<$scalar>) -> bool { - let m = m.map(|e| e.0); + #[test] + fn bidiagonal_static_square_2x2(m in matrix2_($scalar)) { let bidiagonal = m.bidiagonalize(); let (u, d, v_t) = bidiagonal.unpack(); - relative_eq!(m, &u * d * &v_t, epsilon = 1.0e-7) + prop_assert!(relative_eq!(m, &u * d * &v_t, epsilon = 1.0e-7)) } } } } ); -gen_tests!(complex, RandComplex); -gen_tests!(f64, RandScalar); +gen_tests!(complex, complex_f64()); +gen_tests!(f64, PROPTEST_F64); #[test] fn bidiagonal_identity() { diff --git a/tests/linalg/cholesky.rs b/tests/linalg/cholesky.rs index a89802b2..5ea0edaf 100644 --- a/tests/linalg/cholesky.rs +++ b/tests/linalg/cholesky.rs @@ -1,4 +1,4 @@ -#![cfg(all(feature = "arbitrary", feature = "debug"))] +#![cfg(all(feature = "proptest-support", feature = "debug"))] macro_rules! gen_tests( ($module: ident, $scalar: ty) => { @@ -9,32 +9,30 @@ macro_rules! gen_tests( use rand::random; #[allow(unused_imports)] use crate::core::helper::{RandScalar, RandComplex}; - use std::cmp; - quickcheck! { - fn cholesky(n: usize) -> bool { - let m = RandomSDP::new(Dynamic::new(n.max(1).min(50)), || random::<$scalar>().0).unwrap(); + use crate::proptest::*; + use proptest::{prop_assert, proptest}; + + proptest! 
{ + #[test] + fn cholesky(n in PROPTEST_MATRIX_DIM) { + let m = RandomSDP::new(Dynamic::new(n), || random::<$scalar>().0).unwrap(); let l = m.clone().cholesky().unwrap().unpack(); - relative_eq!(m, &l * l.adjoint(), epsilon = 1.0e-7) + prop_assert!(relative_eq!(m, &l * l.adjoint(), epsilon = 1.0e-7)); } - fn cholesky_static(_m: RandomSDP) -> bool { + #[test] + fn cholesky_static(_n in PROPTEST_MATRIX_DIM) { let m = RandomSDP::new(U4, || random::<$scalar>().0).unwrap(); let chol = m.cholesky().unwrap(); let l = chol.unpack(); - if !relative_eq!(m, &l * l.adjoint(), epsilon = 1.0e-7) { - false - } - else { - true - } + prop_assert!(relative_eq!(m, &l * l.adjoint(), epsilon = 1.0e-7)); } - fn cholesky_solve(n: usize, nb: usize) -> bool { - let n = n.max(1).min(50); + #[test] + fn cholesky_solve(n in PROPTEST_MATRIX_DIM, nb in PROPTEST_MATRIX_DIM) { let m = RandomSDP::new(Dynamic::new(n), || random::<$scalar>().0).unwrap(); - let nb = cmp::min(nb, 50); // To avoid slowing down the test too much. let chol = m.clone().cholesky().unwrap(); let b1 = DVector::<$scalar>::new_random(n).map(|e| e.0); @@ -43,11 +41,12 @@ macro_rules! gen_tests( let sol1 = chol.solve(&b1); let sol2 = chol.solve(&b2); - relative_eq!(&m * &sol1, b1, epsilon = 1.0e-7) && - relative_eq!(&m * &sol2, b2, epsilon = 1.0e-7) + prop_assert!(relative_eq!(&m * &sol1, b1, epsilon = 1.0e-7)); + prop_assert!(relative_eq!(&m * &sol2, b2, epsilon = 1.0e-7)); } - fn cholesky_solve_static(_n: usize) -> bool { + #[test] + fn cholesky_solve_static(_n in PROPTEST_MATRIX_DIM) { let m = RandomSDP::new(U4, || random::<$scalar>().0).unwrap(); let chol = m.clone().cholesky().unwrap(); let b1 = Vector4::<$scalar>::new_random().map(|e| e.0); @@ -56,29 +55,32 @@ macro_rules! gen_tests( let sol1 = chol.solve(&b1); let sol2 = chol.solve(&b2); - relative_eq!(m * sol1, b1, epsilon = 1.0e-7) && - relative_eq!(m * sol2, b2, epsilon = 1.0e-7) + prop_assert!(relative_eq!(m * sol1, b1, epsilon = 1.0e-7)); + prop_assert!(relative_eq!(m * sol2, b2, epsilon = 1.0e-7)); } - fn cholesky_inverse(n: usize) -> bool { - let m = RandomSDP::new(Dynamic::new(n.max(1).min(50)), || random::<$scalar>().0).unwrap(); + #[test] + fn cholesky_inverse(n in PROPTEST_MATRIX_DIM) { + let m = RandomSDP::new(Dynamic::new(n), || random::<$scalar>().0).unwrap(); let m1 = m.clone().cholesky().unwrap().inverse(); let id1 = &m * &m1; let id2 = &m1 * &m; - id1.is_identity(1.0e-7) && id2.is_identity(1.0e-7) + prop_assert!(id1.is_identity(1.0e-7) && id2.is_identity(1.0e-7)); } - fn cholesky_inverse_static(_n: usize) -> bool { + #[test] + fn cholesky_inverse_static(_n in PROPTEST_MATRIX_DIM) { let m = RandomSDP::new(U4, || random::<$scalar>().0).unwrap(); let m1 = m.clone().cholesky().unwrap().inverse(); let id1 = &m * &m1; let id2 = &m1 * &m; - id1.is_identity(1.0e-7) && id2.is_identity(1.0e-7) + prop_assert!(id1.is_identity(1.0e-7) && id2.is_identity(1.0e-7)); } - fn cholesky_rank_one_update(_n: usize) -> bool { + #[test] + fn cholesky_rank_one_update(_n in PROPTEST_MATRIX_DIM) { let mut m = RandomSDP::new(U4, || random::<$scalar>().0).unwrap(); let x = Vector4::<$scalar>::new_random().map(|e| e.0); @@ -96,10 +98,11 @@ macro_rules! 
gen_tests( // updates m manually m.gerc(sigma_scalar, &x, &x, one); // m += sigma * x * x.adjoint() - relative_eq!(m, m_chol_updated, epsilon = 1.0e-7) + prop_assert!(relative_eq!(m, m_chol_updated, epsilon = 1.0e-7)); } - fn cholesky_insert_column(n: usize) -> bool { + #[test] + fn cholesky_insert_column(n in PROPTEST_MATRIX_DIM) { let n = n.max(1).min(10); let j = random::<usize>() % n; let m_updated = RandomSDP::new(Dynamic::new(n), || random::<$scalar>().0).unwrap(); @@ -112,10 +115,11 @@ macro_rules! gen_tests( let chol = m.clone().cholesky().unwrap().insert_column(j, col); let m_chol_updated = chol.l() * chol.l().adjoint(); - relative_eq!(m_updated, m_chol_updated, epsilon = 1.0e-7) + prop_assert!(relative_eq!(m_updated, m_chol_updated, epsilon = 1.0e-7)); } - fn cholesky_remove_column(n: usize) -> bool { + #[test] + fn cholesky_remove_column(n in PROPTEST_MATRIX_DIM) { let n = n.max(1).min(10); let j = random::<usize>() % n; let m = RandomSDP::new(Dynamic::new(n), || random::<$scalar>().0).unwrap(); @@ -127,7 +131,7 @@ macro_rules! gen_tests( // remove column from m let m_updated = m.remove_column(j).remove_row(j); - relative_eq!(m_updated, m_chol_updated, epsilon = 1.0e-7) + prop_assert!(relative_eq!(m_updated, m_chol_updated, epsilon = 1.0e-7)); } } } diff --git a/tests/linalg/col_piv_qr.rs b/tests/linalg/col_piv_qr.rs new file mode 100644 index 00000000..40ebfb01 --- /dev/null +++ b/tests/linalg/col_piv_qr.rs @@ -0,0 +1,152 @@ +#[cfg_attr(rustfmt, rustfmt_skip)] + +use na::Matrix4; + +#[test] +fn col_piv_qr() { + let m = Matrix4::new( + 1.0, -1.0, 2.0, 1.0, -1.0, 3.0, -1.0, -1.0, 3.0, -5.0, 5.0, 3.0, 1.0, 2.0, 1.0, -2.0, + ); + let col_piv_qr = m.col_piv_qr(); + assert!(relative_eq!( + col_piv_qr.determinant(), + 0.0, + epsilon = 1.0e-7 + )); + + let (q, r, p) = col_piv_qr.unpack(); + + let mut qr = q * r; + p.inv_permute_columns(&mut qr); + + assert!(relative_eq!(m, qr, epsilon = 1.0e-7)); +} + +#[cfg(feature = "proptest-support")] +mod proptest_tests { + macro_rules! gen_tests( + ($module: ident, $scalar: expr, $scalar_type: ty) => { + mod $module { + use na::{DMatrix, DVector, Matrix4x3, Vector4}; + use std::cmp; + + #[allow(unused_imports)] + use crate::core::helper::{RandComplex, RandScalar}; + use crate::proptest::*; + use proptest::{prop_assert, proptest}; + + proptest!
{ + #[test] + fn col_piv_qr(m in dmatrix_($scalar)) { + let col_piv_qr = m.clone().col_piv_qr(); + let (q, r, p) = col_piv_qr.unpack(); + let mut qr = &q * &r; + p.inv_permute_columns(&mut qr); + + prop_assert!(relative_eq!(m, &qr, epsilon = 1.0e-7)); + prop_assert!(q.is_orthogonal(1.0e-7)); + } + + #[test] + fn col_piv_qr_static_5_3(m in matrix5x3_($scalar)) { + let col_piv_qr = m.col_piv_qr(); + let (q, r, p) = col_piv_qr.unpack(); + let mut qr = q * r; + p.inv_permute_columns(&mut qr); + + prop_assert!(relative_eq!(m, qr, epsilon = 1.0e-7)); + prop_assert!(q.is_orthogonal(1.0e-7)); + } + + #[test] + fn col_piv_qr_static_3_5(m in matrix3x5_($scalar)) { + let col_piv_qr = m.col_piv_qr(); + let (q, r, p) = col_piv_qr.unpack(); + let mut qr = q * r; + p.inv_permute_columns(&mut qr); + + prop_assert!(relative_eq!(m, qr, epsilon = 1.0e-7)); + prop_assert!(q.is_orthogonal(1.0e-7)); + } + + #[test] + fn col_piv_qr_static_square(m in matrix4_($scalar)) { + let col_piv_qr = m.col_piv_qr(); + let (q, r, p) = col_piv_qr.unpack(); + let mut qr = q * r; + p.inv_permute_columns(&mut qr); + + prop_assert!(relative_eq!(m, qr, epsilon = 1.0e-7)); + prop_assert!(q.is_orthogonal(1.0e-7)); + } + + #[test] + fn col_piv_qr_solve(n in PROPTEST_MATRIX_DIM, nb in PROPTEST_MATRIX_DIM) { + if n != 0 && nb != 0 { + let n = cmp::min(n, 50); // To avoid slowing down the test too much. + let nb = cmp::min(nb, 50); // To avoid slowing down the test too much. + let m = DMatrix::<$scalar_type>::new_random(n, n).map(|e| e.0); + + let col_piv_qr = m.clone().col_piv_qr(); + let b1 = DVector::<$scalar_type>::new_random(n).map(|e| e.0); + let b2 = DMatrix::<$scalar_type>::new_random(n, nb).map(|e| e.0); + + if col_piv_qr.is_invertible() { + let sol1 = col_piv_qr.solve(&b1).unwrap(); + let sol2 = col_piv_qr.solve(&b2).unwrap(); + + prop_assert!(relative_eq!(&m * sol1, b1, epsilon = 1.0e-6)); + prop_assert!(relative_eq!(&m * sol2, b2, epsilon = 1.0e-6)); + } + } + } + + #[test] + fn col_piv_qr_solve_static(m in matrix4_($scalar)) { + let col_piv_qr = m.col_piv_qr(); + let b1 = Vector4::<$scalar_type>::new_random().map(|e| e.0); + let b2 = Matrix4x3::<$scalar_type>::new_random().map(|e| e.0); + + if col_piv_qr.is_invertible() { + let sol1 = col_piv_qr.solve(&b1).unwrap(); + let sol2 = col_piv_qr.solve(&b2).unwrap(); + + prop_assert!(relative_eq!(m * sol1, b1, epsilon = 1.0e-6)); + prop_assert!(relative_eq!(m * sol2, b2, epsilon = 1.0e-6)); + } + } + + #[test] + fn col_piv_qr_inverse(n in PROPTEST_MATRIX_DIM) { + let n = cmp::max(1, cmp::min(n, 15)); // To avoid slowing down the test too much. 
+ let m = DMatrix::<$scalar_type>::new_random(n, n).map(|e| e.0); + + if let Some(m1) = m.clone().col_piv_qr().try_inverse() { + let id1 = &m * &m1; + let id2 = &m1 * &m; + + prop_assert!(id1.is_identity(1.0e-5)); + prop_assert!(id2.is_identity(1.0e-5)); + } + } + + #[test] + fn col_piv_qr_inverse_static(m in matrix4_($scalar)) { + let col_piv_qr = m.col_piv_qr(); + + if let Some(m1) = col_piv_qr.try_inverse() { + let id1 = &m * &m1; + let id2 = &m1 * &m; + + prop_assert!(id1.is_identity(1.0e-5)); + prop_assert!(id2.is_identity(1.0e-5)); + } + } + } + } + } + ); + + gen_tests!(complex, complex_f64(), RandComplex); + gen_tests!(f64, PROPTEST_F64, RandScalar); +} diff --git a/tests/linalg/eigen.rs b/tests/linalg/eigen.rs index c0d3171f..e9c0522b 100644 --- a/tests/linalg/eigen.rs +++ b/tests/linalg/eigen.rs @@ -1,66 +1,74 @@ use na::DMatrix; -#[cfg(feature = "arbitrary")] -mod quickcheck_tests { +#[cfg(feature = "proptest-support")] +mod proptest_tests { macro_rules! gen_tests( - ($module: ident, $scalar: ty) => { + ($module: ident, $scalar: expr, $scalar_type: ty) => { mod $module { - use na::{DMatrix, Matrix2, Matrix3, Matrix4}; + use na::DMatrix; #[allow(unused_imports)] use crate::core::helper::{RandScalar, RandComplex}; use std::cmp; - quickcheck! { - fn symmetric_eigen(n: usize) -> bool { + use crate::proptest::*; + use proptest::{prop_assert, proptest}; + + proptest! { + #[test] + fn symmetric_eigen(n in PROPTEST_MATRIX_DIM) { let n = cmp::max(1, cmp::min(n, 10)); - let m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0).hermitian_part(); + let m = DMatrix::<$scalar_type>::new_random(n, n).map(|e| e.0).hermitian_part(); let eig = m.clone().symmetric_eigen(); let recomp = eig.recompose(); - relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5) + prop_assert!(relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5)) } - fn symmetric_eigen_singular(n: usize) -> bool { + #[test] + fn symmetric_eigen_singular(n in PROPTEST_MATRIX_DIM) { let n = cmp::max(1, cmp::min(n, 10)); - let mut m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0).hermitian_part(); + let mut m = DMatrix::<$scalar_type>::new_random(n, n).map(|e| e.0).hermitian_part(); m.row_mut(n / 2).fill(na::zero()); m.column_mut(n / 2).fill(na::zero()); let eig = m.clone().symmetric_eigen(); let recomp = eig.recompose(); - relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5) + prop_assert!(relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5)) } - fn symmetric_eigen_static_square_4x4(m: Matrix4<$scalar>) -> bool { - let m = m.map(|e| e.0).hermitian_part(); + #[test] + fn symmetric_eigen_static_square_4x4(m in matrix4_($scalar)) { + let m = m.hermitian_part(); let eig = m.symmetric_eigen(); let recomp = eig.recompose(); - relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5) + prop_assert!(relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5)) } - fn symmetric_eigen_static_square_3x3(m: Matrix3<$scalar>) -> bool { - let m = m.map(|e| e.0).hermitian_part(); + #[test] + fn symmetric_eigen_static_square_3x3(m in matrix3_($scalar)) { + let m = m.hermitian_part(); let eig = m.symmetric_eigen(); let recomp = eig.recompose(); - relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5) + prop_assert!(relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5)) } - fn symmetric_eigen_static_square_2x2(m: Matrix2<$scalar>) -> bool { - let m = m.map(|e| e.0).hermitian_part(); + 
#[test] + fn symmetric_eigen_static_square_2x2(m in matrix2_($scalar)) { + let m = m.hermitian_part(); let eig = m.symmetric_eigen(); let recomp = eig.recompose(); - relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5) + prop_assert!(relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-5)) } } } } ); - gen_tests!(complex, RandComplex); - gen_tests!(f64, RandScalar); + gen_tests!(complex, complex_f64(), RandComplex); + gen_tests!(f64, PROPTEST_F64, RandScalar); } // Test proposed on the issue #176 of rulinalg. diff --git a/tests/linalg/exp.rs b/tests/linalg/exp.rs index f5b5243a..6a643037 100644 --- a/tests/linalg/exp.rs +++ b/tests/linalg/exp.rs @@ -71,7 +71,6 @@ mod tests { let m22 = ad_2.exp() * (delta * delta_2.cosh() + (d - a) * delta_2.sinh()); let f = Matrix2::new(m11, m12, m21, m22) / delta; - println!("a: {}", m); assert!(relative_eq!(f, m.exp(), epsilon = 1.0e-7)); break; } diff --git a/tests/linalg/full_piv_lu.rs b/tests/linalg/full_piv_lu.rs index 0bb832cd..f782d8fd 100644 --- a/tests/linalg/full_piv_lu.rs +++ b/tests/linalg/full_piv_lu.rs @@ -40,101 +40,96 @@ fn full_piv_lu_simple_with_pivot() { } -#[cfg(feature = "arbitrary")] -mod quickcheck_tests { +#[cfg(feature = "proptest-support")] +mod proptest_tests { macro_rules! gen_tests( - ($module: ident, $scalar: ty) => { + ($module: ident, $scalar: expr, $scalar_type: ty) => { mod $module { use std::cmp; use num::One; - use na::{DMatrix, Matrix4, Matrix4x3, Matrix5x3, Matrix3x5, DVector, Vector4}; + use na::{DMatrix, Matrix4x3, DVector, Vector4}; #[allow(unused_imports)] use crate::core::helper::{RandScalar, RandComplex}; - quickcheck! { - fn full_piv_lu(m: DMatrix<$scalar>) -> bool { - let mut m = m.map(|e| e.0); - if m.len() == 0 { - m = DMatrix::<$scalar>::new_random(1, 1).map(|e| e.0); - } + use crate::proptest::*; + use proptest::{prop_assert, proptest}; + proptest! { + #[test] + fn full_piv_lu(m in dmatrix_($scalar)) { let lu = m.clone().full_piv_lu(); let (p, l, u, q) = lu.unpack(); let mut lu = l * u; p.inv_permute_rows(&mut lu); q.inv_permute_columns(&mut lu); - relative_eq!(m, lu, epsilon = 1.0e-7) + prop_assert!(relative_eq!(m, lu, epsilon = 1.0e-7)) } - fn full_piv_lu_static_3_5(m: Matrix3x5<$scalar>) -> bool { - let m = m.map(|e| e.0); + #[test] + fn full_piv_lu_static_3_5(m in matrix3x5_($scalar)) { let lu = m.full_piv_lu(); let (p, l, u, q) = lu.unpack(); let mut lu = l * u; p.inv_permute_rows(&mut lu); q.inv_permute_columns(&mut lu); - relative_eq!(m, lu, epsilon = 1.0e-7) + prop_assert!(relative_eq!(m, lu, epsilon = 1.0e-7)) } - fn full_piv_lu_static_5_3(m: Matrix5x3<$scalar>) -> bool { - let m = m.map(|e| e.0); + #[test] + fn full_piv_lu_static_5_3(m in matrix5x3_($scalar)) { let lu = m.full_piv_lu(); let (p, l, u, q) = lu.unpack(); let mut lu = l * u; p.inv_permute_rows(&mut lu); q.inv_permute_columns(&mut lu); - relative_eq!(m, lu, epsilon = 1.0e-7) + prop_assert!(relative_eq!(m, lu, epsilon = 1.0e-7)) } - fn full_piv_lu_static_square(m: Matrix4<$scalar>) -> bool { - let m = m.map(|e| e.0); + #[test] + fn full_piv_lu_static_square(m in matrix4_($scalar)) { let lu = m.full_piv_lu(); let (p, l, u, q) = lu.unpack(); let mut lu = l * u; p.inv_permute_rows(&mut lu); q.inv_permute_columns(&mut lu); - relative_eq!(m, lu, epsilon = 1.0e-7) + prop_assert!(relative_eq!(m, lu, epsilon = 1.0e-7)) } - fn full_piv_lu_solve(n: usize, nb: usize) -> bool { - if n != 0 && nb != 0 { - let n = cmp::min(n, 50); // To avoid slowing down the test too much. - let nb = cmp::min(nb, 50); // To avoid slowing down the test too much.
- let m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0); + #[test] + fn full_piv_lu_solve(n in PROPTEST_MATRIX_DIM, nb in PROPTEST_MATRIX_DIM) { + let m = DMatrix::<$scalar_type>::new_random(n, n).map(|e| e.0); - let lu = m.clone().full_piv_lu(); - let b1 = DVector::<$scalar>::new_random(n).map(|e| e.0); - let b2 = DMatrix::<$scalar>::new_random(n, nb).map(|e| e.0); + let lu = m.clone().full_piv_lu(); + let b1 = DVector::<$scalar_type>::new_random(n).map(|e| e.0); + let b2 = DMatrix::<$scalar_type>::new_random(n, nb).map(|e| e.0); - let sol1 = lu.solve(&b1); - let sol2 = lu.solve(&b2); + let sol1 = lu.solve(&b1); + let sol2 = lu.solve(&b2); - return (sol1.is_none() || relative_eq!(&m * sol1.unwrap(), b1, epsilon = 1.0e-6)) && - (sol2.is_none() || relative_eq!(&m * sol2.unwrap(), b2, epsilon = 1.0e-6)) - } - - return true; + prop_assert!(sol1.is_none() || relative_eq!(&m * sol1.unwrap(), b1, epsilon = 1.0e-6)); + prop_assert!(sol2.is_none() || relative_eq!(&m * sol2.unwrap(), b2, epsilon = 1.0e-6)); } - fn full_piv_lu_solve_static(m: Matrix4<$scalar>) -> bool { - let m = m.map(|e| e.0); + #[test] + fn full_piv_lu_solve_static(m in matrix4_($scalar)) { let lu = m.full_piv_lu(); - let b1 = Vector4::<$scalar>::new_random().map(|e| e.0); - let b2 = Matrix4x3::<$scalar>::new_random().map(|e| e.0); + let b1 = Vector4::<$scalar_type>::new_random().map(|e| e.0); + let b2 = Matrix4x3::<$scalar_type>::new_random().map(|e| e.0); let sol1 = lu.solve(&b1); let sol2 = lu.solve(&b2); - return (sol1.is_none() || relative_eq!(&m * sol1.unwrap(), b1, epsilon = 1.0e-6)) && - (sol2.is_none() || relative_eq!(&m * sol2.unwrap(), b2, epsilon = 1.0e-6)) + prop_assert!(sol1.is_none() || relative_eq!(&m * sol1.unwrap(), b1, epsilon = 1.0e-6)); + prop_assert!(sol2.is_none() || relative_eq!(&m * sol2.unwrap(), b2, epsilon = 1.0e-6)); } - fn full_piv_lu_inverse(n: usize) -> bool { + #[test] + fn full_piv_lu_inverse(n in PROPTEST_MATRIX_DIM) { let n = cmp::max(1, cmp::min(n, 15)); // To avoid slowing down the test too much. - let m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0); + let m = DMatrix::<$scalar_type>::new_random(n, n).map(|e| e.0); let mut l = m.lower_triangle(); let mut u = m.upper_triangle(); @@ -148,21 +143,20 @@ mod quickcheck_tests { let id1 = &m * &m1; let id2 = &m1 * &m; - return id1.is_identity(1.0e-5) && id2.is_identity(1.0e-5); + prop_assert!(id1.is_identity(1.0e-5)); + prop_assert!(id2.is_identity(1.0e-5)); } - fn full_piv_lu_inverse_static(m: Matrix4<$scalar>) -> bool { - let m = m.map(|e| e.0); + #[test] + fn full_piv_lu_inverse_static(m in matrix4_($scalar)) { let lu = m.full_piv_lu(); if let Some(m1) = lu.try_inverse() { let id1 = &m * &m1; let id2 = &m1 * &m; - id1.is_identity(1.0e-5) && id2.is_identity(1.0e-5) - } - else { - true + prop_assert!(id1.is_identity(1.0e-5)); + prop_assert!(id2.is_identity(1.0e-5)); } } } @@ -170,8 +164,8 @@ mod quickcheck_tests { } ); - gen_tests!(complex, RandComplex); - gen_tests!(f64, RandScalar); + gen_tests!(complex, complex_f64(), RandComplex); + gen_tests!(f64, PROPTEST_F64, RandScalar); } /* diff --git a/tests/linalg/hessenberg.rs b/tests/linalg/hessenberg.rs index ec499f82..c0783de2 100644 --- a/tests/linalg/hessenberg.rs +++ b/tests/linalg/hessenberg.rs @@ -1,4 +1,4 @@ -#![cfg(feature = "arbitrary")] +#![cfg(feature = "proptest-support")] use na::Matrix2; @@ -11,40 +11,41 @@ fn hessenberg_simple() { } macro_rules! 
gen_tests( - ($module: ident, $scalar: ty) => { + ($module: ident, $scalar: expr, $scalar_type: ty) => { mod $module { - use na::{DMatrix, Matrix2, Matrix4}; - use std::cmp; + use na::DMatrix; #[allow(unused_imports)] use crate::core::helper::{RandScalar, RandComplex}; - quickcheck! { - fn hessenberg(n: usize) -> bool { - let n = cmp::max(1, cmp::min(n, 50)); - let m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0); + use crate::proptest::*; + use proptest::{prop_assert, proptest}; + proptest! { + #[test] + fn hessenberg(n in PROPTEST_MATRIX_DIM) { + let m = DMatrix::<$scalar_type>::new_random(n, n).map(|e| e.0); let hess = m.clone().hessenberg(); let (p, h) = hess.unpack(); - relative_eq!(m, &p * h * p.adjoint(), epsilon = 1.0e-7) + prop_assert!(relative_eq!(m, &p * h * p.adjoint(), epsilon = 1.0e-7)) } - fn hessenberg_static_mat2(m: Matrix2<$scalar>) -> bool { - let m = m.map(|e| e.0); + #[test] + fn hessenberg_static_mat2(m in matrix2_($scalar)) { let hess = m.hessenberg(); let (p, h) = hess.unpack(); - relative_eq!(m, p * h * p.adjoint(), epsilon = 1.0e-7) + prop_assert!(relative_eq!(m, p * h * p.adjoint(), epsilon = 1.0e-7)) } - fn hessenberg_static(m: Matrix4<$scalar>) -> bool { - let m = m.map(|e| e.0); + #[test] + fn hessenberg_static(m in matrix4_($scalar)) { let hess = m.hessenberg(); let (p, h) = hess.unpack(); - relative_eq!(m, p * h * p.adjoint(), epsilon = 1.0e-7) + prop_assert!(relative_eq!(m, p * h * p.adjoint(), epsilon = 1.0e-7)) } } } } ); -gen_tests!(complex, RandComplex); -gen_tests!(f64, RandScalar); +gen_tests!(complex, complex_f64(), RandComplex); +gen_tests!(f64, PROPTEST_F64, RandScalar); diff --git a/tests/linalg/lu.rs b/tests/linalg/lu.rs index 7fab6b01..58a661c4 100644 --- a/tests/linalg/lu.rs +++ b/tests/linalg/lu.rs @@ -38,103 +38,88 @@ fn lu_simple_with_pivot() { assert!(relative_eq!(m, lu, epsilon = 1.0e-7)); } -#[cfg(feature = "arbitrary")] -mod quickcheck_tests { - #[allow(unused_imports)] - use crate::core::helper::{RandComplex, RandScalar}; - +#[cfg(feature = "proptest-support")] +mod proptest_tests { macro_rules! gen_tests( - ($module: ident, $scalar: ty) => { + ($module: ident, $scalar: expr, $scalar_type: ty) => { mod $module { - use std::cmp; - use na::{DMatrix, Matrix4, Matrix4x3, Matrix5x3, Matrix3x5, DVector, Vector4}; + use na::{DMatrix, Matrix4x3, DVector, Vector4}; #[allow(unused_imports)] use crate::core::helper::{RandScalar, RandComplex}; + use crate::proptest::*; + use proptest::{prop_assert, proptest}; - quickcheck! { - fn lu(m: DMatrix<$scalar>) -> bool { - let mut m = m; - if m.len() == 0 { - m = DMatrix::<$scalar>::new_random(1, 1); - } - - let m = m.map(|e| e.0); - + proptest! 
{ + #[test] + fn lu(m in dmatrix_($scalar)) { let lu = m.clone().lu(); let (p, l, u) = lu.unpack(); let mut lu = l * u; p.inv_permute_rows(&mut lu); - relative_eq!(m, lu, epsilon = 1.0e-7) + prop_assert!(relative_eq!(m, lu, epsilon = 1.0e-7)) } - fn lu_static_3_5(m: Matrix3x5<$scalar>) -> bool { - let m = m.map(|e| e.0); + #[test] + fn lu_static_3_5(m in matrix3x5_($scalar)) { let lu = m.lu(); let (p, l, u) = lu.unpack(); let mut lu = l * u; p.inv_permute_rows(&mut lu); - relative_eq!(m, lu, epsilon = 1.0e-7) + prop_assert!(relative_eq!(m, lu, epsilon = 1.0e-7)) } - fn lu_static_5_3(m: Matrix5x3<$scalar>) -> bool { - let m = m.map(|e| e.0); + #[test] + fn lu_static_5_3(m in matrix5x3_($scalar)) { let lu = m.lu(); let (p, l, u) = lu.unpack(); let mut lu = l * u; p.inv_permute_rows(&mut lu); - relative_eq!(m, lu, epsilon = 1.0e-7) + prop_assert!(relative_eq!(m, lu, epsilon = 1.0e-7)); } - fn lu_static_square(m: Matrix4<$scalar>) -> bool { - let m = m.map(|e| e.0); + #[test] + fn lu_static_square(m in matrix4_($scalar)) { let lu = m.lu(); let (p, l, u) = lu.unpack(); let mut lu = l * u; p.inv_permute_rows(&mut lu); - relative_eq!(m, lu, epsilon = 1.0e-7) + prop_assert!(relative_eq!(m, lu, epsilon = 1.0e-7)); } - fn lu_solve(n: usize, nb: usize) -> bool { - if n != 0 && nb != 0 { - let n = cmp::min(n, 50); // To avoid slowing down the test too much. - let nb = cmp::min(nb, 50); // To avoid slowing down the test too much. - let m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0); + #[test] + fn lu_solve(n in PROPTEST_MATRIX_DIM, nb in PROPTEST_MATRIX_DIM) { let m = DMatrix::<$scalar_type>::new_random(n, n).map(|e| e.0); - let lu = m.clone().lu(); - let b1 = DVector::<$scalar>::new_random(n).map(|e| e.0); - let b2 = DMatrix::<$scalar>::new_random(n, nb).map(|e| e.0); + let lu = m.clone().lu(); + let b1 = DVector::<$scalar_type>::new_random(n).map(|e| e.0); + let b2 = DMatrix::<$scalar_type>::new_random(n, nb).map(|e| e.0); - let sol1 = lu.solve(&b1); - let sol2 = lu.solve(&b2); + let sol1 = lu.solve(&b1); + let sol2 = lu.solve(&b2); - return (sol1.is_none() || relative_eq!(&m * sol1.unwrap(), b1, epsilon = 1.0e-6)) && - (sol2.is_none() || relative_eq!(&m * sol2.unwrap(), b2, epsilon = 1.0e-6)) - } - - return true; + prop_assert!(sol1.is_none() || relative_eq!(&m * sol1.unwrap(), b1, epsilon = 1.0e-6)); + prop_assert!(sol2.is_none() || relative_eq!(&m * sol2.unwrap(), b2, epsilon = 1.0e-6)); } - fn lu_solve_static(m: Matrix4<$scalar>) -> bool { - let m = m.map(|e| e.0); + #[test] + fn lu_solve_static(m in matrix4_($scalar)) { let lu = m.lu(); - let b1 = Vector4::<$scalar>::new_random().map(|e| e.0); - let b2 = Matrix4x3::<$scalar>::new_random().map(|e| e.0); + let b1 = Vector4::<$scalar_type>::new_random().map(|e| e.0); + let b2 = Matrix4x3::<$scalar_type>::new_random().map(|e| e.0); let sol1 = lu.solve(&b1); let sol2 = lu.solve(&b2); - return (sol1.is_none() || relative_eq!(&m * sol1.unwrap(), b1, epsilon = 1.0e-6)) && - (sol2.is_none() || relative_eq!(&m * sol2.unwrap(), b2, epsilon = 1.0e-6)) + prop_assert!(sol1.is_none() || relative_eq!(&m * sol1.unwrap(), b1, epsilon = 1.0e-6)); + prop_assert!(sol2.is_none() || relative_eq!(&m * sol2.unwrap(), b2, epsilon = 1.0e-6)); } - fn lu_inverse(n: usize) -> bool { - let n = cmp::max(1, cmp::min(n, 15)); // To avoid slowing down the test too much.
- let m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0); - + #[test] + fn lu_inverse(n in PROPTEST_MATRIX_DIM) { + let m = DMatrix::<$scalar_type>::new_random(n, n).map(|e| e.0); let mut l = m.lower_triangle(); let mut u = m.upper_triangle(); @@ -147,21 +132,20 @@ mod quickcheck_tests { let id1 = &m * &m1; let id2 = &m1 * &m; - return id1.is_identity(1.0e-5) && id2.is_identity(1.0e-5); + prop_assert!(id1.is_identity(1.0e-5)); + prop_assert!(id2.is_identity(1.0e-5)); } - fn lu_inverse_static(m: Matrix4<$scalar>) -> bool { - let m = m.map(|e| e.0); + #[test] + fn lu_inverse_static(m in matrix4_($scalar)) { let lu = m.lu(); if let Some(m1) = lu.try_inverse() { let id1 = &m * &m1; let id2 = &m1 * &m; - id1.is_identity(1.0e-5) && id2.is_identity(1.0e-5) - } - else { - true + prop_assert!(id1.is_identity(1.0e-5)); + prop_assert!(id2.is_identity(1.0e-5)); } } } @@ -169,6 +153,6 @@ mod quickcheck_tests { } ); - gen_tests!(complex, RandComplex); - gen_tests!(f64, RandScalar); + gen_tests!(complex, complex_f64(), RandComplex); + gen_tests!(f64, PROPTEST_F64, RandScalar); } diff --git a/tests/linalg/mod.rs b/tests/linalg/mod.rs index 7fc01396..9c252bfd 100644 --- a/tests/linalg/mod.rs +++ b/tests/linalg/mod.rs @@ -1,6 +1,7 @@ mod balancing; mod bidiagonal; mod cholesky; +mod col_piv_qr; mod convolution; mod eigen; mod exp; @@ -13,3 +14,4 @@ mod schur; mod solve; mod svd; mod tridiagonal; +mod udu; diff --git a/tests/linalg/qr.rs b/tests/linalg/qr.rs index a6e54af4..f499b030 100644 --- a/tests/linalg/qr.rs +++ b/tests/linalg/qr.rs @@ -1,126 +1,112 @@ -#![cfg(feature = "arbitrary")] +#![cfg(feature = "proptest-support")] macro_rules! gen_tests( - ($module: ident, $scalar: ty) => { + ($module: ident, $scalar: expr, $scalar_type: ty) => { mod $module { - use na::{DMatrix, DVector, Matrix3x5, Matrix4, Matrix4x3, Matrix5x3, Vector4}; + use na::{DMatrix, DVector, Matrix4x3, Vector4}; use std::cmp; #[allow(unused_imports)] use crate::core::helper::{RandScalar, RandComplex}; + use crate::proptest::*; + use proptest::{prop_assert, proptest}; - quickcheck! { - fn qr(m: DMatrix<$scalar>) -> bool { - let m = m.map(|e| e.0); + proptest! 
{ + #[test] + fn qr(m in dmatrix_($scalar)) { let qr = m.clone().qr(); let q = qr.q(); let r = qr.r(); - println!("m: {}", m); - println!("qr: {}", &q * &r); - - relative_eq!(m, &q * r, epsilon = 1.0e-7) && - q.is_orthogonal(1.0e-7) + prop_assert!(relative_eq!(m, &q * r, epsilon = 1.0e-7)); + prop_assert!(q.is_orthogonal(1.0e-7)); } - fn qr_static_5_3(m: Matrix5x3<$scalar>) -> bool { - let m = m.map(|e| e.0); + #[test] + fn qr_static_5_3(m in matrix5x3_($scalar)) { let qr = m.qr(); let q = qr.q(); let r = qr.r(); - relative_eq!(m, q * r, epsilon = 1.0e-7) && - q.is_orthogonal(1.0e-7) + prop_assert!(relative_eq!(m, q * r, epsilon = 1.0e-7)); + prop_assert!(q.is_orthogonal(1.0e-7)); } - fn qr_static_3_5(m: Matrix3x5<$scalar>) -> bool { - let m = m.map(|e| e.0); + #[test] + fn qr_static_3_5(m in matrix3x5_($scalar)) { let qr = m.qr(); let q = qr.q(); let r = qr.r(); - relative_eq!(m, q * r, epsilon = 1.0e-7) && - q.is_orthogonal(1.0e-7) + prop_assert!(relative_eq!(m, q * r, epsilon = 1.0e-7)); + prop_assert!(q.is_orthogonal(1.0e-7)); } - fn qr_static_square(m: Matrix4<$scalar>) -> bool { - let m = m.map(|e| e.0); + #[test] + fn qr_static_square(m in matrix4_($scalar)) { let qr = m.qr(); let q = qr.q(); let r = qr.r(); - println!("{}{}{}{}", q, r, q * r, m); - - relative_eq!(m, q * r, epsilon = 1.0e-7) && - q.is_orthogonal(1.0e-7) + prop_assert!(relative_eq!(m, q * r, epsilon = 1.0e-7)); + prop_assert!(q.is_orthogonal(1.0e-7)); } - fn qr_solve(n: usize, nb: usize) -> bool { - if n != 0 && nb != 0 { - let n = cmp::min(n, 50); // To avoid slowing down the test too much. - let nb = cmp::min(nb, 50); // To avoid slowing down the test too much. - let m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0); + #[test] + fn qr_solve(n in PROPTEST_MATRIX_DIM, nb in PROPTEST_MATRIX_DIM) { + let m = DMatrix::<$scalar_type>::new_random(n, n).map(|e| e.0); - let qr = m.clone().qr(); - let b1 = DVector::<$scalar>::new_random(n).map(|e| e.0); - let b2 = DMatrix::<$scalar>::new_random(n, nb).map(|e| e.0); + let qr = m.clone().qr(); + let b1 = DVector::<$scalar_type>::new_random(n).map(|e| e.0); + let b2 = DMatrix::<$scalar_type>::new_random(n, nb).map(|e| e.0); - if qr.is_invertible() { - let sol1 = qr.solve(&b1).unwrap(); - let sol2 = qr.solve(&b2).unwrap(); + if qr.is_invertible() { + let sol1 = qr.solve(&b1).unwrap(); + let sol2 = qr.solve(&b2).unwrap(); - return relative_eq!(&m * sol1, b1, epsilon = 1.0e-6) && - relative_eq!(&m * sol2, b2, epsilon = 1.0e-6) - } + prop_assert!(relative_eq!(&m * sol1, b1, epsilon = 1.0e-6)); + prop_assert!(relative_eq!(&m * sol2, b2, epsilon = 1.0e-6)); } - - return true; } - fn qr_solve_static(m: Matrix4<$scalar>) -> bool { - let m = m.map(|e| e.0); + #[test] + fn qr_solve_static(m in matrix4_($scalar)) { let qr = m.qr(); - let b1 = Vector4::<$scalar>::new_random().map(|e| e.0); - let b2 = Matrix4x3::<$scalar>::new_random().map(|e| e.0); + let b1 = Vector4::<$scalar_type>::new_random().map(|e| e.0); + let b2 = Matrix4x3::<$scalar_type>::new_random().map(|e| e.0); if qr.is_invertible() { let sol1 = qr.solve(&b1).unwrap(); let sol2 = qr.solve(&b2).unwrap(); - relative_eq!(m * sol1, b1, epsilon = 1.0e-6) && - relative_eq!(m * sol2, b2, epsilon = 1.0e-6) - } - else { - false + prop_assert!(relative_eq!(m * sol1, b1, epsilon = 1.0e-6)); + prop_assert!(relative_eq!(m * sol2, b2, epsilon = 1.0e-6)); } } - fn qr_inverse(n: usize) -> bool { + #[test] + fn qr_inverse(n in PROPTEST_MATRIX_DIM) { let n = cmp::max(1, cmp::min(n, 15)); // To avoid slowing down the test too much. 
- let m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0); + let m = DMatrix::<$scalar_type>::new_random(n, n).map(|e| e.0); if let Some(m1) = m.clone().qr().try_inverse() { let id1 = &m * &m1; let id2 = &m1 * &m; - id1.is_identity(1.0e-5) && id2.is_identity(1.0e-5) - } - else { - true + prop_assert!(id1.is_identity(1.0e-5)); + prop_assert!(id2.is_identity(1.0e-5)); } } - fn qr_inverse_static(m: Matrix4<$scalar>) -> bool { - let m = m.map(|e| e.0); + #[test] + fn qr_inverse_static(m in matrix4_($scalar)) { let qr = m.qr(); if let Some(m1) = qr.try_inverse() { let id1 = &m * &m1; let id2 = &m1 * &m; - id1.is_identity(1.0e-5) && id2.is_identity(1.0e-5) - } - else { - true + prop_assert!(id1.is_identity(1.0e-5)); + prop_assert!(id2.is_identity(1.0e-5)); } } } @@ -128,5 +114,5 @@ macro_rules! gen_tests( } ); -gen_tests!(complex, RandComplex); -gen_tests!(f64, RandScalar); +gen_tests!(complex, complex_f64(), RandComplex); +gen_tests!(f64, PROPTEST_F64, RandScalar); diff --git a/tests/linalg/schur.rs b/tests/linalg/schur.rs index 2086ce2d..11402770 100644 --- a/tests/linalg/schur.rs +++ b/tests/linalg/schur.rs @@ -4,8 +4,8 @@ use na::{DMatrix, Matrix3, Matrix4}; #[rustfmt::skip] fn schur_simpl_mat3() { let m = Matrix3::new(-2.0, -4.0, 2.0, - -2.0, 1.0, 2.0, - 4.0, 2.0, 5.0); + -2.0, 1.0, 2.0, + 4.0, 2.0, 5.0); let schur = m.schur(); let (vecs, vals) = schur.unpack(); @@ -13,72 +13,49 @@ fn schur_simpl_mat3() { assert!(relative_eq!(vecs * vals * vecs.transpose(), m, epsilon = 1.0e-7)); } -#[cfg(feature = "arbitrary")] -mod quickcheck_tests { +#[cfg(feature = "proptest-support")] +mod proptest_tests { macro_rules! gen_tests( - ($module: ident, $scalar: ty) => { + ($module: ident, $scalar: expr, $scalar_type: ty) => { mod $module { - use std::cmp; - use na::{DMatrix, Matrix2, Matrix3, Matrix4}; + use na::DMatrix; #[allow(unused_imports)] use crate::core::helper::{RandScalar, RandComplex}; + use crate::proptest::*; + use proptest::{prop_assert, proptest}; - quickcheck! { - fn schur(n: usize) -> bool { - let n = cmp::max(1, cmp::min(n, 10)); - let m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0); - + proptest! 
{ + #[test] + fn schur(n in PROPTEST_MATRIX_DIM) { + let m = DMatrix::<$scalar_type>::new_random(n, n).map(|e| e.0); let (vecs, vals) = m.clone().schur().unpack(); - - if !relative_eq!(&vecs * &vals * vecs.adjoint(), m, epsilon = 1.0e-7) { - println!("{:.5}{:.5}", m, &vecs * &vals * vecs.adjoint()); - } - - relative_eq!(&vecs * vals * vecs.adjoint(), m, epsilon = 1.0e-7) + prop_assert!(relative_eq!(&vecs * vals * vecs.adjoint(), m, epsilon = 1.0e-7)); } - fn schur_static_mat2(m: Matrix2<$scalar>) -> bool { - let m = m.map(|e| e.0); + #[test] + fn schur_static_mat2(m in matrix2_($scalar)) { let (vecs, vals) = m.clone().schur().unpack(); - - let ok = relative_eq!(vecs * vals * vecs.adjoint(), m, epsilon = 1.0e-7); - if !ok { - println!("Vecs: {:.5} Vals: {:.5}", vecs, vals); - println!("Reconstruction:{}{}", m, &vecs * &vals * vecs.adjoint()); - } - ok + prop_assert!(relative_eq!(vecs * vals * vecs.adjoint(), m, epsilon = 1.0e-7)); } - fn schur_static_mat3(m: Matrix3<$scalar>) -> bool { - let m = m.map(|e| e.0); + #[test] + fn schur_static_mat3(m in matrix3_($scalar)) { let (vecs, vals) = m.clone().schur().unpack(); - - let ok = relative_eq!(vecs * vals * vecs.adjoint(), m, epsilon = 1.0e-7); - if !ok { - println!("Vecs: {:.5} Vals: {:.5}", vecs, vals); - println!("{:.5}{:.5}", m, &vecs * &vals * vecs.adjoint()); - } - ok + prop_assert!(relative_eq!(vecs * vals * vecs.adjoint(), m, epsilon = 1.0e-7)); } - fn schur_static_mat4(m: Matrix4<$scalar>) -> bool { - let m = m.map(|e| e.0); + #[test] + fn schur_static_mat4(m in matrix4_($scalar)) { let (vecs, vals) = m.clone().schur().unpack(); - - let ok = relative_eq!(vecs * vals * vecs.adjoint(), m, epsilon = 1.0e-7); - if !ok { - println!("{:.5}{:.5}", m, &vecs * &vals * vecs.adjoint()); - } - - ok + prop_assert!(relative_eq!(vecs * vals * vecs.adjoint(), m, epsilon = 1.0e-7)); } } } } ); - gen_tests!(complex, RandComplex); - gen_tests!(f64, RandScalar); + gen_tests!(complex, complex_f64(), RandComplex); + gen_tests!(f64, PROPTEST_F64, RandScalar); } #[test] diff --git a/tests/linalg/solve.rs b/tests/linalg/solve.rs index 3bd6075e..81cd1c71 100644 --- a/tests/linalg/solve.rs +++ b/tests/linalg/solve.rs @@ -1,11 +1,13 @@ -#![cfg(feature = "arbitrary")] +#![cfg(feature = "proptest-support")] macro_rules! gen_tests( - ($module: ident, $scalar: ty) => { + ($module: ident, $scalar: expr) => { mod $module { - use na::{Matrix4, Matrix4x5, ComplexField}; + use na::{Matrix4, ComplexField}; #[allow(unused_imports)] use crate::core::helper::{RandScalar, RandComplex}; + use crate::proptest::*; + use proptest::{prop_assert, proptest}; fn unzero_diagonal<N: ComplexField>(a: &mut Matrix4<N>) { for i in 0..4 { @@ -15,50 +17,50 @@ macro_rules! gen_tests( } } - quickcheck! { - fn solve_lower_triangular(a: Matrix4<$scalar>, b: Matrix4x5<$scalar>) -> bool { - let b = b.map(|e| e.0); - let mut a = a.map(|e| e.0); + proptest!
{ + #[test] + fn solve_lower_triangular(a in matrix4_($scalar), b in matrix4x5_($scalar)) { + let mut a = a; unzero_diagonal(&mut a); let tri = a.lower_triangle(); let x = a.solve_lower_triangular(&b).unwrap(); - relative_eq!(tri * x, b, epsilon = 1.0e-7) + prop_assert!(relative_eq!(tri * x, b, epsilon = 1.0e-7)) } - fn solve_upper_triangular(a: Matrix4<$scalar>, b: Matrix4x5<$scalar>) -> bool { - let b = b.map(|e| e.0); - let mut a = a.map(|e| e.0); + #[test] + fn solve_upper_triangular(a in matrix4_($scalar), b in matrix4x5_($scalar)) { + let mut a = a; unzero_diagonal(&mut a); let tri = a.upper_triangle(); let x = a.solve_upper_triangular(&b).unwrap(); - relative_eq!(tri * x, b, epsilon = 1.0e-7) + prop_assert!(relative_eq!(tri * x, b, epsilon = 1.0e-7)) } - fn tr_solve_lower_triangular(a: Matrix4<$scalar>, b: Matrix4x5<$scalar>) -> bool { - let b = b.map(|e| e.0); - let mut a = a.map(|e| e.0); + #[test] + fn tr_solve_lower_triangular(a in matrix4_($scalar), b in matrix4x5_($scalar)) { + let mut a = a; unzero_diagonal(&mut a); let tri = a.lower_triangle(); let x = a.tr_solve_lower_triangular(&b).unwrap(); - relative_eq!(tri.transpose() * x, b, epsilon = 1.0e-7) + prop_assert!(relative_eq!(tri.transpose() * x, b, epsilon = 1.0e-7)) } - fn tr_solve_upper_triangular(a: Matrix4<$scalar>, b: Matrix4x5<$scalar>) -> bool { - let b = b.map(|e| e.0); - let mut a = a.map(|e| e.0); + #[test] + fn tr_solve_upper_triangular(a in matrix4_($scalar), b in matrix4x5_($scalar)) { + let mut a = a; unzero_diagonal(&mut a); let tri = a.upper_triangle(); let x = a.tr_solve_upper_triangular(&b).unwrap(); - relative_eq!(tri.transpose() * x, b, epsilon = 1.0e-7) + prop_assert!(relative_eq!(tri.transpose() * x, b, epsilon = 1.0e-7)) } } } } ); -gen_tests!(complex, RandComplex); -gen_tests!(f64, RandScalar); +gen_tests!(complex, complex_f64()); +gen_tests!(f64, PROPTEST_F64); diff --git a/tests/linalg/svd.rs b/tests/linalg/svd.rs index cd44b61d..80aa6a20 100644 --- a/tests/linalg/svd.rs +++ b/tests/linalg/svd.rs @@ -1,162 +1,143 @@ use na::{DMatrix, Matrix6}; -#[cfg(feature = "arbitrary")] -mod quickcheck_tests { +#[cfg(feature = "proptest-support")] +mod proptest_tests { macro_rules! gen_tests( - ($module: ident, $scalar: ty) => { + ($module: ident, $scalar: expr, $scalar_type: ty) => { mod $module { use na::{ - DMatrix, DVector, Matrix2, Matrix2x5, Matrix3, Matrix3x5, Matrix4, Matrix5x2, Matrix5x3, + DMatrix, DVector, Matrix2, Matrix3, Matrix4, ComplexField }; use std::cmp; #[allow(unused_imports)] use crate::core::helper::{RandScalar, RandComplex}; + use crate::proptest::*; + use proptest::{prop_assert, proptest}; - quickcheck! { - fn svd(m: DMatrix<$scalar>) -> bool { - let m = m.map(|e| e.0); - if m.len() > 0 { - let svd = m.clone().svd(true, true); - let recomp_m = svd.clone().recompose().unwrap(); - let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap()); - let ds = DMatrix::from_diagonal(&s.map(|e| ComplexField::from_real(e))); + proptest! 
{ + #[test] + fn svd(m in dmatrix_($scalar)) { + let svd = m.clone().svd(true, true); + let recomp_m = svd.clone().recompose().unwrap(); + let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap()); + let ds = DMatrix::from_diagonal(&s.map(|e| ComplexField::from_real(e))); - s.iter().all(|e| *e >= 0.0) && - relative_eq!(&u * ds * &v_t, recomp_m, epsilon = 1.0e-5) && - relative_eq!(m, recomp_m, epsilon = 1.0e-5) - } - else { - true - } + prop_assert!(s.iter().all(|e| *e >= 0.0)); + prop_assert!(relative_eq!(&u * ds * &v_t, recomp_m, epsilon = 1.0e-5)); + prop_assert!(relative_eq!(m, recomp_m, epsilon = 1.0e-5)); } - fn svd_static_5_3(m: Matrix5x3<$scalar>) -> bool { - let m = m.map(|e| e.0); + #[test] + fn svd_static_5_3(m in matrix5x3_($scalar)) { let svd = m.svd(true, true); let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap()); let ds = Matrix3::from_diagonal(&s.map(|e| ComplexField::from_real(e))); - s.iter().all(|e| *e >= 0.0) && - relative_eq!(m, &u * ds * &v_t, epsilon = 1.0e-5) && - u.is_orthogonal(1.0e-5) && - v_t.is_orthogonal(1.0e-5) + prop_assert!(s.iter().all(|e| *e >= 0.0)); + prop_assert!(relative_eq!(m, &u * ds * &v_t, epsilon = 1.0e-5)); + prop_assert!(u.is_orthogonal(1.0e-5)); + prop_assert!(v_t.is_orthogonal(1.0e-5)); } - fn svd_static_5_2(m: Matrix5x2<$scalar>) -> bool { - let m = m.map(|e| e.0); + #[test] + fn svd_static_5_2(m in matrix5x2_($scalar)) { let svd = m.svd(true, true); let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap()); let ds = Matrix2::from_diagonal(&s.map(|e| ComplexField::from_real(e))); - s.iter().all(|e| *e >= 0.0) && - relative_eq!(m, &u * ds * &v_t, epsilon = 1.0e-5) && - u.is_orthogonal(1.0e-5) && - v_t.is_orthogonal(1.0e-5) + prop_assert!(s.iter().all(|e| *e >= 0.0)); + prop_assert!(relative_eq!(m, &u * ds * &v_t, epsilon = 1.0e-5)); + prop_assert!(u.is_orthogonal(1.0e-5)); + prop_assert!(v_t.is_orthogonal(1.0e-5)); } - fn svd_static_3_5(m: Matrix3x5<$scalar>) -> bool { - let m = m.map(|e| e.0); + #[test] + fn svd_static_3_5(m in matrix3x5_($scalar)) { let svd = m.svd(true, true); let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap()); let ds = Matrix3::from_diagonal(&s.map(|e| ComplexField::from_real(e))); - s.iter().all(|e| *e >= 0.0) && - relative_eq!(m, u * ds * v_t, epsilon = 1.0e-5) + prop_assert!(s.iter().all(|e| *e >= 0.0)); + prop_assert!(relative_eq!(m, u * ds * v_t, epsilon = 1.0e-5)); } - fn svd_static_2_5(m: Matrix2x5<$scalar>) -> bool { - let m = m.map(|e| e.0); + #[test] + fn svd_static_2_5(m in matrix2x5_($scalar)) { let svd = m.svd(true, true); let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap()); let ds = Matrix2::from_diagonal(&s.map(|e| ComplexField::from_real(e))); - s.iter().all(|e| *e >= 0.0) && - relative_eq!(m, u * ds * v_t, epsilon = 1.0e-5) + prop_assert!(s.iter().all(|e| *e >= 0.0)); + prop_assert!(relative_eq!(m, u * ds * v_t, epsilon = 1.0e-5)); } - fn svd_static_square(m: Matrix4<$scalar>) -> bool { - let m = m.map(|e| e.0); + #[test] + fn svd_static_square(m in matrix4_($scalar)) { let svd = m.svd(true, true); let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap()); let ds = Matrix4::from_diagonal(&s.map(|e| ComplexField::from_real(e))); - s.iter().all(|e| *e >= 0.0) && - relative_eq!(m, u * ds * v_t, epsilon = 1.0e-5) && - u.is_orthogonal(1.0e-5) && - v_t.is_orthogonal(1.0e-5) + prop_assert!(s.iter().all(|e| *e >= 0.0)); + prop_assert!(relative_eq!(m, u * ds * v_t, epsilon = 1.0e-5)); + 
prop_assert!(u.is_orthogonal(1.0e-5)); + prop_assert!(v_t.is_orthogonal(1.0e-5)); } - fn svd_static_square_2x2(m: Matrix2<$scalar>) -> bool { - let m = m.map(|e| e.0); + #[test] + fn svd_static_square_2x2(m in matrix2_($scalar)) { let svd = m.svd(true, true); let (u, s, v_t) = (svd.u.unwrap(), svd.singular_values, svd.v_t.unwrap()); let ds = Matrix2::from_diagonal(&s.map(|e| ComplexField::from_real(e))); - s.iter().all(|e| *e >= 0.0) && - relative_eq!(m, u * ds * v_t, epsilon = 1.0e-5) && - u.is_orthogonal(1.0e-5) && - v_t.is_orthogonal(1.0e-5) + prop_assert!(s.iter().all(|e| *e >= 0.0)); + prop_assert!(relative_eq!(m, u * ds * v_t, epsilon = 1.0e-5)); + prop_assert!(u.is_orthogonal(1.0e-5)); + prop_assert!(v_t.is_orthogonal(1.0e-5)); } - fn svd_pseudo_inverse(m: DMatrix<$scalar>) -> bool { - let m = m.map(|e| e.0); + #[test] + fn svd_pseudo_inverse(m in dmatrix_($scalar)) { + let svd = m.clone().svd(true, true); + let pinv = svd.pseudo_inverse(1.0e-10).unwrap(); - if m.len() > 0 { - let svd = m.clone().svd(true, true); - let pinv = svd.pseudo_inverse(1.0e-10).unwrap(); - - if m.nrows() > m.ncols() { - (pinv * m).is_identity(1.0e-5) - } - else { - (m * pinv).is_identity(1.0e-5) - } - } - else { - true + if m.nrows() > m.ncols() { + prop_assert!((pinv * m).is_identity(1.0e-5)) + } else { + prop_assert!((m * pinv).is_identity(1.0e-5)) } } - fn svd_solve(n: usize, nb: usize) -> bool { + #[test] + fn svd_solve(n in PROPTEST_MATRIX_DIM, nb in PROPTEST_MATRIX_DIM) { let n = cmp::max(1, cmp::min(n, 10)); let nb = cmp::min(nb, 10); - let m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0); + let m = DMatrix::<$scalar_type>::new_random(n, n).map(|e| e.0); let svd = m.clone().svd(true, true); if svd.rank(1.0e-7) == n { - let b1 = DVector::<$scalar>::new_random(n).map(|e| e.0); - let b2 = DMatrix::<$scalar>::new_random(n, nb).map(|e| e.0); + let b1 = DVector::<$scalar_type>::new_random(n).map(|e| e.0); + let b2 = DMatrix::<$scalar_type>::new_random(n, nb).map(|e| e.0); let sol1 = svd.solve(&b1, 1.0e-7).unwrap(); let sol2 = svd.solve(&b2, 1.0e-7).unwrap(); let recomp = svd.recompose().unwrap(); - if !relative_eq!(m, recomp, epsilon = 1.0e-6) { - println!("{}{}", m, recomp); - } - if !relative_eq!(&m * &sol1, b1, epsilon = 1.0e-6) { - println!("Problem 1: {:.6}{:.6}", b1, &m * sol1); - return false; - } - if !relative_eq!(&m * &sol2, b2, epsilon = 1.0e-6) { - println!("Problem 2: {:.6}{:.6}", b2, &m * sol2); - return false; - } + prop_assert!(relative_eq!(m, recomp, epsilon = 1.0e-6)); + prop_assert!(relative_eq!(&m * &sol1, b1, epsilon = 1.0e-6)); + prop_assert!(relative_eq!(&m * &sol2, b2, epsilon = 1.0e-6)); } - - true } } } } ); - gen_tests!(complex, RandComplex<f64>); - gen_tests!(f64, RandScalar<f64>); + gen_tests!(complex, complex_f64(), RandComplex<f64>); + gen_tests!(f64, PROPTEST_F64, RandScalar<f64>); } // Test proposed on the issue #176 of rulinalg.
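The pattern above repeats across the whole migration: a `quickcheck!` property that returned `bool` becomes a `#[test]` inside `proptest!`, its arguments are drawn from an explicit strategy with `in`, and each conjunct of the old `&&` chain becomes its own `prop_assert!`, so a failure names the violated property and proptest can shrink the input. A minimal sketch of that pattern, assuming only the `proptest` crate (the range mirrors `PROPTEST_F64`; the property itself is illustrative and not part of the patch):

```rust
use proptest::prelude::*;

proptest! {
    #[test]
    // quickcheck equivalent: `fn add_commutes(a: f64, b: f64) -> bool`
    fn add_commutes(a in -100.0f64..=100.0, b in -100.0f64..=100.0) {
        // Each former `&&` conjunct becomes its own assertion.
        prop_assert!((a + b - (b + a)).abs() <= f64::EPSILON);
    }
}
```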
@@ -303,31 +284,31 @@ fn svd_identity() { #[rustfmt::skip] fn svd_with_delimited_subproblem() { let mut m = DMatrix::<f64>::from_element(10, 10, 0.0); - m[(0,0)] = 1.0; m[(0,1)] = 2.0; - m[(1,1)] = 0.0; m[(1,2)] = 3.0; - m[(2,2)] = 4.0; m[(2,3)] = 5.0; - m[(3,3)] = 6.0; m[(3,4)] = 0.0; - m[(4,4)] = 8.0; m[(3,5)] = 9.0; - m[(5,5)] = 10.0; m[(3,6)] = 11.0; - m[(6,6)] = 12.0; m[(3,7)] = 12.0; - m[(7,7)] = 14.0; m[(3,8)] = 13.0; - m[(8,8)] = 16.0; m[(3,9)] = 17.0; - m[(9,9)] = 18.0; + m[(0, 0)] = 1.0; m[(0, 1)] = 2.0; + m[(1, 1)] = 0.0; m[(1, 2)] = 3.0; + m[(2, 2)] = 4.0; m[(2, 3)] = 5.0; + m[(3, 3)] = 6.0; m[(3, 4)] = 0.0; + m[(4, 4)] = 8.0; m[(3, 5)] = 9.0; + m[(5, 5)] = 10.0; m[(3, 6)] = 11.0; + m[(6, 6)] = 12.0; m[(3, 7)] = 12.0; + m[(7, 7)] = 14.0; m[(3, 8)] = 13.0; + m[(8, 8)] = 16.0; m[(3, 9)] = 17.0; + m[(9, 9)] = 18.0; let svd = m.clone().svd(true, true); assert_relative_eq!(m, svd.recompose().unwrap(), epsilon = 1.0e-7); // Rectangular versions. let mut m = DMatrix::<f64>::from_element(15, 10, 0.0); - m[(0,0)] = 1.0; m[(0,1)] = 2.0; - m[(1,1)] = 0.0; m[(1,2)] = 3.0; - m[(2,2)] = 4.0; m[(2,3)] = 5.0; - m[(3,3)] = 6.0; m[(3,4)] = 0.0; - m[(4,4)] = 8.0; m[(3,5)] = 9.0; - m[(5,5)] = 10.0; m[(3,6)] = 11.0; - m[(6,6)] = 12.0; m[(3,7)] = 12.0; - m[(7,7)] = 14.0; m[(3,8)] = 13.0; - m[(8,8)] = 16.0; m[(3,9)] = 17.0; - m[(9,9)] = 18.0; + m[(0, 0)] = 1.0; m[(0, 1)] = 2.0; + m[(1, 1)] = 0.0; m[(1, 2)] = 3.0; + m[(2, 2)] = 4.0; m[(2, 3)] = 5.0; + m[(3, 3)] = 6.0; m[(3, 4)] = 0.0; + m[(4, 4)] = 8.0; m[(3, 5)] = 9.0; + m[(5, 5)] = 10.0; m[(3, 6)] = 11.0; + m[(6, 6)] = 12.0; m[(3, 7)] = 12.0; + m[(7, 7)] = 14.0; m[(3, 8)] = 13.0; + m[(8, 8)] = 16.0; m[(3, 9)] = 17.0; + m[(9, 9)] = 18.0; let svd = m.clone().svd(true, true); assert_relative_eq!(m, svd.recompose().unwrap(), epsilon = 1.0e-7); diff --git a/tests/linalg/tridiagonal.rs b/tests/linalg/tridiagonal.rs index b787fd12..3f06fe8e 100644 --- a/tests/linalg/tridiagonal.rs +++ b/tests/linalg/tridiagonal.rs @@ -1,54 +1,56 @@ -#![cfg(feature = "arbitrary")] +#![cfg(feature = "proptest-support")] macro_rules! gen_tests( - ($module: ident, $scalar: ty) => { + ($module: ident, $scalar: expr) => { mod $module { - use std::cmp; - - use na::{DMatrix, Matrix2, Matrix4}; #[allow(unused_imports)] use crate::core::helper::{RandScalar, RandComplex}; + use crate::proptest::*; + use proptest::{prop_assert, proptest}; - quickcheck! { - fn symm_tridiagonal(n: usize) -> bool { - let n = cmp::max(1, cmp::min(n, 50)); - let m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0).hermitian_part(); + proptest!
{ + #[test] + fn symm_tridiagonal(m in dmatrix_($scalar)) { + let m = &m * m.adjoint(); let tri = m.clone().symmetric_tridiagonalize(); let recomp = tri.recompose(); - relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-7) + prop_assert!(relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-7)); } - fn symm_tridiagonal_singular(n: usize) -> bool { - let n = cmp::max(1, cmp::min(n, 4)); - let mut m = DMatrix::<$scalar>::new_random(n, n).map(|e| e.0).hermitian_part(); + #[test] + fn symm_tridiagonal_singular(m in dmatrix_($scalar)) { + let mut m = &m * m.adjoint(); + let n = m.nrows(); m.row_mut(n / 2).fill(na::zero()); m.column_mut(n / 2).fill(na::zero()); let tri = m.clone().symmetric_tridiagonalize(); let recomp = tri.recompose(); - relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-7) + prop_assert!(relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-7)); } - fn symm_tridiagonal_static_square(m: Matrix4<$scalar>) -> bool { - let m = m.map(|e| e.0).hermitian_part(); + #[test] + fn symm_tridiagonal_static_square(m in matrix4_($scalar)) { + let m = m.hermitian_part(); let tri = m.symmetric_tridiagonalize(); let recomp = tri.recompose(); - relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-7) + prop_assert!(relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-7)); } - fn symm_tridiagonal_static_square_2x2(m: Matrix2<$scalar>) -> bool { - let m = m.map(|e| e.0).hermitian_part(); + #[test] + fn symm_tridiagonal_static_square_2x2(m in matrix2_($scalar)) { + let m = m.hermitian_part(); let tri = m.symmetric_tridiagonalize(); let recomp = tri.recompose(); - relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-7) + prop_assert!(relative_eq!(m.lower_triangle(), recomp.lower_triangle(), epsilon = 1.0e-7)); } } } } ); -gen_tests!(complex, RandComplex<f64>); -gen_tests!(f64, RandScalar<f64>); +gen_tests!(complex, complex_f64()); +gen_tests!(f64, PROPTEST_F64); diff --git a/tests/linalg/udu.rs b/tests/linalg/udu.rs new file mode 100644 index 00000000..a7bda1a4 --- /dev/null +++ b/tests/linalg/udu.rs @@ -0,0 +1,76 @@ +use na::Matrix3; + +#[test] +#[rustfmt::skip] +fn udu_simple() { + let m = Matrix3::new( + 2.0, -1.0, 0.0, + -1.0, 2.0, -1.0, + 0.0, -1.0, 2.0); + + let udu = m.udu().unwrap(); + + // Rebuild + let p = udu.u * udu.d_matrix() * udu.u.transpose(); + + assert!(relative_eq!(m, p, epsilon = 3.0e-16)); +} + +#[test] +#[should_panic] +#[rustfmt::skip] +fn udu_non_sym_panic() { + let m = Matrix3::new( + 2.0, -1.0, 0.0, + 1.0, -2.0, 3.0, + -2.0, 1.0, 0.3); + + let udu = m.udu().unwrap(); + // Rebuild + let p = udu.u * udu.d_matrix() * udu.u.transpose(); + + assert!(relative_eq!(m, p, epsilon = 3.0e-16)); +} + +#[cfg(feature = "proptest-support")] +mod proptest_tests { + #[allow(unused_imports)] + use crate::core::helper::{RandComplex, RandScalar}; + + macro_rules! gen_tests( + ($module: ident, $scalar: expr) => { + mod $module { + #[allow(unused_imports)] + use crate::core::helper::{RandScalar, RandComplex}; + use crate::proptest::*; + use proptest::{prop_assert, proptest}; + + proptest!
{ + #[test] + fn udu(m in dmatrix_($scalar)) { + let m = &m * m.adjoint(); + + if let Some(udu) = m.clone().udu() { + let p = &udu.u * &udu.d_matrix() * &udu.u.transpose(); + println!("m: {}, p: {}", m, p); + + prop_assert!(relative_eq!(m, p, epsilon = 1.0e-7)); + } + } + + #[test] + fn udu_static(m in matrix4_($scalar)) { + let m = m.hermitian_part(); + + if let Some(udu) = m.udu() { + let p = udu.u * udu.d_matrix() * udu.u.transpose(); + prop_assert!(relative_eq!(m, p, epsilon = 1.0e-7)); + } + } + } + } + } + ); + + gen_tests!(f64, PROPTEST_F64); +} diff --git a/tests/proptest/mod.rs b/tests/proptest/mod.rs new file mode 100644 index 00000000..174c0fdd --- /dev/null +++ b/tests/proptest/mod.rs @@ -0,0 +1,371 @@ +//! Tests for proptest-related functionality. + +#![allow(dead_code)] + +use nalgebra::allocator::Allocator; +use nalgebra::base::dimension::*; +use nalgebra::proptest::{DimRange, MatrixStrategy}; +use nalgebra::{ + DMatrix, DVector, DefaultAllocator, Dim, DualQuaternion, Isometry2, Isometry3, Matrix3, + MatrixMN, Point2, Point3, Quaternion, Rotation2, Rotation3, Scalar, Similarity3, Translation2, + Translation3, UnitComplex, UnitDualQuaternion, UnitQuaternion, Vector3, U2, U3, U4, U7, U8, +}; +use num_complex::Complex; +use proptest::prelude::*; +use proptest::strategy::{Strategy, ValueTree}; +use proptest::test_runner::TestRunner; +use std::ops::RangeInclusive; + +pub const PROPTEST_MATRIX_DIM: RangeInclusive<usize> = 1..=20; +pub const PROPTEST_F64: RangeInclusive<f64> = -100.0..=100.0; + +pub use nalgebra::proptest::{matrix, vector}; + +pub fn point2() -> impl Strategy<Value = Point2<f64>> { + vector2().prop_map(|v| Point2::from(v)) +} + +pub fn point3() -> impl Strategy<Value = Point3<f64>> { + vector3().prop_map(|v| Point3::from(v)) +} + +pub fn translation2() -> impl Strategy<Value = Translation2<f64>> { + vector2().prop_map(|v| Translation2::from(v)) +} + +pub fn translation3() -> impl Strategy<Value = Translation3<f64>> { + vector3().prop_map(|v| Translation3::from(v)) +} + +pub fn rotation2() -> impl Strategy<Value = Rotation2<f64>> { + PROPTEST_F64.prop_map(|v| Rotation2::new(v)) +} + +pub fn rotation3() -> impl Strategy<Value = Rotation3<f64>> { + vector3().prop_map(|v| Rotation3::new(v)) +} + +pub fn unit_complex() -> impl Strategy<Value = UnitComplex<f64>> { + PROPTEST_F64.prop_map(|v| UnitComplex::new(v)) +} + +pub fn isometry2() -> impl Strategy<Value = Isometry2<f64>> { + vector3().prop_map(|v| Isometry2::new(v.xy(), v.z)) +} + +pub fn isometry3() -> impl Strategy<Value = Isometry3<f64>> { + vector6().prop_map(|v| Isometry3::new(v.xyz(), Vector3::new(v.w, v.a, v.b))) +} + +// pub fn similarity2() -> impl Strategy<Value = Similarity2<f64>> { +// vector4().prop_map(|v| Similarity2::new(v.xy(), v.z, v.w)) +// } + +pub fn similarity3() -> impl Strategy<Value = Similarity3<f64>> { + vector(PROPTEST_F64, U7) + .prop_map(|v| Similarity3::new(v.xyz(), Vector3::new(v[3], v[4], v[5]), v[6])) +} + +pub fn unit_dual_quaternion() -> impl Strategy<Value = UnitDualQuaternion<f64>> { + isometry3().prop_map(|iso| UnitDualQuaternion::from_isometry(&iso)) +} + +pub fn dual_quaternion() -> impl Strategy<Value = DualQuaternion<f64>> { + vector(PROPTEST_F64, U8).prop_map(|v| { + DualQuaternion::from_real_and_dual( + Quaternion::new(v[0], v[1], v[2], v[3]), + Quaternion::new(v[4], v[5], v[6], v[7]), + ) + }) +} + +pub fn quaternion() -> impl Strategy<Value = Quaternion<f64>> { + vector4().prop_map(|v| Quaternion::from(v)) +} + +pub fn unit_quaternion() -> impl Strategy<Value = UnitQuaternion<f64>> { + vector3().prop_map(|v| UnitQuaternion::new(v)) +} + +pub fn complex_f64() -> impl Strategy<Value = Complex<f64>> + Clone { + vector(PROPTEST_F64, U2).prop_map(|v| Complex::new(v.x, v.y)) +} + +pub fn dmatrix() -> impl Strategy<Value = DMatrix<f64>> { + matrix(PROPTEST_F64, PROPTEST_MATRIX_DIM, PROPTEST_MATRIX_DIM) +} + +pub fn dvector() -> impl Strategy<Value = DVector<f64>> { + vector(PROPTEST_F64, PROPTEST_MATRIX_DIM) +} +
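All of these generators follow one recipe: start from a primitive strategy (a scalar range, or `vector`/`matrix` re-exported from `nalgebra::proptest`) and `prop_map` it into the target type, after which the named function plugs directly into a `proptest!` signature. A hypothetical usage sketch of `dmatrix()` and `PROPTEST_F64` from this module (the test is illustrative, not part of the patch; `relative_eq!` comes from the `approx` crate):

```rust
use approx::relative_eq;
use proptest::prelude::*;

proptest! {
    #[test]
    fn dmatrix_scalar_mul_is_entrywise(m in dmatrix(), c in PROPTEST_F64) {
        // Multiplying the matrix by a scalar should equal mapping the
        // multiplication over every entry.
        let scaled = &m * c;
        let mapped = m.map(|x| x * c);
        prop_assert!(relative_eq!(scaled, mapped, epsilon = 1.0e-10));
    }
}
```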
+pub fn dmatrix_<ScalarStrategy>( + scalar_strategy: ScalarStrategy, +) -> impl Strategy<Value = DMatrix<ScalarStrategy::Value>> +where + ScalarStrategy: Strategy + Clone + 'static, + ScalarStrategy::Value: Scalar, + DefaultAllocator: Allocator<ScalarStrategy::Value, Dynamic, Dynamic>, +{ + matrix(scalar_strategy, PROPTEST_MATRIX_DIM, PROPTEST_MATRIX_DIM) +} + +// pub fn dvector_<T>(range: RangeInclusive<T>) -> impl Strategy<Value = DVector<T>> +// where +// RangeInclusive<T>: Strategy<Value = T>, +// T: Scalar + PartialEq + Copy, +// DefaultAllocator: Allocator<T, Dynamic>, +// { +// vector(range, PROPTEST_MATRIX_DIM) +// } + +macro_rules! define_strategies( + ($($strategy_: ident $strategy: ident<$nrows: ident, $ncols: ident>),*) => {$( + pub fn $strategy() -> impl Strategy<Value = MatrixMN<f64, $nrows, $ncols>> { + matrix(PROPTEST_F64, $nrows, $ncols) + } + + pub fn $strategy_<ScalarStrategy>(scalar_strategy: ScalarStrategy) -> impl Strategy<Value = MatrixMN<ScalarStrategy::Value, $nrows, $ncols>> + where + ScalarStrategy: Strategy + Clone + 'static, + ScalarStrategy::Value: Scalar, + DefaultAllocator: Allocator<ScalarStrategy::Value, $nrows, $ncols> { + matrix(scalar_strategy, $nrows, $ncols) + } + )*} +); + +define_strategies!( + matrix1_ matrix1<U1, U1>, + matrix2_ matrix2<U2, U2>, + matrix3_ matrix3<U3, U3>, + matrix4_ matrix4<U4, U4>, + matrix5_ matrix5<U5, U5>, + matrix6_ matrix6<U6, U6>, + + matrix5x2_ matrix5x2<U5, U2>, + matrix2x5_ matrix2x5<U2, U5>, + matrix5x3_ matrix5x3<U5, U3>, + matrix3x5_ matrix3x5<U3, U5>, + matrix5x4_ matrix5x4<U5, U4>, + matrix4x5_ matrix4x5<U4, U5>, + + vector1_ vector1<U1, U1>, + vector2_ vector2<U2, U1>, + vector3_ vector3<U3, U1>, + vector4_ vector4<U4, U1>, + vector5_ vector5<U5, U1>, + vector6_ vector6<U6, U1> +); + +/// Generate a proptest that tests that all matrices generated with the +/// provided rows and columns conform to the constraints defined by the +/// input. +macro_rules! generate_matrix_sanity_test { + ($test_name:ident, $rows:expr, $cols:expr) => { + proptest! { + #[test] + fn $test_name(a in matrix(-5 ..= 5i32, $rows, $cols)) { + // let a: MatrixMN<_, $rows, $cols> = a; + let rows_range = DimRange::from($rows); + let cols_range = DimRange::from($cols); + prop_assert!(a.nrows() >= rows_range.lower_bound().value() + && a.nrows() <= rows_range.upper_bound().value()); + prop_assert!(a.ncols() >= cols_range.lower_bound().value() + && a.ncols() <= cols_range.upper_bound().value()); + prop_assert!(a.iter().all(|x_ij| *x_ij >= -5 && *x_ij <= 5)); + } + } + }; +} + +// Test all fixed-size matrices with row/col dimensions up to 3 +generate_matrix_sanity_test!(test_matrix_u0_u0, U0, U0); +generate_matrix_sanity_test!(test_matrix_u1_u0, U1, U0); +generate_matrix_sanity_test!(test_matrix_u0_u1, U0, U1); +generate_matrix_sanity_test!(test_matrix_u1_u1, U1, U1); +generate_matrix_sanity_test!(test_matrix_u2_u1, U2, U1); +generate_matrix_sanity_test!(test_matrix_u1_u2, U1, U2); +generate_matrix_sanity_test!(test_matrix_u2_u2, U2, U2); +generate_matrix_sanity_test!(test_matrix_u3_u2, U3, U2); +generate_matrix_sanity_test!(test_matrix_u2_u3, U2, U3); +generate_matrix_sanity_test!(test_matrix_u3_u3, U3, U3); + +// Similarly test all heap-allocated but fixed dim ranges +generate_matrix_sanity_test!(test_matrix_0_0, 0, 0); +generate_matrix_sanity_test!(test_matrix_0_1, 0, 1); +generate_matrix_sanity_test!(test_matrix_1_0, 1, 0); +generate_matrix_sanity_test!(test_matrix_1_1, 1, 1); +generate_matrix_sanity_test!(test_matrix_2_1, 2, 1); +generate_matrix_sanity_test!(test_matrix_1_2, 1, 2); +generate_matrix_sanity_test!(test_matrix_2_2, 2, 2); +generate_matrix_sanity_test!(test_matrix_3_2, 3, 2); +generate_matrix_sanity_test!(test_matrix_2_3, 2, 3); +generate_matrix_sanity_test!(test_matrix_3_3, 3, 3); + +// Test arbitrary inputs +generate_matrix_sanity_test!(test_matrix_input_1, U5, 1..=5); +generate_matrix_sanity_test!(test_matrix_input_2, 3..=4, 1..=5); +generate_matrix_sanity_test!(test_matrix_input_3, 1..=2, U3); +generate_matrix_sanity_test!(test_matrix_input_4, 3, U4);
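The sanity tests above exercise the key feature of `nalgebra::proptest::matrix`: each dimension argument may be a type-level size (such as `U3`) or a range of sizes (such as `1..=5`), and the resulting strategy only yields matrices whose dimensions fall within the requested bounds. A hypothetical stand-alone sketch of the same idea (the property is illustrative, not one of the patch's tests):

```rust
use nalgebra::proptest::matrix;
use proptest::prelude::*;

proptest! {
    #[test]
    // Rows and columns are each drawn from a range, so the strategy
    // produces dynamically-sized matrices within those bounds.
    fn transpose_swaps_dimensions(a in matrix(-5..=5i32, 2..=4, 1..=3)) {
        let t = a.transpose();
        prop_assert_eq!((t.nrows(), t.ncols()), (a.ncols(), a.nrows()));
    }
}
```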
+ +#[test] +fn test_matrix_output_types() { + // Test that the dimension types are correct for the given inputs + let _: MatrixStrategy<_, U3, U4> = matrix(-5..5, U3, U4); + let _: MatrixStrategy<_, U3, U3> = matrix(-5..5, U3, U3); + let _: MatrixStrategy<_, U3, Dynamic> = matrix(-5..5, U3, 1..=5); + let _: MatrixStrategy<_, Dynamic, U3> = matrix(-5..5, 1..=5, U3); + let _: MatrixStrategy<_, Dynamic, Dynamic> = matrix(-5..5, 1..=5, 1..=5); +} + +// Below we have some tests to ensure that specific instances of MatrixMN are usable +// in a typical proptest scenario where we (implicitly) use the `Arbitrary` trait +proptest! { + #[test] + fn ensure_arbitrary_test_compiles_matrix3(_: Matrix3<f64>) {} + + #[test] + fn ensure_arbitrary_test_compiles_matrixmn_u3_dynamic(_: MatrixMN<f64, U3, Dynamic>) {} + + #[test] + fn ensure_arbitrary_test_compiles_matrixmn_dynamic_u3(_: MatrixMN<f64, Dynamic, U3>) {} + + #[test] + fn ensure_arbitrary_test_compiles_dmatrix(_: DMatrix<f64>) {} + + #[test] + fn ensure_arbitrary_test_compiles_vector3(_: Vector3<f64>) {} + + #[test] + fn ensure_arbitrary_test_compiles_dvector(_: DVector<f64>) {} +} + +#[test] +fn matrix_shrinking_satisfies_constraints() { + // We use a deterministic test runner to make the test "stable". + let mut runner = TestRunner::deterministic(); + + let strategy = matrix(-1..=2, 1..=3, 2..=4); + + let num_matrices = 25; + + macro_rules! maybeprintln { + ($($arg:tt)*) => { + // Uncomment the below line to enable printing of matrix sequences. This is handy + // for manually inspecting the sequences of simplified matrices. + // println!($($arg)*) + }; + } + + maybeprintln!("========================== (begin generation process)"); + + for _ in 0..num_matrices { + let mut tree = strategy + .new_tree(&mut runner) + .expect("Tree generation should not fail."); + + let mut current = Some(tree.current()); + + maybeprintln!("------------------"); + + while let Some(matrix) = current { + maybeprintln!("{}", matrix); + + assert!( + matrix.iter().all(|&v| v >= -1 && v <= 2), + "All matrix elements must satisfy constraints" + ); + assert!( + matrix.nrows() >= 1 && matrix.nrows() <= 3, + "Number of rows in matrix must satisfy constraints." + ); + assert!( + matrix.ncols() >= 2 && matrix.ncols() <= 4, + "Number of columns in matrix must satisfy constraints." + ); + + current = if tree.simplify() { + Some(tree.current()) + } else { + None + } + } + } + + maybeprintln!("========================== (end of generation process)"); +} + +#[cfg(feature = "slow-tests")] +mod slow { + use super::*; + use itertools::Itertools; + use std::collections::HashSet; + use std::iter::repeat; + + #[cfg(feature = "slow-tests")] + #[test] + fn matrix_samples_all_possible_outputs() { + // Test that the proptest generation covers all possible outputs for a small space of inputs + // given enough samples. + + // We use a deterministic test runner to make the test "stable".
+ let mut runner = TestRunner::deterministic(); + + // This number needs to be high enough so that we with high probability sample + // all possible cases + let num_generated_matrices = 200000; + + let values = -1..=1; + let rows = 0..=2; + let cols = 0..=3; + let strategy = matrix(values.clone(), rows.clone(), cols.clone()); + + // Enumerate all possible combinations + let mut all_combinations = HashSet::new(); + for nrows in rows { + for ncols in cols.clone() { + // For the given number of rows and columns + let n_values = nrows * ncols; + + if n_values == 0 { + // If we have zero rows or columns, the set of matrices with the given + // rows and columns is a single element: an empty matrix + all_combinations.insert(DMatrix::from_row_slice(nrows, ncols, &[])); + } else { + // Otherwise, we need to sample all possible matrices. + // To do this, we generate the values as the (multi) Cartesian product + // of the value sets. For example, for a 2x2 matrices, we consider + // all possible 4-element arrays that the matrices can take by + // considering all elements in the cartesian product + // V x V x V x V + // where V is the set of eligible values, e.g. V := -1 ..= 1 + for matrix_values in repeat(values.clone()) + .take(n_values) + .multi_cartesian_product() + { + all_combinations.insert(DMatrix::from_row_slice( + nrows, + ncols, + &matrix_values, + )); + } + } + } + } + + let mut visited_combinations = HashSet::new(); + for _ in 0..num_generated_matrices { + let tree = strategy + .new_tree(&mut runner) + .expect("Tree generation should not fail"); + let matrix = tree.current(); + visited_combinations.insert(matrix.clone()); + } + + assert_eq!( + visited_combinations, all_combinations, + "Did not sample all possible values." + ); + } +} diff --git a/tests/sparse/cs_conversion.rs b/tests/sparse/cs_conversion.rs index f08fe758..895650dc 100644 --- a/tests/sparse/cs_conversion.rs +++ b/tests/sparse/cs_conversion.rs @@ -43,7 +43,6 @@ fn cs_matrix_from_triplet() { ); let cs_mat = CsMatrix::from_triplet(4, 5, &irows, &icols, &vals); - println!("Mat from triplet: {:?}", cs_mat); assert!(cs_mat.is_sorted()); assert_eq!(cs_mat, cs_expected); @@ -62,7 +61,6 @@ fn cs_matrix_from_triplet() { } let cs_mat = CsMatrix::from_triplet(4, 5, &irows, &icols, &vals); - println!("Mat from triplet: {:?}", cs_mat); assert!(cs_mat.is_sorted()); assert_eq!(cs_mat, cs_expected); @@ -80,7 +78,6 @@ fn cs_matrix_from_triplet() { vals.append(&mut va); let cs_mat = CsMatrix::from_triplet(4, 5, &irows, &icols, &vals); - println!("Mat from triplet: {:?}", cs_mat); assert!(cs_mat.is_sorted()); assert_eq!(cs_mat, cs_expected * 2.0); diff --git a/tests/sparse/cs_matrix_market.rs b/tests/sparse/cs_matrix_market.rs index 12414b37..7c0cee43 100644 --- a/tests/sparse/cs_matrix_market.rs +++ b/tests/sparse/cs_matrix_market.rs @@ -41,7 +41,6 @@ fn cs_matrix_market() { "#; let cs_mat = io::cs_matrix_from_matrix_market_str(file_str).unwrap(); - println!("CS mat: {:?}", cs_mat); let mat: DMatrix<_> = cs_mat.into(); let expected = DMatrix::from_row_slice(5, 5, &[ 1.0, 0.0, 0.0, 6.0, 0.0,