commit 7b6f4c6547

@@ -36,14 +36,20 @@ jobs:
         run: cargo build;
       - name: Build --features serde-serialize
         run: cargo build --features serde-serialize
-      - name: Build --all-features
-        run: cargo build --all-features;
-      - name: Build nalgebra-glm
-        run: cargo build -p nalgebra-glm --all-features;
       - name: Build nalgebra-lapack
         run: cd nalgebra-lapack; cargo build;
       - name: Build nalgebra-sparse
         run: cd nalgebra-sparse; cargo build;
+  # Run this on its own job because it alone takes a lot of time.
+  # So it's best to let it run in parallel to the other jobs.
+  build-nalgebra-all-features:
+    runs-on: ubuntu-latest
+    steps:
+      # Needed because the --all-features build enables cuda support.
+      - uses: Jimver/cuda-toolkit@v0.2.4
+      - uses: actions/checkout@v2
+      - run: cargo build --all-features;
+      - run: cargo build -p nalgebra-glm --all-features;
   test-nalgebra:
     runs-on: ubuntu-latest
     # env:
@@ -65,10 +71,10 @@ jobs:
       - name: test nalgebra-sparse
        # Manifest-path is necessary because cargo otherwise won't correctly forward features
        # We increase number of proptest cases to hopefully catch more potential bugs
-        run: PROPTEST_CASES=10000 cargo test --manifest-path=nalgebra-sparse/Cargo.toml --features compare,proptest-support
+        run: PROPTEST_CASES=10000 cargo test --manifest-path=nalgebra-sparse/Cargo.toml --features compare,proptest-support,io
       - name: test nalgebra-sparse (slow tests)
        # Unfortunately, the "slow-tests" take so much time that we need to run them with --release
-        run: PROPTEST_CASES=10000 cargo test --release --manifest-path=nalgebra-sparse/Cargo.toml --features compare,proptest-support,slow-tests slow
+        run: PROPTEST_CASES=10000 cargo test --release --manifest-path=nalgebra-sparse/Cargo.toml --features compare,proptest-support,io,slow-tests slow
   test-nalgebra-macros:
     runs-on: ubuntu-latest
     steps:
@@ -106,3 +112,20 @@ jobs:
         run: xargo build --verbose --no-default-features --features alloc --target=x86_64-unknown-linux-gnu;
       - name: build thumbv7em-none-eabihf
         run: xargo build --verbose --no-default-features --target=thumbv7em-none-eabihf;
+      - name: build x86_64-unknown-linux-gnu nalgebra-glm
+        run: xargo build --verbose --no-default-features -p nalgebra-glm --target=x86_64-unknown-linux-gnu;
+      - name: build thumbv7em-none-eabihf nalgebra-glm
+        run: xargo build --verbose --no-default-features -p nalgebra-glm --target=thumbv7em-none-eabihf;
+  build-cuda:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: Jimver/cuda-toolkit@v0.2.4
+      - name: Install nightly-2021-12-04
+        uses: actions-rs/toolchain@v1
+        with:
+          toolchain: nightly-2021-12-04
+          override: true
+      - uses: actions/checkout@v2
+      - run: rustup target add nvptx64-nvidia-cuda
+      - run: cargo build --no-default-features --features cuda
+      - run: cargo build --no-default-features --features cuda --target=nvptx64-nvidia-cuda
CHANGELOG.md (90 changed lines)

@@ -4,6 +4,52 @@ documented here.

 This project adheres to [Semantic Versioning](https://semver.org/).

+## [0.30.0] (02 Jan. 2022)
+
+### Breaking changes
+- The `Dim` trait is now marked as unsafe.
+- The `Matrix::pow` and `Matrix::pow_mut` methods only allow positive integer exponents now. To compute negative
+  exponents, the user is free to invert the matrix before calling `pow` with the exponent's absolute value.
+
+### Modified
+- Use more concise debug impls for matrices and geometric transformation types.
+- The singular values computed by the SVD are now sorted in increasing order by default. Use `SVD::new_unordered`
+  instead to reproduce the older behavior without the sorting overhead.
+- The `UnitDualQuaternion::sclerp` method will no longer panic when given two equal rotations.
+- The `Matrix::select_rows` and `Matrix::select_columns` methods no longer require the matrix components to implement
+  the trait `Zero`.
+- The `Matrix::pow` and `Matrix::pow_mut` methods will now also work with integer matrices.
+
+### Added
+- Added the conversion trait `From<Vec<T>>` and method `from_vec_storage` for `RowDVector`.
+- Added implementation of `From` and `Into` for converting between `nalgebra` types and types from
+  `glam 0.18`. These can be enabled by enabling the `convert-glam018` cargo features.
+- Added the methods `Matrix::product`, `::row_product`, `::row_product_tr`, and `::column_product` to compute the
+  product of the components, rows, or columns, of a single matrix or vector.
+- The `Default` trait is now implemented for most geometric types: `Point`, `Isometry`, `Rotation`, `Similarity`,
+  `Transform`, `UnitComplex`, and `UnitQuaternion`.
+- Added the `Scale` geometric type for representing non-uniform scaling.
+- Added `Cholesky::new_with_substitute` that will replace diagonal elements by a given constant whenever `Cholesky`
+  meets a non-definite-positiveness.
+- Re-added the conversion from a vector/matrix slice to a static array.
+- Added the `cuda` feature that enables the support of [rust-cuda](https://github.com/Rust-GPU/Rust-CUDA) for using
+  `nalgebra` features with CUDA kernels written in Rust.
+- Added special-cased implementations for the 2x2 and 3x3 SVDs for better accuracy and performance.
+- Added the methods `Matrix::polar`, `Matrix::try_polar`, and `SVD::to_polar` to compute the polar decomposition of
+  a matrix, based on its SVD.
+- `nalgebra-sparse`: provide constructors for unsorted but otherwise valid data using the CSR format.
+- `nalgebra-sparse`: added reading MatrixMarket data files into a sparse `CooMatrix`.
+
+### Fixed
+- Fixed a potential unsoundness with `matrix.get(i)` and `matrix.get_mut(i)` where `i` is a `usize` and `matrix`
+  is a matrix slice with non-default strides.
+- Fixed potential unsoundness with `vector.perp` where `vector` isn't actually a 2D vector as expected.
+- Fixed a linkage issue with `nalgebra-lapack`: the user of `nalgebra-lapack` no longer has to add
+  `extern crate lapack-src` to their `main.rs`.
+- Fixed the `no-std` build of `nalgebra-glm`.
+- Fixed the `pow` and `pow_mut` functions (the result was incorrect for some exponent values).
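To make a few of the 0.30.0 changes above concrete, here is a brief sketch (not part of this commit) exercising the sorted-by-default SVD versus `SVD::new_unordered`, the new `From<Vec<T>>` conversion for `RowDVector`, and the positive-exponent-only `pow`. The exact exponent type of `pow` (`u32` below) is an assumption; everything else follows the changelog entries and the benchmark code in this commit.

    // Editorial sketch only, assuming nalgebra 0.30 with the "rand" feature.
    use nalgebra::{Matrix3, RowDVector, SVD};

    fn main() {
        let m = Matrix3::<f64>::new_random();

        // Since 0.30, the singular values returned by `SVD::new` are sorted in
        // increasing order; `new_unordered` skips the sorting overhead.
        let sorted = SVD::new(m, true, true);
        let _unsorted = SVD::new_unordered(m, true, true);
        assert!(sorted.singular_values[0] <= sorted.singular_values[2]);

        // New in 0.30: `RowDVector` can be built directly from a `Vec`.
        let row = RowDVector::from(vec![1.0, 2.0, 3.0]);
        assert_eq!(row.len(), 3);

        // `pow` now only accepts positive integer exponents; for a negative
        // exponent, invert first (exponent type assumed to be `u32`).
        let _m_pow_minus_2 = m.try_inverse().map(|inv| inv.pow(2));
    }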
 ## [0.29.0]
 ### Breaking changes
 - We updated to the version 0.6 of `simba`. This means that the trait bounds `T: na::RealField`, `na::ComplexField`,

@@ -242,21 +288,21 @@ for details about this switch and its benefits.
   geometric types.
 ### Modified
 * Use of traits like `alga::general::{RealField, ComplexField}` have now been replaced by
   `simba::scalar::{RealField, ComplexField}`.
 * The implementation of traits from the __alga__ crate (and well as the dependency to _alga__) are now
   omitted unless the `alga` cargo feature is activated.
 ### Removed
 * The `Neg` unary operator is no longer implemented for `UnitComplex` and `UnitQuaternion`. This caused
   hard-to-track errors when we mistakenly write, e.g., `-q * v` instead of `-(q * v)`.
 * The `na::convert_unchecked` is no longer marked as unsafe.

 ## [0.20.0]
 ### Added
 * `cholesky.rank_one_update(...)` which performs a rank-one update on the cholesky decomposition of a matrix.
 * `From<&Matrix>` is now implemented for matrix slices.
 * `.try_set_magnitude(...)` which sets the magnitude of a vector, while keeping its direction.
 * Implementations of `From` and `Into` for the conversion between matrix slices and standard (`&[N]` `&mut [N]`) slices.

 ### Modified
 * We started some major changes in order to allow non-Copy types to be used as scalar types inside of matrices/vectors.

@@ -264,16 +310,16 @@ for details about this switch and its benefits.
 ### Added
 * `.remove_rows_at` and `remove_columns_at` which removes a set of rows or columns (specified by indices) from a matrix.
 * Several formatting traits have been implemented for all matrices/vectors: `LowerExp`, `UpperExp`, `Octal`, `LowerHex`,
   `UpperHex`, `Binary`, `Pointer`.
 * `UnitQuaternion::quaternions_mean(...)` which computes the mean rotation of a set of unit quaternions. This implements
   the algorithm from _Oshman, Yaakov, and Avishy Carmi, "Attitude estimation from vector observations using a genetic-algorithm-embedded quaternion particle filter."

 ### Modified
 * It is now possible to get the `min/max` element of unsigned integer matrices.

 ### Added to nalgebra-glm
 * Some infinite and reversed perspectives: `::infinite_perspective_rh_no`, `::infinite_perspective_rh_zo`,
   `::reversed_perspective_rh_zo`, and `::reversed_infinite_perspective_rh_zo`.

 ## [0.18.0]
 This release adds full complex number support to nalgebra. This includes all common vector/matrix operations as well

@@ -287,12 +333,12 @@ as matrix decomposition. This excludes geometric type (like `Isometry`, `Rotatio
 * Add `.left_div, .right_div` for quaternions.
 * Add `.renormalize` to `Unit<...>` and `Rotation3` to correct potential drift due to repeated operations.
   Those drifts could cause them not to be pure rotations anymore.

 #### Convolution
 * `.convolve_full(kernel)` returns the convolution of `self` by `kernel`.
 * `.convolve_valid(kernel)` returns the convolution of `self` by `kernel` after removal of all the elements relying on zero-padding.
 * `.convolve_same(kernel)` returns the convolution of `self` by `kernel` with a result of the same size as `self`.

 #### Complex number support
 * Add the `::from_matrix` constructor too all rotation types to extract a rotation from a raw matrix.
 * Add the `::from_matrix_eps` constructor too all rotation types to extract a rotation from a raw matrix. This takes

@@ -329,22 +375,22 @@ Note that all the other BLAS operation will continue to work for all fields, inc
 * Add `.slerp` and `.try_slerp` to unit vectors.
 * Add `.lerp` to vectors.
 * Add `.to_projective` and `.as_projective` to `Perspective3` and `Orthographic3` in order to
   use them as `Projective3` structures.
 * Add `From/Into` impls to allow the conversion of any transformation type to a matrix.
 * Add `Into` impls to convert a matrix slice into an owned matrix.
 * Add `Point*::from_slice` to create a point from a slice.
 * Add `.map_with_location` to matrices to apply a map which passes the component indices to the user-defined closure alongside
   the component itself.
 * Add impl `From<Vector>` for `Point`.
 * Add impl `From<Vector4>` for `Quaternion`.
 * Add impl `From<Vector>` for `Translation`.
 * Add the `::from_vec` constructor to construct a matrix from a `Vec` (a `DMatrix` will reuse the original `Vec`
   as-is for its storage).
 * Add `.to_homogeneous` to square matrices (and with dimensions higher than 1x1). This will increase their number of row
   and columns by 1. The new column and row are filled with 0, except for the diagonal element which is set to 1.
 * Implement `Extend<Vec>` for matrices with a dynamic storage. The provided `Vec` is assumed to represent a column-major
   matrix with the same number of rows as the one being extended. This will effectively append new columns on the right of
   the matrix being extended.
 * Implement `Extend<Vec>` for vectors with a dynamic storage. This will concatenate the vector with the given `Vec`.
 * Implement `Extend<Matrix<...>>` for matrices with dynamic storage. This will concatenate the columns of both matrices.
 * Implement `Into<Vec>` for the `MatrixVec` storage.

@@ -353,10 +399,10 @@ Note that all the other BLAS operation will continue to work for all fields, inc

 ### Modified
 * The orthographic projection no longer require that `bottom < top`, that `left < right`, and that `znear < zfar`. The
   only restriction now ith that they must not be equal (in which case the projection would be singular).
 * The `Point::from_coordinates` methods is deprecated. Use `Point::from` instead.
 * The `.transform_point` and `.transform_vector` methods are now inherent methods for matrices so that the user does not have to
   explicitly import the `Transform` trait from the alga crate.
 * Renamed the matrix storage types: `MatrixArray` -> `ArrayStorage` and `MatrixVec` -> `VecStorage`.
 * Renamed `.unwrap()` to `.into_inner()` for geometric types that wrap another type.
   This is for the case of `Unit`, `Transform`, `Orthographic3`, `Perspective3`, `Rotation`.

@@ -552,8 +598,8 @@ overview of all the added/modified features.

 This version is a major rewrite of the library. Major changes are:
 * Algebraic traits are now defined by the [alga](https://crates.io/crates/alga) crate.
   All other mathematical traits, except `Axpy` have been removed from
   **nalgebra**.
 * Methods are now preferred to free functions because they do not require any
   trait to be used anymore.
 * Most algebraic entities can be parametrized by type-level integers

@@ -746,11 +792,11 @@ you [there](https://users.nphysics.org)!

 ### Modified
 * Vectors are now multipliable with isometries. This will result into a pure rotation (this is how
   vectors differ from point semantically: they design directions, so they are not translatable).
 * `{Isometry3, Rotation3}::look_at` reimplemented and renamed to `::look_at_rh` and `::look_at_lh` to agree
   with the computer graphics community (in particular, the GLM library). Use the `::look_at_rh`
   variant to build a view matrix that
   may be successfully used with `Persp` and `Ortho`.
 * The old `{Isometry3, Rotation3}::look_at` implementations are now called `::new_observer_frame`.
 * Rename every `fov` on `Persp` to `fovy`.
 * Fixed the perspective and orthographic projection matrices.
Cargo.toml (13 changed lines)

@@ -1,6 +1,6 @@
 [package]
 name = "nalgebra"
-version = "0.29.0"
+version = "0.30.0"
 authors = [ "Sébastien Crozet <developer@crozet.re>" ]

 description = "General-purpose linear algebra library with transformations and statically-sized or dynamically-sized matrices."
@@ -32,6 +32,7 @@ compare = [ "matrixcompare-core" ]
 libm = [ "simba/libm" ]
 libm-force = [ "simba/libm_force" ]
 macros = [ "nalgebra-macros" ]
+cuda = [ "cust", "simba/cuda" ]

 # Conversion
 convert-mint = [ "mint" ]
@@ -41,6 +42,7 @@ convert-glam014 = [ "glam014" ]
 convert-glam015 = [ "glam015" ]
 convert-glam016 = [ "glam016" ]
 convert-glam017 = [ "glam017" ]
+convert-glam018 = [ "glam018" ]

 # Serialization
 ## To use serde in a #[no-std] environment, enable the
@@ -72,7 +74,7 @@ num-traits = { version = "0.2", default-features = false }
 num-complex = { version = "0.4", default-features = false }
 num-rational = { version = "0.4", default-features = false }
 approx = { version = "0.5", default-features = false }
-simba = { version = "0.6", default-features = false }
+simba = { version = "0.7", default-features = false }
 alga = { version = "0.9", default-features = false, optional = true }
 rand_distr = { version = "0.4", default-features = false, optional = true }
 matrixmultiply = { version = "0.3", optional = true }
@@ -85,12 +87,16 @@ pest = { version = "2", optional = true }
 pest_derive = { version = "2", optional = true }
 bytemuck = { version = "1.5", optional = true }
 matrixcompare-core = { version = "0.1", optional = true }
 proptest = { version = "1", optional = true, default-features = false, features = ["std"] }
 glam013 = { package = "glam", version = "0.13", optional = true }
 glam014 = { package = "glam", version = "0.14", optional = true }
 glam015 = { package = "glam", version = "0.15", optional = true }
 glam016 = { package = "glam", version = "0.16", optional = true }
 glam017 = { package = "glam", version = "0.17", optional = true }
+glam018 = { package = "glam", version = "0.18", optional = true }
+
+[target.'cfg(not(target_os = "cuda"))'.dependencies]
+cust = { version = "0.2", optional = true }

 [dev-dependencies]
@@ -127,3 +133,4 @@ lto = true
 [package.metadata.docs.rs]
 # Enable certain features when building docs for docs.rs
 features = [ "proptest-support", "compare", "macros", "rand" ]
Makefile (12 changed lines; file deleted)

@@ -1,12 +0,0 @@
-all:
-	cargo test --features "debug arbitrary serde-serialize abomonation-serialize compare"
-	# cargo check --features "debug arbitrary serde-serialize"
-
-doc:
-	cargo doc --no-deps --features "debug arbitrary serde-serialize abomonation"
-
-bench:
-	cargo bench
-
-test:
-	cargo test --features "debug arbitrary serde-serialize abomonation-serialize compare"
README.md (16 changed lines)

@@ -9,7 +9,7 @@
     <img src="https://circleci.com/gh/dimforge/nalgebra.svg?style=svg" alt="Build status">
     </a>
     <a href="https://crates.io/crates/nalgebra">
-    <img src="https://meritbadge.herokuapp.com/nalgebra?style=flat-square" alt="crates.io">
+    <img src="https://img.shields.io/crates/v/nalgebra.svg?style=flat-square" alt="crates.io">
     </a>
     <a href="https://opensource.org/licenses/Apache-2.0">
     <img src="https://img.shields.io/badge/License-Apache%202.0-blue.svg">
@@ -30,10 +30,18 @@

 -----

-## Platinum sponsors
-Rapier is supported by:
+## Acknowledgements
+nalgebra is supported by our **platinum** sponsors:
 <p>
     <a href="https://embark-studios.com">
-    <img src="https://www.embark.dev/img/logo_black.png" width="401px">
+    <img src="https://www.embark.dev/img/logo_black.png" width="301px">
     </a>
 </p>

+And our gold sponsors:
+
+<p>
+    <a href="https://fragcolor.com">
+    <img src="https://dimforge.com/img/fragcolor_logo1_color_black.svg" width="151px">
+    </a>
+</p>
--------

@@ -1,4 +1,18 @@
-use na::{Matrix4, SVD};
+use na::{Matrix2, Matrix3, Matrix4, SVD};

+fn svd_decompose_2x2_f32(bh: &mut criterion::Criterion) {
+    let m = Matrix2::<f32>::new_random();
+    bh.bench_function("svd_decompose_2x2", move |bh| {
+        bh.iter(|| std::hint::black_box(SVD::new_unordered(m.clone(), true, true)))
+    });
+}
+
+fn svd_decompose_3x3_f32(bh: &mut criterion::Criterion) {
+    let m = Matrix3::<f32>::new_random();
+    bh.bench_function("svd_decompose_3x3", move |bh| {
+        bh.iter(|| std::hint::black_box(SVD::new_unordered(m.clone(), true, true)))
+    });
+}
+
 fn svd_decompose_4x4(bh: &mut criterion::Criterion) {
     let m = Matrix4::<f64>::new_random();
@@ -114,6 +128,8 @@ fn pseudo_inverse_200x200(bh: &mut criterion::Criterion) {

 criterion_group!(
     svd,
+    svd_decompose_2x2_f32,
+    svd_decompose_3x3_f32,
     svd_decompose_4x4,
     svd_decompose_10x10,
     svd_decompose_100x100,
--------

@@ -4,7 +4,7 @@ version = "0.0.0"
 authors = [ "You" ]

 [dependencies]
-nalgebra = "0.29.0"
+nalgebra = "0.30.0"

 [[bin]]
 name = "example"
--------

@@ -3,6 +3,7 @@
 extern crate nalgebra as na;

 use na::{Isometry3, Perspective3, Point3, Vector3};
+use std::f32::consts;

 fn main() {
     // Our object is translated along the x axis.
@@ -15,7 +16,7 @@ fn main() {
     let view = Isometry3::look_at_rh(&eye, &target, &Vector3::y());

     // A perspective projection.
-    let projection = Perspective3::new(16.0 / 9.0, 3.14 / 2.0, 1.0, 1000.0);
+    let projection = Perspective3::new(16.0 / 9.0, consts::PI / 2.0, 1.0, 1000.0);

     // The combination of the model with the view is still an isometry.
     let model_view = view * model;
--------

@@ -19,6 +19,7 @@ fn main() {

     /* Then pass the raw pointers to some graphics API. */

+    #[allow(clippy::float_cmp)]
     unsafe {
         assert_eq!(*v_pointer, 1.0);
         assert_eq!(*v_pointer.offset(1), 0.0);
--------

@@ -3,9 +3,10 @@
 extern crate nalgebra as na;

 use na::{Perspective3, Point2, Point3, Unit};
+use std::f32::consts;

 fn main() {
-    let projection = Perspective3::new(800.0 / 600.0, 3.14 / 2.0, 1.0, 1000.0);
+    let projection = Perspective3::new(800.0 / 600.0, consts::PI / 2.0, 1.0, 1000.0);
     let screen_point = Point2::new(10.0f32, 20.0);

     // Compute two points in clip-space.
--------

@@ -1,6 +1,7 @@
 extern crate nalgebra as na;

 use na::{Isometry2, Similarity2, Vector2};
+use std::f32::consts;

 fn main() {
     // Isometry -> Similarity conversion always succeeds.
@@ -8,8 +9,8 @@ fn main() {
     let _: Similarity2<f32> = na::convert(iso);

     // Similarity -> Isometry conversion fails if the scaling factor is not 1.0.
-    let sim_without_scaling = Similarity2::new(Vector2::new(1.0f32, 2.0), 3.14, 1.0);
+    let sim_without_scaling = Similarity2::new(Vector2::new(1.0f32, 2.0), consts::PI, 1.0);
-    let sim_with_scaling = Similarity2::new(Vector2::new(1.0f32, 2.0), 3.14, 2.0);
+    let sim_with_scaling = Similarity2::new(Vector2::new(1.0f32, 2.0), consts::PI, 2.0);

     let iso_success: Option<Isometry2<f32>> = na::try_convert(sim_without_scaling);
     let iso_fail: Option<Isometry2<f32>> = na::try_convert(sim_with_scaling);
--------

@@ -3,6 +3,7 @@ extern crate approx;
 extern crate nalgebra as na;

 use na::{Matrix4, Point3, Vector3};
+use std::f32::consts;

 fn main() {
     // Create a uniform scaling matrix with scaling factor 2.
@@ -28,7 +29,7 @@ fn main() {
     );

     // Create rotation.
-    let rot = Matrix4::from_scaled_axis(&Vector3::x() * 3.14);
+    let rot = Matrix4::from_scaled_axis(Vector3::x() * consts::PI);
     let rot_then_m = m * rot; // Right-multiplication is equivalent to prepending `rot` to `m`.
     let m_then_rot = rot * m; // Left-multiplication is equivalent to appending `rot` to `m`.
--------

@@ -12,6 +12,7 @@ fn main() {

     /* Then pass the raw pointer to some graphics API. */

+    #[allow(clippy::float_cmp)]
     unsafe {
         assert_eq!(*iso_pointer, 1.0);
         assert_eq!(*iso_pointer.offset(5), 1.0);
--------

@@ -1,3 +1,4 @@
+#![allow(clippy::float_cmp)]
 extern crate nalgebra as na;

 use na::{Unit, Vector3};
--------

@@ -1,6 +1,6 @@
 [package]
 name = "nalgebra-glm"
-version = "0.15.0"
+version = "0.16.0"
 authors = ["sebcrozet <developer@crozet.re>"]

 description = "A computer-graphics oriented API for nalgebra, inspired by the C++ GLM library."
@@ -22,9 +22,20 @@ std = [ "nalgebra/std", "simba/std" ]
 arbitrary = [ "nalgebra/arbitrary" ]
 serde-serialize = [ "nalgebra/serde-serialize-no-std" ]
 abomonation-serialize = [ "nalgebra/abomonation-serialize" ]
+cuda = [ "nalgebra/cuda" ]
+
+# Conversion
+convert-mint = [ "nalgebra/mint" ]
+convert-bytemuck = [ "nalgebra/bytemuck" ]
+convert-glam013 = [ "nalgebra/glam013" ]
+convert-glam014 = [ "nalgebra/glam014" ]
+convert-glam015 = [ "nalgebra/glam015" ]
+convert-glam016 = [ "nalgebra/glam016" ]
+convert-glam017 = [ "nalgebra/glam017" ]
+convert-glam018 = [ "nalgebra/glam018" ]

 [dependencies]
 num-traits = { version = "0.2", default-features = false }
 approx = { version = "0.5", default-features = false }
-simba = { version = "0.6", default-features = false }
+simba = { version = "0.7", default-features = false }
-nalgebra = { path = "..", version = "0.29", default-features = false }
+nalgebra = { path = "..", version = "0.30", default-features = false }
--------

@@ -75,6 +75,21 @@ pub fn mat2x4<T: Scalar>(m11: T, m12: T, m13: T, m14: T,
 }

 /// Create a new 3x3 matrix.
+///
+/// # Example
+/// ```
+/// # use nalgebra_glm::mat3;
+/// let m = mat3(
+///     1.0, 2.0, 3.0,
+///     4.0, 5.0, 6.0,
+///     7.0, 8.0, 9.0
+/// );
+/// assert!(
+///     m.m11 == 1.0 && m.m12 == 2.0 && m.m13 == 3.0 &&
+///     m.m21 == 4.0 && m.m22 == 5.0 && m.m23 == 6.0 &&
+///     m.m31 == 7.0 && m.m32 == 8.0 && m.m33 == 9.0
+/// );
+/// ```
 #[rustfmt::skip]
 pub fn mat3<T: Scalar>(m11: T, m12: T, m13: T,
                        m21: T, m22: T, m23: T,
@@ -105,8 +120,8 @@ pub fn mat3x3<T: Scalar>(m11: T, m12: T, m13: T,
                          m31: T, m32: T, m33: T) -> TMat3<T> {
     TMat::<T, 3, 3>::new(
         m11, m12, m13,
-        m31, m32, m33,
         m21, m22, m23,
+        m31, m32, m33,
     )
 }
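The `mat3x3` hunk above fixes an argument-ordering bug: the third row's components were previously passed to `TMat::new` before the second row's, so rows two and three of the result were swapped. A quick check of the row-major argument order after the fix (an editorial sketch, not part of this commit; field access via `.m21`/`.m31` follows the `mat3` doc example above):

    // Editorial sketch, assuming nalgebra-glm 0.16 with the fix above.
    use nalgebra_glm::mat3x3;

    fn main() {
        // Arguments are given row by row: (m11, m12, m13, m21, ..., m33).
        let m = mat3x3(
            1.0, 2.0, 3.0,
            4.0, 5.0, 6.0,
            7.0, 8.0, 9.0,
        );
        // Before the fix, the second and third rows of the result were swapped.
        assert_eq!(m.m21, 4.0);
        assert_eq!(m.m31, 7.0);
    }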
--------

@@ -1,9 +1,9 @@
 use approx::AbsDiffEq;
 use num::{Bounded, Signed};

+use core::cmp::PartialOrd;
 use na::Scalar;
 use simba::scalar::{ClosedAdd, ClosedMul, ClosedSub, RealField};
-use std::cmp::PartialOrd;

 /// A number that can either be an integer or a float.
 pub trait Number:
--------

@@ -53,7 +53,7 @@ pub fn degrees<T: RealNumber, const D: usize>(radians: &TVec<T, D>) -> TVec<T, D>
     radians.map(|e| e * na::convert(180.0) / T::pi())
 }

-/// Component-wise conversion fro degrees to radians.
+/// Component-wise conversion from degrees to radians.
 pub fn radians<T: RealNumber, const D: usize>(degrees: &TVec<T, D>) -> TVec<T, D> {
     degrees.map(|e| e * T::pi() / na::convert(180.0))
 }
--------

@@ -1,6 +1,6 @@
 [package]
 name = "nalgebra-lapack"
-version = "0.20.0"
+version = "0.21.0"
 authors = [ "Sébastien Crozet <developer@crozet.re>", "Andrew Straw <strawman@astraw.com>" ]

 description = "Matrix decompositions using nalgebra matrices and Lapack bindings."
@@ -29,17 +29,17 @@ accelerate = ["lapack-src/accelerate"]
 intel-mkl = ["lapack-src/intel-mkl"]

 [dependencies]
-nalgebra = { version = "0.29", path = ".." }
+nalgebra = { version = "0.30", path = ".." }
 num-traits = "0.2"
 num-complex = { version = "0.4", default-features = false }
-simba = "0.5"
+simba = "0.7"
 serde = { version = "1.0", features = [ "derive" ], optional = true }
 lapack = { version = "0.19", default-features = false }
 lapack-src = { version = "0.8", default-features = false }
 # clippy = "*"

 [dev-dependencies]
-nalgebra = { version = "0.29", features = [ "arbitrary", "rand" ], path = ".." }
+nalgebra = { version = "0.30", features = [ "arbitrary", "rand" ], path = ".." }
 proptest = { version = "1", default-features = false, features = ["std"] }
 quickcheck = "1"
 approx = "0.5"
--------

@@ -294,7 +294,7 @@ where
         let mut res = Matrix::zeros_generic(nrows, Const::<1>);

         for i in 0..res.len() {
-            res[i] = Complex::new(wr[i], wi[i]);
+            res[i] = Complex::new(wr[i].clone(), wi[i].clone());
         }

         res
@@ -306,7 +306,7 @@ where
     pub fn determinant(&self) -> T {
         let mut det = T::one();
         for e in self.eigenvalues.iter() {
-            det *= *e;
+            det *= e.clone();
         }

         det
--------

@@ -73,6 +73,9 @@
     html_root_url = "https://nalgebra.org/rustdoc"
 )]

+extern crate lapack;
+extern crate lapack_src;
+
 extern crate nalgebra as na;
 extern crate num_traits as num;
--------

@@ -155,7 +155,7 @@ where
         let mut out = Matrix::zeros_generic(self.t.shape_generic().0, Const::<1>);

         for i in 0..out.len() {
-            out[i] = Complex::new(self.re[i], self.im[i])
+            out[i] = Complex::new(self.re[i].clone(), self.im[i].clone())
         }

         out
--------

@@ -140,7 +140,7 @@ where
     pub fn determinant(&self) -> T {
         let mut det = T::one();
         for e in self.eigenvalues.iter() {
-            det *= *e;
+            det *= e.clone();
         }

         det
@@ -153,7 +153,7 @@ where
     pub fn recompose(&self) -> OMatrix<T, D, D> {
         let mut u_t = self.eigenvectors.clone();
         for i in 0..self.eigenvalues.len() {
-            let val = self.eigenvalues[i];
+            let val = self.eigenvalues[i].clone();
             u_t.column_mut(i).mul_assign(val);
         }
         u_t.transpose_mut();
--------

@@ -6,9 +6,6 @@ compile_error!("Tests must be run with `proptest-support`");
 extern crate nalgebra as na;
 extern crate nalgebra_lapack as nl;

-extern crate lapack;
-extern crate lapack_src;
-
 mod linalg;
 #[path = "../../tests/proptest/mod.rs"]
 mod proptest;
--------

@@ -21,5 +21,5 @@ quote = "1.0"
 proc-macro2 = "1.0"

 [dev-dependencies]
-nalgebra = { version = "0.29.0", path = ".." }
+nalgebra = { version = "0.30.0", path = ".." }
 trybuild = "1.0.42"
--------

@@ -1,6 +1,6 @@
 [package]
 name = "nalgebra-sparse"
-version = "0.5.0"
+version = "0.6.0"
 authors = [ "Andreas Longva", "Sébastien Crozet <developer@crozet.re>" ]
 edition = "2018"
 description = "Sparse matrix computation based on nalgebra."
@@ -16,19 +16,24 @@ license = "Apache-2.0"
 proptest-support = ["proptest", "nalgebra/proptest-support"]
 compare = [ "matrixcompare-core" ]

+# Enable matrix market I/O
+io = [ "pest", "pest_derive" ]
+
 # Enable to enable running some tests that take a lot of time to run
 slow-tests = []

 [dependencies]
-nalgebra = { version="0.29", path = "../" }
+nalgebra = { version="0.30", path = "../" }
 num-traits = { version = "0.2", default-features = false }
 proptest = { version = "1.0", optional = true }
 matrixcompare-core = { version = "0.1.0", optional = true }
+pest = { version = "2", optional = true }
+pest_derive = { version = "2", optional = true }

 [dev-dependencies]
 itertools = "0.10"
 matrixcompare = { version = "0.3.0", features = [ "proptest-support" ] }
-nalgebra = { version="0.29", path = "../", features = ["compare"] }
+nalgebra = { version="0.30", path = "../", features = ["compare"] }

 [package.metadata.docs.rs]
 # Enable certain features when building docs for docs.rs
--------

@@ -10,6 +10,7 @@ use crate::{SparseEntry, SparseEntryMut, SparseFormatError, SparseFormatErrorKin
 use nalgebra::Scalar;
 use num_traits::One;

+use std::iter::FromIterator;
 use std::slice::{Iter, IterMut};

 /// A CSR representation of a sparse matrix.
@@ -170,6 +171,77 @@ impl<T> CsrMatrix<T> {
         Self::try_from_pattern_and_values(pattern, values)
     }

+    /// Try to construct a CSR matrix from raw CSR data with unsorted column indices.
+    ///
+    /// It is assumed that each row contains unique column indices that are in
+    /// bounds with respect to the number of columns in the matrix. If this is not the case,
+    /// an error is returned to indicate the failure.
+    ///
+    /// An error is returned if the data given does not conform to the CSR storage format
+    /// with the exception of having unsorted column indices and values.
+    /// See the documentation for [CsrMatrix](struct.CsrMatrix.html) for more information.
+    pub fn try_from_unsorted_csr_data(
+        num_rows: usize,
+        num_cols: usize,
+        row_offsets: Vec<usize>,
+        col_indices: Vec<usize>,
+        values: Vec<T>,
+    ) -> Result<Self, SparseFormatError>
+    where
+        T: Scalar,
+    {
+        use SparsityPatternFormatError::*;
+        let count = col_indices.len();
+        let mut p: Vec<usize> = (0..count).collect();
+
+        if col_indices.len() != values.len() {
+            return Err(SparseFormatError::from_kind_and_msg(
+                SparseFormatErrorKind::InvalidStructure,
+                "Number of values and column indices must be the same",
+            ));
+        }
+
+        if row_offsets.len() == 0 {
+            return Err(SparseFormatError::from_kind_and_msg(
+                SparseFormatErrorKind::InvalidStructure,
+                "Number of offsets should be greater than 0",
+            ));
+        }
+
+        for (index, &offset) in row_offsets[0..row_offsets.len() - 1].iter().enumerate() {
+            let next_offset = row_offsets[index + 1];
+            if next_offset > count {
+                return Err(SparseFormatError::from_kind_and_msg(
+                    SparseFormatErrorKind::InvalidStructure,
+                    "No row offset should be greater than the number of column indices",
+                ));
+            }
+            if offset > next_offset {
+                return Err(NonmonotonicOffsets).map_err(pattern_format_error_to_csr_error);
+            }
+            p[offset..next_offset].sort_by(|a, b| {
+                let x = &col_indices[*a];
+                let y = &col_indices[*b];
+                x.partial_cmp(y).unwrap()
+            });
+        }
+
+        // permute indices
+        let sorted_col_indices: Vec<usize> =
+            Vec::from_iter((p.iter().map(|i| &col_indices[*i])).cloned());
+
+        // permute values
+        let sorted_values: Vec<T> = Vec::from_iter((p.iter().map(|i| &values[*i])).cloned());
+
+        return Self::try_from_csr_data(
+            num_rows,
+            num_cols,
+            row_offsets,
+            sorted_col_indices,
+            sorted_values,
+        );
+    }
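As a quick illustration of the new constructor above (an editorial sketch, not part of this commit): the column indices within each row may be given in any order, and the result should match what `try_from_csr_data` produces from the already-sorted data. The `PartialEq` comparison between the two matrices is an assumption.

    // Editorial sketch; assumes the `try_from_unsorted_csr_data` constructor above.
    use nalgebra_sparse::csr::CsrMatrix;

    fn main() {
        // A 2x3 matrix with three explicit entries:
        // row 0 -> (0, 1) = 10.0 and (0, 0) = 20.0 (unsorted columns),
        // row 1 -> (1, 2) = 30.0.
        let unsorted = CsrMatrix::try_from_unsorted_csr_data(
            2,
            3,
            vec![0, 2, 3],
            vec![1, 0, 2],
            vec![10.0, 20.0, 30.0],
        )
        .expect("valid CSR data, just unsorted within each row");

        // The same matrix built from column indices already sorted per row.
        let sorted = CsrMatrix::try_from_csr_data(
            2,
            3,
            vec![0, 2, 3],
            vec![0, 1, 2],
            vec![20.0, 10.0, 30.0],
        )
        .unwrap();

        assert_eq!(unsorted, sorted);
    }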

     /// Try to construct a CSR matrix from a sparsity pattern and associated non-zero values.
     ///
     /// Returns an error if the number of values does not match the number of minor indices
--------

@@ -0,0 +1,53 @@
+WHITESPACE = _{ " "|"\t" }
+
+//
+
+Sparsity = {^"coordinate" | ^"array"}
+DataType = {^"real" | ^"complex" | ^"pattern" | ^"integer" }
+StorageScheme = {^"symmetric" | ^"general" | ^"skew-symmetric" | ^"hermitian"}
+// Only consider matrices here.
+Header = { ^"%%matrixmarket matrix" ~ Sparsity ~ DataType ~ StorageScheme }
+
+//
+
+Comments = _{ "%" ~ (!NEWLINE ~ ANY)* }
+
+//
+
+Dimension = @{ ASCII_DIGIT+ }
+SparseShape = { Dimension ~ Dimension ~ Dimension}
+DenseShape = { Dimension ~ Dimension}
+Shape = {SparseShape | DenseShape }
+
+//
+
+// grammar from https://doc.rust-lang.org/std/primitive.f64.html#grammar
+Sign = {("+" | "-")}
+Exp = @{ ^"e" ~ Sign? ~ ASCII_DIGIT+}
+Number = @{ ((ASCII_DIGIT+ ~ "." ~ ASCII_DIGIT*) | (ASCII_DIGIT* ~ "." ~ASCII_DIGIT+) | ASCII_DIGIT+ ) ~ Exp? }
+Real = @{ Sign? ~ ("inf" | "NaN" | Number) }
+
+SparseReal = {Dimension~ Dimension~ Real }
+SparseComplex = {Dimension ~ Dimension ~ Real ~ Real}
+SparsePattern = {Dimension ~ Dimension}
+
+DenseReal = {Real}
+DenseComplex = {Real ~ Real}
+
+Entry = { SparseComplex | SparseReal | SparsePattern | DenseComplex | DenseReal }
+
+// end of file, a silent way, see https://github.com/pest-parser/pest/issues/304#issuecomment-427198507
+eoi = _{ !ANY }
+
+Document = {
+    SOI ~
+    NEWLINE* ~
+    Header ~
+    (NEWLINE ~ Comments)* ~
+    (NEWLINE ~ Shape) ~
+    (NEWLINE ~ Entry?)* ~
+    eoi
+}
(File diff suppressed because it is too large.)
--------

@@ -0,0 +1,38 @@
+//! Functionality for importing and exporting sparse matrices to and from files.
+//!
+//! **Available only when the `io` feature is enabled.**
+//!
+//! The following formats are currently supported:
+//!
+//! | Format                                 | Import | Export |
+//! | ---------------------------------------|--------|--------|
+//! | [Matrix market](#matrix-market-format) | Yes    | No     |
+//!
+//! [Matrix market]: https://math.nist.gov/MatrixMarket/formats.html
+//!
+//! ## Matrix Market format
+//!
+//! The Matrix Market format is a simple ASCII-based file format for sparse matrices, and was initially developed for
+//! the [NIST Matrix Market](https://math.nist.gov/MatrixMarket/), a repository of example sparse matrices.
+//! In later years it has largely been superseded by the
+//! [SuiteSparse Matrix Collection](https://sparse.tamu.edu/) (formerly University of Florida Sparse Matrix Collection),
+//! which also uses the Matrix Market file format.
+//!
+//! We currently offer functionality for importing a Matrix market file to an instance of a
+//! [CooMatrix](crate::CooMatrix) through the function [load_coo_from_matrix_market_file]. It is also possible to load
+//! a matrix stored in the matrix market format with the function [load_coo_from_matrix_market_str].
+//!
+//! Export is currently not implemented, but [planned](https://github.com/dimforge/nalgebra/issues/1037).
+//!
+//! Our implementation is based on the [format description](https://math.nist.gov/MatrixMarket/formats.html)
+//! on the Matrix Market website and the
+//! [following NIST whitepaper](https://math.nist.gov/MatrixMarket/reports/MMformat.ps):
+//!
+//! > Boisvert, Ronald F., Roldan Pozo, and Karin A. Remington.<br/>
+//! > "*The Matrix Market Exchange Formats: Initial Design.*" (1996).
+
+pub use self::matrix_market::{
+    load_coo_from_matrix_market_file, load_coo_from_matrix_market_str, MatrixMarketError,
+    MatrixMarketErrorKind, MatrixMarketScalar,
+};
+mod matrix_market;
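To show how the new `io` module above is meant to be used, here is an editorial sketch (not part of this commit). The generic bounds of `load_coo_from_matrix_market_str` and the `CooMatrix::nnz` accessor are assumptions based on the re-exports and doc text above.

    // Editorial sketch; requires nalgebra-sparse built with the `io` feature.
    use nalgebra_sparse::io::load_coo_from_matrix_market_str;
    use nalgebra_sparse::CooMatrix;

    fn main() {
        // A tiny 3x3 sparse matrix with two explicit entries, in Matrix Market form
        // (header, then "rows cols nnz", then 1-based "row col value" triplets).
        let mm = "%%MatrixMarket matrix coordinate real general
    3 3 2
    1 1 1.5
    2 3 -4.0
    ";

        // Parse the string into a COO (triplet) matrix.
        let coo: CooMatrix<f64> =
            load_coo_from_matrix_market_str(mm).expect("valid Matrix Market data");
        assert_eq!(coo.nnz(), 2);
    }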
--------

@@ -19,6 +19,7 @@
 //! - Sparsity patterns in CSR and CSC matrices are explicitly represented by the
 //!   [SparsityPattern](pattern::SparsityPattern) type, which encodes the invariants of the
 //!   associated index data structures.
+//! - [Matrix market format support](`io`) when the `io` feature is enabled.
 //! - [proptest strategies](`proptest`) for sparse matrices when the feature
 //!   `proptest-support` is enabled.
 //! - [matrixcompare support](https://crates.io/crates/matrixcompare) for effortless
@@ -142,11 +143,19 @@
 )]

 pub extern crate nalgebra as na;
+#[cfg(feature = "io")]
+extern crate pest;
+#[macro_use]
+#[cfg(feature = "io")]
+extern crate pest_derive;

 pub mod convert;
 pub mod coo;
 pub mod csc;
 pub mod csr;
 pub mod factorization;
+#[cfg(feature = "io")]
+pub mod io;
 pub mod ops;
 pub mod pattern;
@ -67,7 +67,7 @@
//! As can be seen from the table, only `CSR * Dense` and `CSC * Dense` are supported.
//! The other way around, i.e. `Dense * CSR` and `Dense * CSC` are not implemented.
//!
//! Additionally, [CsrMatrix](`crate::csr::CsrMatrix`) and [CooMatrix](`crate::coo::CooMatrix`)
//! Additionally, [CsrMatrix](`crate::csr::CsrMatrix`) and [CscMatrix](`crate::csc::CscMatrix`)
//! support multiplication with scalars, in addition to division by a scalar.
//! Note that only `Matrix * Scalar` works in a generic context, although `Scalar * Matrix`
//! has been implemented for many of the built-in arithmetic types. This is due to a fundamental
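As a quick illustration of the scalar operations described in the paragraph above, here is a hedged sketch (not part of the diff) assuming the usual `CooMatrix` construction and COO-to-CSR conversion:

// Hedged sketch: `Matrix * Scalar` and division by a scalar on a CSR matrix.
use nalgebra_sparse::{coo::CooMatrix, csr::CsrMatrix};

fn main() {
    let mut coo = CooMatrix::<f64>::new(2, 2);
    coo.push(0, 0, 1.0);
    coo.push(1, 1, 3.0);
    let csr = CsrMatrix::from(&coo);

    // `Matrix * Scalar` works in a generic context.
    let doubled = &csr * 2.0;
    // Division by a scalar is supported as well (this consumes `csr`).
    let halved = csr / 2.0;

    assert_eq!(doubled.values(), &[2.0, 6.0]);
    assert_eq!(halved.values(), &[0.5, 1.5]);
}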
@ -5,11 +5,6 @@
//! The strategies provided here are generally expected to be able to generate the entire range
//! of possible outputs given the constraints on dimensions and values. However, there are no
//! particular guarantees on the distribution of possible values.

// Contains some patched code from proptest that we can remove in the (hopefully near) future.
// See docs in file for more details.
mod proptest_patched;

use crate::coo::CooMatrix;
use crate::csc::CscMatrix;
use crate::csr::CsrMatrix;
@ -31,16 +26,10 @@ fn dense_row_major_coord_strategy(
    let mut booleans = vec![true; nnz];
    booleans.append(&mut vec![false; (nrows * ncols) - nnz]);
    // Make sure that exactly `nnz` of the booleans are true

    // TODO: We cannot use the below code because of a bug in proptest, see
    // https://github.com/AltSysrq/proptest/pull/217
    // so for now we're using a patched version of the Shuffle adapter
    // (see also docs in `proptest_patched`
    // Just(booleans)
    //     // Need to shuffle to make sure they are randomly distributed
    //     .prop_shuffle()
    proptest_patched::Shuffle(Just(booleans)).prop_map(move |booleans| {

    Just(booleans)
        // Need to shuffle to make sure they are randomly distributed
        .prop_shuffle()
        .prop_map(move |booleans| {
            booleans
                .into_iter()
                .enumerate()
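The comments above describe the idea behind `dense_row_major_coord_strategy`: build a boolean mask with exactly `nnz` entries set, shuffle it, and map the selected linear indices back to row-major coordinates. A self-contained sketch of that idea (an illustration under assumptions, not the code from this diff) using plain proptest now that `prop_shuffle` generates all permutations:

// Hedged sketch: pick `nnz` distinct (row, col) coordinates in an nrows x ncols grid.
// Assumes nnz <= nrows * ncols.
use proptest::prelude::*;

fn dense_row_major_coords(
    nrows: usize,
    ncols: usize,
    nnz: usize,
) -> impl Strategy<Value = Vec<(usize, usize)>> {
    // Exactly `nnz` of the booleans are true...
    let mut booleans = vec![true; nnz];
    booleans.append(&mut vec![false; nrows * ncols - nnz]);
    // ...and shuffling makes the selected cells randomly distributed.
    Just(booleans).prop_shuffle().prop_map(move |booleans| {
        booleans
            .into_iter()
            .enumerate()
            .filter_map(|(index, selected)| {
                if selected {
                    // Row-major: linear index = row * ncols + col.
                    Some((index / ncols, index % ncols))
                } else {
                    None
                }
            })
            .collect()
    })
}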
@ -1,146 +0,0 @@
|
||||||
//! Contains a modified implementation of `proptest::strategy::Shuffle`.
|
|
||||||
//!
|
|
||||||
//! The current implementation in `proptest` does not generate all permutations, which is
|
|
||||||
//! problematic for our proptest generators. The issue has been fixed in
|
|
||||||
//! https://github.com/AltSysrq/proptest/pull/217
|
|
||||||
//! but it has yet to be merged and released. As soon as this fix makes it into a new release,
|
|
||||||
//! the modified code here can be removed.
|
|
||||||
//!
|
|
||||||
/*!
|
|
||||||
This code has been copied and adapted from
|
|
||||||
https://github.com/AltSysrq/proptest/blob/master/proptest/src/strategy/shuffle.rs
|
|
||||||
The original licensing text is:
|
|
||||||
|
|
||||||
//-
|
|
||||||
// Copyright 2017 Jason Lingle
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
|
||||||
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
|
|
||||||
// option. This file may not be copied, modified, or distributed
|
|
||||||
// except according to those terms.
|
|
||||||
|
|
||||||
*/
|
|
||||||
|
|
||||||
use proptest::num;
|
|
||||||
use proptest::prelude::Rng;
|
|
||||||
use proptest::strategy::{NewTree, Shuffleable, Strategy, ValueTree};
|
|
||||||
use proptest::test_runner::{TestRng, TestRunner};
|
|
||||||
use std::cell::Cell;
|
|
||||||
|
|
||||||
#[derive(Clone, Debug)]
|
|
||||||
#[must_use = "strategies do nothing unless used"]
|
|
||||||
pub struct Shuffle<S>(pub(super) S);
|
|
||||||
|
|
||||||
impl<S: Strategy> Strategy for Shuffle<S>
|
|
||||||
where
|
|
||||||
S::Value: Shuffleable,
|
|
||||||
{
|
|
||||||
type Tree = ShuffleValueTree<S::Tree>;
|
|
||||||
type Value = S::Value;
|
|
||||||
|
|
||||||
fn new_tree(&self, runner: &mut TestRunner) -> NewTree<Self> {
|
|
||||||
let rng = runner.new_rng();
|
|
||||||
|
|
||||||
self.0.new_tree(runner).map(|inner| ShuffleValueTree {
|
|
||||||
inner,
|
|
||||||
rng,
|
|
||||||
dist: Cell::new(None),
|
|
||||||
simplifying_inner: false,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug)]
|
|
||||||
pub struct ShuffleValueTree<V> {
|
|
||||||
inner: V,
|
|
||||||
rng: TestRng,
|
|
||||||
dist: Cell<Option<num::usize::BinarySearch>>,
|
|
||||||
simplifying_inner: bool,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<V: ValueTree> ShuffleValueTree<V>
|
|
||||||
where
|
|
||||||
V::Value: Shuffleable,
|
|
||||||
{
|
|
||||||
fn init_dist(&self, dflt: usize) -> usize {
|
|
||||||
if self.dist.get().is_none() {
|
|
||||||
self.dist.set(Some(num::usize::BinarySearch::new(dflt)));
|
|
||||||
}
|
|
||||||
|
|
||||||
self.dist.get().unwrap().current()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn force_init_dist(&self) {
|
|
||||||
if self.dist.get().is_none() {
|
|
||||||
let _ = self.init_dist(self.current().shuffle_len());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<V: ValueTree> ValueTree for ShuffleValueTree<V>
|
|
||||||
where
|
|
||||||
V::Value: Shuffleable,
|
|
||||||
{
|
|
||||||
type Value = V::Value;
|
|
||||||
|
|
||||||
fn current(&self) -> V::Value {
|
|
||||||
let mut value = self.inner.current();
|
|
||||||
let len = value.shuffle_len();
|
|
||||||
// The maximum distance to swap elements. This could be larger than
|
|
||||||
// `value` if `value` has reduced size during shrinking; that's OK,
|
|
||||||
// since we only use this to filter swaps.
|
|
||||||
let max_swap = self.init_dist(len);
|
|
||||||
|
|
||||||
// If empty collection or all swaps will be filtered out, there's
|
|
||||||
// nothing to shuffle.
|
|
||||||
if 0 == len || 0 == max_swap {
|
|
||||||
return value;
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut rng = self.rng.clone();
|
|
||||||
|
|
||||||
for start_index in 0..len - 1 {
|
|
||||||
// Determine the other index to be swapped, then skip the swap if
|
|
||||||
// it is too far. This ordering is critical, as it ensures that we
|
|
||||||
// generate the same sequence of random numbers every time.
|
|
||||||
|
|
||||||
// NOTE: The below line is the whole reason for the existence of this adapted code
|
|
||||||
// We need to be able to swap with the same element, so that some elements remain in
|
|
||||||
// place rather being swapped
|
|
||||||
// let end_index = rng.gen_range(start_index + 1..len);
|
|
||||||
let end_index = rng.gen_range(start_index..len);
|
|
||||||
if end_index - start_index <= max_swap {
|
|
||||||
value.shuffle_swap(start_index, end_index);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
value
|
|
||||||
}
|
|
||||||
|
|
||||||
fn simplify(&mut self) -> bool {
|
|
||||||
if self.simplifying_inner {
|
|
||||||
self.inner.simplify()
|
|
||||||
} else {
|
|
||||||
// Ensure that we've initialised `dist` to *something* to give
|
|
||||||
// consistent non-panicking behaviour even if called in an
|
|
||||||
// unexpected sequence.
|
|
||||||
self.force_init_dist();
|
|
||||||
if self.dist.get_mut().as_mut().unwrap().simplify() {
|
|
||||||
true
|
|
||||||
} else {
|
|
||||||
self.simplifying_inner = true;
|
|
||||||
self.inner.simplify()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn complicate(&mut self) -> bool {
|
|
||||||
if self.simplifying_inner {
|
|
||||||
self.inner.complicate()
|
|
||||||
} else {
|
|
||||||
self.force_init_dist();
|
|
||||||
self.dist.get_mut().as_mut().unwrap().complicate()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,6 +1,9 @@
//! Unit tests
#[cfg(any(not(feature = "proptest-support"), not(feature = "compare")))]
#[cfg(not(all(feature = "proptest-support", feature = "compare", feature = "io",)))]
compile_error!("Tests must be run with features `proptest-support` and `compare`");
compile_error!(
    "Please enable the `proptest-support`, `compare` and `io` features in order to compile and run the tests.
     Example: `cargo test -p nalgebra-sparse --features proptest-support,compare,io`"
);

mod unit_tests;
@ -5,6 +5,8 @@ use nalgebra_sparse::{SparseEntry, SparseEntryMut, SparseFormatErrorKind};
|
||||||
use proptest::prelude::*;
|
use proptest::prelude::*;
|
||||||
use proptest::sample::subsequence;
|
use proptest::sample::subsequence;
|
||||||
|
|
||||||
|
use super::test_data_examples::InvalidCsrDataExamples;
|
||||||
|
|
||||||
use crate::assert_panics;
|
use crate::assert_panics;
|
||||||
use crate::common::csr_strategy;
|
use crate::common::csr_strategy;
|
||||||
|
|
||||||
|
@ -171,11 +173,36 @@ fn csr_matrix_valid_data() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn csr_matrix_valid_data_unsorted_column_indices() {
|
||||||
|
let csr = CsrMatrix::try_from_unsorted_csr_data(
|
||||||
|
4,
|
||||||
|
5,
|
||||||
|
vec![0, 3, 5, 8, 11],
|
||||||
|
vec![4, 1, 3, 3, 1, 2, 3, 0, 3, 4, 1],
|
||||||
|
vec![5, 1, 4, 7, 4, 2, 3, 1, 8, 9, 6],
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let expected_csr = CsrMatrix::try_from_csr_data(
|
||||||
|
4,
|
||||||
|
5,
|
||||||
|
vec![0, 3, 5, 8, 11],
|
||||||
|
vec![1, 3, 4, 1, 3, 0, 2, 3, 1, 3, 4],
|
||||||
|
vec![1, 4, 5, 4, 7, 1, 2, 3, 6, 8, 9],
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
assert_eq!(csr, expected_csr);
|
||||||
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn csr_matrix_try_from_invalid_csr_data() {
|
fn csr_matrix_try_from_invalid_csr_data() {
|
||||||
|
let invalid_data: InvalidCsrDataExamples = InvalidCsrDataExamples::new();
|
||||||
{
|
{
|
||||||
// Empty offset array (invalid length)
|
// Empty offset array (invalid length)
|
||||||
let matrix = CsrMatrix::try_from_csr_data(0, 0, Vec::new(), Vec::new(), Vec::<u32>::new());
|
let (offsets, indices, values) = invalid_data.empty_offset_array;
|
||||||
|
let matrix = CsrMatrix::try_from_csr_data(0, 0, offsets, indices, values);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
matrix.unwrap_err().kind(),
|
matrix.unwrap_err().kind(),
|
||||||
&SparseFormatErrorKind::InvalidStructure
|
&SparseFormatErrorKind::InvalidStructure
|
||||||
|
@ -184,10 +211,8 @@ fn csr_matrix_try_from_invalid_csr_data() {
|
||||||
|
|
||||||
{
|
{
|
||||||
// Offset array invalid length for arbitrary data
|
// Offset array invalid length for arbitrary data
|
||||||
let offsets = vec![0, 3, 5];
|
let (offsets, indices, values) =
|
||||||
let indices = vec![0, 1, 2, 3, 5];
|
invalid_data.offset_array_invalid_length_for_arbitrary_data;
|
||||||
let values = vec![0, 1, 2, 3, 4];
|
|
||||||
|
|
||||||
let matrix = CsrMatrix::try_from_csr_data(3, 6, offsets, indices, values);
|
let matrix = CsrMatrix::try_from_csr_data(3, 6, offsets, indices, values);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
matrix.unwrap_err().kind(),
|
matrix.unwrap_err().kind(),
|
||||||
|
@ -197,9 +222,7 @@ fn csr_matrix_try_from_invalid_csr_data() {
|
||||||
|
|
||||||
{
|
{
|
||||||
// Invalid first entry in offsets array
|
// Invalid first entry in offsets array
|
||||||
let offsets = vec![1, 2, 2, 5];
|
let (offsets, indices, values) = invalid_data.invalid_first_entry_in_offsets_array;
|
||||||
let indices = vec![0, 5, 1, 2, 3];
|
|
||||||
let values = vec![0, 1, 2, 3, 4];
|
|
||||||
let matrix = CsrMatrix::try_from_csr_data(3, 6, offsets, indices, values);
|
let matrix = CsrMatrix::try_from_csr_data(3, 6, offsets, indices, values);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
matrix.unwrap_err().kind(),
|
matrix.unwrap_err().kind(),
|
||||||
|
@ -209,9 +232,7 @@ fn csr_matrix_try_from_invalid_csr_data() {
|
||||||
|
|
||||||
{
|
{
|
||||||
// Invalid last entry in offsets array
|
// Invalid last entry in offsets array
|
||||||
let offsets = vec![0, 2, 2, 4];
|
let (offsets, indices, values) = invalid_data.invalid_last_entry_in_offsets_array;
|
||||||
let indices = vec![0, 5, 1, 2, 3];
|
|
||||||
let values = vec![0, 1, 2, 3, 4];
|
|
||||||
let matrix = CsrMatrix::try_from_csr_data(3, 6, offsets, indices, values);
|
let matrix = CsrMatrix::try_from_csr_data(3, 6, offsets, indices, values);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
matrix.unwrap_err().kind(),
|
matrix.unwrap_err().kind(),
|
||||||
|
@ -221,9 +242,7 @@ fn csr_matrix_try_from_invalid_csr_data() {
|
||||||
|
|
||||||
{
|
{
|
||||||
// Invalid length of offsets array
|
// Invalid length of offsets array
|
||||||
let offsets = vec![0, 2, 2];
|
let (offsets, indices, values) = invalid_data.invalid_length_of_offsets_array;
|
||||||
let indices = vec![0, 5, 1, 2, 3];
|
|
||||||
let values = vec![0, 1, 2, 3, 4];
|
|
||||||
let matrix = CsrMatrix::try_from_csr_data(3, 6, offsets, indices, values);
|
let matrix = CsrMatrix::try_from_csr_data(3, 6, offsets, indices, values);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
matrix.unwrap_err().kind(),
|
matrix.unwrap_err().kind(),
|
||||||
|
@ -233,9 +252,7 @@ fn csr_matrix_try_from_invalid_csr_data() {
|
||||||
|
|
||||||
{
|
{
|
||||||
// Nonmonotonic offsets
|
// Nonmonotonic offsets
|
||||||
let offsets = vec![0, 3, 2, 5];
|
let (offsets, indices, values) = invalid_data.nonmonotonic_offsets;
|
||||||
let indices = vec![0, 1, 2, 3, 4];
|
|
||||||
let values = vec![0, 1, 2, 3, 4];
|
|
||||||
let matrix = CsrMatrix::try_from_csr_data(3, 6, offsets, indices, values);
|
let matrix = CsrMatrix::try_from_csr_data(3, 6, offsets, indices, values);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
matrix.unwrap_err().kind(),
|
matrix.unwrap_err().kind(),
|
||||||
|
@ -245,9 +262,7 @@ fn csr_matrix_try_from_invalid_csr_data() {
|
||||||
|
|
||||||
{
|
{
|
||||||
// Nonmonotonic minor indices
|
// Nonmonotonic minor indices
|
||||||
let offsets = vec![0, 2, 2, 5];
|
let (offsets, indices, values) = invalid_data.nonmonotonic_minor_indices;
|
||||||
let indices = vec![0, 2, 3, 1, 4];
|
|
||||||
let values = vec![0, 1, 2, 3, 4];
|
|
||||||
let matrix = CsrMatrix::try_from_csr_data(3, 6, offsets, indices, values);
|
let matrix = CsrMatrix::try_from_csr_data(3, 6, offsets, indices, values);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
matrix.unwrap_err().kind(),
|
matrix.unwrap_err().kind(),
|
||||||
|
@ -257,9 +272,7 @@ fn csr_matrix_try_from_invalid_csr_data() {
|
||||||
|
|
||||||
{
|
{
|
||||||
// Minor index out of bounds
|
// Minor index out of bounds
|
||||||
let offsets = vec![0, 2, 2, 5];
|
let (offsets, indices, values) = invalid_data.minor_index_out_of_bounds;
|
||||||
let indices = vec![0, 6, 1, 2, 3];
|
|
||||||
let values = vec![0, 1, 2, 3, 4];
|
|
||||||
let matrix = CsrMatrix::try_from_csr_data(3, 6, offsets, indices, values);
|
let matrix = CsrMatrix::try_from_csr_data(3, 6, offsets, indices, values);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
matrix.unwrap_err().kind(),
|
matrix.unwrap_err().kind(),
|
||||||
|
@ -269,9 +282,7 @@ fn csr_matrix_try_from_invalid_csr_data() {
|
||||||
|
|
||||||
{
|
{
|
||||||
// Duplicate entry
|
// Duplicate entry
|
||||||
let offsets = vec![0, 2, 2, 5];
|
let (offsets, indices, values) = invalid_data.duplicate_entry;
|
||||||
let indices = vec![0, 5, 2, 2, 3];
|
|
||||||
let values = vec![0, 1, 2, 3, 4];
|
|
||||||
let matrix = CsrMatrix::try_from_csr_data(3, 6, offsets, indices, values);
|
let matrix = CsrMatrix::try_from_csr_data(3, 6, offsets, indices, values);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
matrix.unwrap_err().kind(),
|
matrix.unwrap_err().kind(),
|
||||||
|
@ -280,6 +291,91 @@ fn csr_matrix_try_from_invalid_csr_data() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn csr_matrix_try_from_unsorted_invalid_csr_data() {
|
||||||
|
let invalid_data: InvalidCsrDataExamples = InvalidCsrDataExamples::new();
|
||||||
|
{
|
||||||
|
// Empty offset array (invalid length)
|
||||||
|
let (offsets, indices, values) = invalid_data.empty_offset_array;
|
||||||
|
let matrix = CsrMatrix::try_from_unsorted_csr_data(0, 0, offsets, indices, values);
|
||||||
|
assert_eq!(
|
||||||
|
matrix.unwrap_err().kind(),
|
||||||
|
&SparseFormatErrorKind::InvalidStructure
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
// Offset array invalid length for arbitrary data
|
||||||
|
let (offsets, indices, values) =
|
||||||
|
invalid_data.offset_array_invalid_length_for_arbitrary_data;
|
||||||
|
let matrix = CsrMatrix::try_from_unsorted_csr_data(3, 6, offsets, indices, values);
|
||||||
|
assert_eq!(
|
||||||
|
matrix.unwrap_err().kind(),
|
||||||
|
&SparseFormatErrorKind::InvalidStructure
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
// Invalid first entry in offsets array
|
||||||
|
let (offsets, indices, values) = invalid_data.invalid_first_entry_in_offsets_array;
|
||||||
|
let matrix = CsrMatrix::try_from_unsorted_csr_data(3, 6, offsets, indices, values);
|
||||||
|
assert_eq!(
|
||||||
|
matrix.unwrap_err().kind(),
|
||||||
|
&SparseFormatErrorKind::InvalidStructure
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
// Invalid last entry in offsets array
|
||||||
|
let (offsets, indices, values) = invalid_data.invalid_last_entry_in_offsets_array;
|
||||||
|
let matrix = CsrMatrix::try_from_unsorted_csr_data(3, 6, offsets, indices, values);
|
||||||
|
assert_eq!(
|
||||||
|
matrix.unwrap_err().kind(),
|
||||||
|
&SparseFormatErrorKind::InvalidStructure
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
// Invalid length of offsets array
|
||||||
|
let (offsets, indices, values) = invalid_data.invalid_length_of_offsets_array;
|
||||||
|
let matrix = CsrMatrix::try_from_unsorted_csr_data(3, 6, offsets, indices, values);
|
||||||
|
assert_eq!(
|
||||||
|
matrix.unwrap_err().kind(),
|
||||||
|
&SparseFormatErrorKind::InvalidStructure
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
// Nonmonotonic offsets
|
||||||
|
let (offsets, indices, values) = invalid_data.nonmonotonic_offsets;
|
||||||
|
let matrix = CsrMatrix::try_from_unsorted_csr_data(3, 6, offsets, indices, values);
|
||||||
|
assert_eq!(
|
||||||
|
matrix.unwrap_err().kind(),
|
||||||
|
&SparseFormatErrorKind::InvalidStructure
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
// Minor index out of bounds
|
||||||
|
let (offsets, indices, values) = invalid_data.minor_index_out_of_bounds;
|
||||||
|
let matrix = CsrMatrix::try_from_unsorted_csr_data(3, 6, offsets, indices, values);
|
||||||
|
assert_eq!(
|
||||||
|
matrix.unwrap_err().kind(),
|
||||||
|
&SparseFormatErrorKind::IndexOutOfBounds
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
// Duplicate entry
|
||||||
|
let (offsets, indices, values) = invalid_data.duplicate_entry;
|
||||||
|
let matrix = CsrMatrix::try_from_unsorted_csr_data(3, 6, offsets, indices, values);
|
||||||
|
assert_eq!(
|
||||||
|
matrix.unwrap_err().kind(),
|
||||||
|
&SparseFormatErrorKind::DuplicateEntry
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn csr_disassemble_avoids_clone_when_owned() {
|
fn csr_disassemble_avoids_clone_when_owned() {
|
||||||
// Test that disassemble avoids cloning the sparsity pattern when it holds the sole reference
|
// Test that disassemble avoids cloning the sparsity pattern when it holds the sole reference
|
||||||
|
|
|
@ -0,0 +1,345 @@
|
||||||
|
use matrixcompare::assert_matrix_eq;
|
||||||
|
use nalgebra::dmatrix;
|
||||||
|
use nalgebra::Complex;
|
||||||
|
use nalgebra_sparse::io::load_coo_from_matrix_market_str;
|
||||||
|
use nalgebra_sparse::CooMatrix;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
#[rustfmt::skip]
|
||||||
|
fn test_matrixmarket_sparse_real_general_empty() {
|
||||||
|
// Test several valid zero-shapes of a sparse matrix
|
||||||
|
let shapes = vec![ (0, 0), (1, 0), (0, 1) ];
|
||||||
|
let strings: Vec<String> = shapes
|
||||||
|
.iter()
|
||||||
|
.map(|(m, n)| format!("%%MatrixMarket matrix coordinate real general\n {} {} 0", m, n))
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
for (shape,string) in shapes.iter().zip(strings.iter()) {
|
||||||
|
let sparse_mat = load_coo_from_matrix_market_str::<f32>(string).unwrap();
|
||||||
|
assert_eq!(sparse_mat.nrows(), shape.0);
|
||||||
|
assert_eq!(sparse_mat.ncols(), shape.1);
|
||||||
|
assert_eq!(sparse_mat.nnz(), 0);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
#[rustfmt::skip]
|
||||||
|
fn test_matrixmarket_dense_real_general_empty() {
|
||||||
|
// Test several valid zero-shapes of a dense matrix
|
||||||
|
let shapes = vec![ (0, 0), (1, 0), (0, 1) ];
|
||||||
|
let strings: Vec<String> = shapes
|
||||||
|
.iter()
|
||||||
|
.map(|(m, n)| format!("%%MatrixMarket matrix array real general\n {} {}", m, n))
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
for (shape,string) in shapes.iter().zip(strings.iter()) {
|
||||||
|
let sparse_mat = load_coo_from_matrix_market_str::<f32>(string).unwrap();
|
||||||
|
assert_eq!(sparse_mat.nrows(), shape.0);
|
||||||
|
assert_eq!(sparse_mat.ncols(), shape.1);
|
||||||
|
assert_eq!(sparse_mat.nnz(), 0);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
#[rustfmt::skip]
|
||||||
|
fn test_matrixmarket_sparse_real_general() {
|
||||||
|
let file_str = r#"
|
||||||
|
%%MatrixMarket matrix CoOrdinate real general
|
||||||
|
% This is also an example of free-format features.
|
||||||
|
%=================================================================================
|
||||||
|
%
|
||||||
|
% This ASCII file represents a sparse MxN matrix with L
|
||||||
|
% nonzeros in the following Matrix Market format:
|
||||||
|
%
|
||||||
|
% +----------------------------------------------+
|
||||||
|
% |%%MatrixMarket matrix coordinate real general | <--- header line
|
||||||
|
% |% | <--+
|
||||||
|
% |% comments | |-- 0 or more comment lines
|
||||||
|
% |% | <--+
|
||||||
|
% | M T L | <--- rows, columns, entries
|
||||||
|
% | I1 J1 A(I1, J1) | <--+
|
||||||
|
% | I2 J2 A(I2, J2) | |
|
||||||
|
% | I3 J3 A(I3, J3) | |-- L lines
|
||||||
|
% | . . . | |
|
||||||
|
% | IL JL A(IL, JL) | <--+
|
||||||
|
% +----------------------------------------------+
|
||||||
|
%
|
||||||
|
% Indices are 1-based, i.e. A(1,1) is the first element.
|
||||||
|
%
|
||||||
|
%=================================================================================
|
||||||
|
5 5 8
|
||||||
|
1 1 1
|
||||||
|
|
||||||
|
2 2 1.050e+01
|
||||||
|
3 3 1.500e-02
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
1 4 6.000e+00
|
||||||
|
|
||||||
|
4 2 2.505e+02
|
||||||
|
|
||||||
|
4 4 -2.800e+02
|
||||||
|
4 5 3.332e+01
|
||||||
|
5 5 1.200e+01
|
||||||
|
"#;
|
||||||
|
let sparse_mat = load_coo_from_matrix_market_str::<f32>(file_str).unwrap();
|
||||||
|
let expected = dmatrix![
|
||||||
|
1.0, 0.0, 0.0, 6.0, 0.0;
|
||||||
|
0.0, 10.5, 0.0, 0.0, 0.0;
|
||||||
|
0.0, 0.0, 0.015, 0.0, 0.0;
|
||||||
|
0.0, 250.5, 0.0, -280.0, 33.32;
|
||||||
|
0.0, 0.0, 0.0, 0.0, 12.0;
|
||||||
|
];
|
||||||
|
assert_matrix_eq!(sparse_mat, expected);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
#[rustfmt::skip]
|
||||||
|
fn test_matrixmarket_sparse_int_symmetric() {
|
||||||
|
let file_str = r#"
|
||||||
|
%%MatrixMarket matrix coordinate integer symmetric
|
||||||
|
%
|
||||||
|
5 5 9
|
||||||
|
1 1 11
|
||||||
|
2 2 22
|
||||||
|
3 2 23
|
||||||
|
3 3 33
|
||||||
|
4 2 24
|
||||||
|
4 4 44
|
||||||
|
5 1 -15
|
||||||
|
5 3 35
|
||||||
|
5 5 55
|
||||||
|
"#;
|
||||||
|
let sparse_mat = load_coo_from_matrix_market_str::<i128>(file_str).unwrap();
|
||||||
|
let expected = dmatrix![
|
||||||
|
11, 0, 0, 0, -15;
|
||||||
|
0, 22, 23, 24, 0;
|
||||||
|
0, 23, 33, 0, 35;
|
||||||
|
0, 24, 0, 44, 0;
|
||||||
|
-15, 0, 35, 0, 55;
|
||||||
|
];
|
||||||
|
assert_matrix_eq!(sparse_mat, expected);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
#[rustfmt::skip]
|
||||||
|
fn test_matrixmarket_sparse_complex_hermitian() {
|
||||||
|
let file_str = r#"
|
||||||
|
%%MatrixMarket matrix coordinate complex hermitian
|
||||||
|
%
|
||||||
|
5 5 7
|
||||||
|
1 1 1.0 0.0
|
||||||
|
2 2 10.5 0.0
|
||||||
|
4 2 250.5 22.22
|
||||||
|
3 3 0.015 0.0
|
||||||
|
4 4 -2.8e2 0.0
|
||||||
|
5 5 12.0 0.0
|
||||||
|
5 4 0.0 33.32
|
||||||
|
|
||||||
|
"#;
|
||||||
|
let sparse_mat = load_coo_from_matrix_market_str::<Complex<f64>>(file_str).unwrap();
|
||||||
|
let expected = dmatrix![
|
||||||
|
Complex::<f64>{re:1.0,im:0.0}, Complex::<f64>{re:0.0,im:0.0}, Complex::<f64>{re:0.0,im:0.0}, Complex::<f64>{re:0.0,im:0.0},Complex::<f64>{re:0.0,im:0.0};
|
||||||
|
Complex::<f64>{re:0.0,im:0.0}, Complex::<f64>{re:10.5,im:0.0}, Complex::<f64>{re:0.0,im:0.0}, Complex::<f64>{re:250.5,im:-22.22},Complex::<f64>{re:0.0,im:0.0};
|
||||||
|
Complex::<f64>{re:0.0,im:0.0}, Complex::<f64>{re:0.0,im:0.0}, Complex::<f64>{re:0.015,im:0.0}, Complex::<f64>{re:0.0,im:0.0},Complex::<f64>{re:0.0,im:0.0};
|
||||||
|
Complex::<f64>{re:0.0,im:0.0}, Complex::<f64>{re:250.5,im:22.22}, Complex::<f64>{re:0.0,im:0.0}, Complex::<f64>{re:-280.0,im:0.0},Complex::<f64>{re:0.0,im:-33.32};
|
||||||
|
Complex::<f64>{re:0.0,im:0.0}, Complex::<f64>{re:0.0,im:0.0}, Complex::<f64>{re:0.0,im:0.0}, Complex::<f64>{re:0.0,im:33.32},Complex::<f64>{re:12.0,im:0.0};
|
||||||
|
];
|
||||||
|
assert_matrix_eq!(sparse_mat, expected);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
#[rustfmt::skip]
|
||||||
|
fn test_matrixmarket_sparse_real_skew() {
|
||||||
|
let file_str = r#"
|
||||||
|
%%MatrixMarket matrix coordinate real skew-symmetric
|
||||||
|
%
|
||||||
|
5 5 4
|
||||||
|
3 2 -23.0
|
||||||
|
4 2 -24.0
|
||||||
|
5 1 -15.0
|
||||||
|
5 3 -35.0
|
||||||
|
"#;
|
||||||
|
let sparse_mat = load_coo_from_matrix_market_str::<f64>(file_str).unwrap();
|
||||||
|
let expected = dmatrix![
|
||||||
|
0.0, 0.0, 0.0, 0.0, 15.0;
|
||||||
|
0.0, 0.0, 23.0, 24.0, 0.0;
|
||||||
|
0.0, -23.0, 0.0, 0.0, 35.0;
|
||||||
|
0.0, -24.0, 0.0, 0.0, 0.0;
|
||||||
|
-15.0, 0.0, -35.0, 0.0, 0.0;
|
||||||
|
];
|
||||||
|
assert_matrix_eq!(sparse_mat, expected);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
#[rustfmt::skip]
|
||||||
|
fn test_matrixmarket_sparse_pattern_general() {
|
||||||
|
let file_str = r#"
|
||||||
|
%%MatrixMarket matrix coordinate pattern general
|
||||||
|
%
|
||||||
|
5 5 10
|
||||||
|
1 1
|
||||||
|
1 5
|
||||||
|
2 3
|
||||||
|
2 4
|
||||||
|
3 2
|
||||||
|
3 5
|
||||||
|
4 1
|
||||||
|
5 2
|
||||||
|
5 4
|
||||||
|
5 5
|
||||||
|
"#;
|
||||||
|
let pattern_matrix = load_coo_from_matrix_market_str::<()>(file_str).unwrap();
|
||||||
|
let nrows = pattern_matrix.nrows();
|
||||||
|
let ncols = pattern_matrix.ncols();
|
||||||
|
let (row_idx, col_idx, val) = pattern_matrix.disassemble();
|
||||||
|
let values = vec![1; val.len()];
|
||||||
|
let sparse_mat = CooMatrix::try_from_triplets(nrows, ncols, row_idx, col_idx, values).unwrap();
|
||||||
|
let expected = dmatrix![
|
||||||
|
1, 0, 0, 0, 1;
|
||||||
|
0, 0, 1, 1, 0;
|
||||||
|
0, 1, 0, 0, 1;
|
||||||
|
1, 0, 0, 0, 0;
|
||||||
|
0, 1, 0, 1, 1;
|
||||||
|
];
|
||||||
|
assert_matrix_eq!(sparse_mat, expected);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
#[rustfmt::skip]
|
||||||
|
fn test_matrixmarket_dense_real_general() {
|
||||||
|
let file_str = r#"
|
||||||
|
%%MatrixMarket matrix array real general
|
||||||
|
%
|
||||||
|
4 3
|
||||||
|
1.0
|
||||||
|
2.0
|
||||||
|
3.0
|
||||||
|
4.0
|
||||||
|
5.0
|
||||||
|
6.0
|
||||||
|
7.0
|
||||||
|
8.0
|
||||||
|
9.0
|
||||||
|
10.0
|
||||||
|
11.0
|
||||||
|
12.0
|
||||||
|
|
||||||
|
"#;
|
||||||
|
let sparse_mat = load_coo_from_matrix_market_str::<f32>(file_str).unwrap();
|
||||||
|
let expected = dmatrix![
|
||||||
|
1.0, 5.0, 9.0;
|
||||||
|
2.0, 6.0, 10.0;
|
||||||
|
3.0, 7.0, 11.0;
|
||||||
|
4.0, 8.0, 12.0;
|
||||||
|
];
|
||||||
|
assert_matrix_eq!(sparse_mat, expected);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
#[rustfmt::skip]
|
||||||
|
fn test_matrixmarket_dense_real_symmetric() {
|
||||||
|
let file_str = r#"
|
||||||
|
%%MatrixMarket matrix array real symmetric
|
||||||
|
%
|
||||||
|
4 4
|
||||||
|
1.0
|
||||||
|
2.0
|
||||||
|
3.0
|
||||||
|
4.0
|
||||||
|
5.0
|
||||||
|
6.0
|
||||||
|
7.0
|
||||||
|
8.0
|
||||||
|
9.0
|
||||||
|
10.0
|
||||||
|
|
||||||
|
"#;
|
||||||
|
let sparse_mat = load_coo_from_matrix_market_str::<f32>(file_str).unwrap();
|
||||||
|
let expected = dmatrix![
|
||||||
|
1.0, 2.0, 3.0, 4.0;
|
||||||
|
2.0, 5.0, 6.0, 7.0;
|
||||||
|
3.0, 6.0, 8.0, 9.0;
|
||||||
|
4.0, 7.0, 9.0, 10.0;
|
||||||
|
];
|
||||||
|
assert_matrix_eq!(sparse_mat, expected);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
#[rustfmt::skip]
|
||||||
|
fn test_matrixmarket_dense_complex_hermitian() {
|
||||||
|
let file_str = r#"
|
||||||
|
%%MatrixMarket matrix array complex hermitian
|
||||||
|
%
|
||||||
|
4 4
|
||||||
|
1.0 0.0
|
||||||
|
2.0 2.0
|
||||||
|
3.0 3.0
|
||||||
|
4.0 4.0
|
||||||
|
5.0 0.0
|
||||||
|
6.0 6.0
|
||||||
|
7.0 7.0
|
||||||
|
8.0 0.0
|
||||||
|
9.0 9.0
|
||||||
|
10.0 0.0
|
||||||
|
|
||||||
|
"#;
|
||||||
|
let sparse_mat = load_coo_from_matrix_market_str::<Complex<f64>>(file_str).unwrap();
|
||||||
|
let expected = dmatrix![
|
||||||
|
Complex::<f64>{re:1.0,im:0.0}, Complex::<f64>{re:2.0,im:-2.0} ,Complex::<f64>{re:3.0,im:-3.0} ,Complex::<f64>{re:4.0,im:-4.0};
|
||||||
|
Complex::<f64>{re:2.0,im:2.0}, Complex::<f64>{re:5.0,im:0.0} ,Complex::<f64>{re:6.0,im:-6.0} ,Complex::<f64>{re:7.0,im:-7.0};
|
||||||
|
Complex::<f64>{re:3.0,im:3.0}, Complex::<f64>{re:6.0,im:6.0} ,Complex::<f64>{re:8.0,im:0.0} ,Complex::<f64>{re:9.0,im:-9.0};
|
||||||
|
Complex::<f64>{re:4.0,im:4.0}, Complex::<f64>{re:7.0,im:7.0} ,Complex::<f64>{re:9.0,im:9.0} ,Complex::<f64>{re:10.0,im:0.0};
|
||||||
|
];
|
||||||
|
assert_matrix_eq!(sparse_mat, expected);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
#[rustfmt::skip]
|
||||||
|
fn test_matrixmarket_dense_int_skew() {
|
||||||
|
let file_str = r#"
|
||||||
|
%%MatrixMarket matrix array integer skew-symmetric
|
||||||
|
%
|
||||||
|
4 4
|
||||||
|
1
|
||||||
|
2
|
||||||
|
3
|
||||||
|
4
|
||||||
|
5
|
||||||
|
6
|
||||||
|
"#;
|
||||||
|
let sparse_mat = load_coo_from_matrix_market_str::<i32>(file_str).unwrap();
|
||||||
|
let expected = dmatrix![
|
||||||
|
0,-1,-2,-3;
|
||||||
|
1, 0,-4,-5;
|
||||||
|
2, 4, 0,-6;
|
||||||
|
3, 5, 6, 0;
|
||||||
|
];
|
||||||
|
assert_matrix_eq!(sparse_mat, expected);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
#[rustfmt::skip]
|
||||||
|
fn test_matrixmarket_dense_complex_general() {
|
||||||
|
let file_str = r#"
|
||||||
|
%%MatrixMarket matrix array complex general
|
||||||
|
%
|
||||||
|
2 2
|
||||||
|
1 0
|
||||||
|
1 0
|
||||||
|
1 0
|
||||||
|
1 0
|
||||||
|
"#;
|
||||||
|
let sparse_mat = load_coo_from_matrix_market_str::<Complex<f32>>(file_str).unwrap();
|
||||||
|
let expected = dmatrix![
|
||||||
|
Complex::<f32>{re:1.0,im:0.0},Complex::<f32>{re:1.0,im:0.0};
|
||||||
|
Complex::<f32>{re:1.0,im:0.0},Complex::<f32>{re:1.0,im:0.0};
|
||||||
|
];
|
||||||
|
assert_matrix_eq!(sparse_mat, expected);
|
||||||
|
}
|
|
@ -3,6 +3,8 @@ mod convert_serial;
mod coo;
mod csc;
mod csr;
mod matrix_market;
mod ops;
mod pattern;
mod proptest;
mod test_data_examples;
@ -0,0 +1,44 @@
/// Examples of *invalid* raw CSR data `(offsets, indices, values)`.
pub struct InvalidCsrDataExamples {
    pub empty_offset_array: (Vec<usize>, Vec<usize>, Vec<i32>),
    pub offset_array_invalid_length_for_arbitrary_data: (Vec<usize>, Vec<usize>, Vec<i32>),
    pub invalid_first_entry_in_offsets_array: (Vec<usize>, Vec<usize>, Vec<i32>),
    pub invalid_last_entry_in_offsets_array: (Vec<usize>, Vec<usize>, Vec<i32>),
    pub invalid_length_of_offsets_array: (Vec<usize>, Vec<usize>, Vec<i32>),
    pub nonmonotonic_offsets: (Vec<usize>, Vec<usize>, Vec<i32>),
    pub nonmonotonic_minor_indices: (Vec<usize>, Vec<usize>, Vec<i32>),
    pub minor_index_out_of_bounds: (Vec<usize>, Vec<usize>, Vec<i32>),
    pub duplicate_entry: (Vec<usize>, Vec<usize>, Vec<i32>),
}

impl InvalidCsrDataExamples {
    pub fn new() -> Self {
        let empty_offset_array = (Vec::<usize>::new(), Vec::<usize>::new(), Vec::<i32>::new());
        let offset_array_invalid_length_for_arbitrary_data =
            (vec![0, 3, 5], vec![0, 1, 2, 3, 5], vec![0, 1, 2, 3, 4]);
        let invalid_first_entry_in_offsets_array =
            (vec![1, 2, 2, 5], vec![0, 5, 1, 2, 3], vec![0, 1, 2, 3, 4]);
        let invalid_last_entry_in_offsets_array =
            (vec![0, 2, 2, 4], vec![0, 5, 1, 2, 3], vec![0, 1, 2, 3, 4]);
        let invalid_length_of_offsets_array =
            (vec![0, 2, 2], vec![0, 5, 1, 2, 3], vec![0, 1, 2, 3, 4]);
        let nonmonotonic_offsets = (vec![0, 3, 2, 5], vec![0, 1, 2, 3, 4], vec![0, 1, 2, 3, 4]);
        let nonmonotonic_minor_indices =
            (vec![0, 2, 2, 5], vec![0, 2, 3, 1, 4], vec![0, 1, 2, 3, 4]);
        let minor_index_out_of_bounds =
            (vec![0, 2, 2, 5], vec![0, 6, 1, 2, 3], vec![0, 1, 2, 3, 4]);
        let duplicate_entry = (vec![0, 2, 2, 5], vec![0, 5, 2, 2, 3], vec![0, 1, 2, 3, 4]);

        return Self {
            empty_offset_array,
            offset_array_invalid_length_for_arbitrary_data,
            invalid_first_entry_in_offsets_array,
            invalid_last_entry_in_offsets_array,
            invalid_length_of_offsets_array,
            nonmonotonic_minor_indices,
            nonmonotonic_offsets,
            minor_index_out_of_bounds,
            duplicate_entry,
        };
    }
}
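For contrast with the invalid examples above, here is a small hedged sketch (not part of the diff) of raw CSR data for the same 3x6 shape that does satisfy the invariants these tests probe: the offsets start at 0 and end at the number of entries, the offsets are monotonically non-decreasing, and each row's column indices are strictly increasing and in bounds.

// Hedged sketch: valid CSR data for a 3x6 matrix (row 0 -> columns 0 and 5,
// row 1 empty, row 2 -> columns 1, 2, 3).
use nalgebra_sparse::csr::CsrMatrix;

fn main() {
    let offsets = vec![0, 2, 2, 5];
    let indices = vec![0, 5, 1, 2, 3];
    let values = vec![10, 20, 30, 40, 50];
    let matrix = CsrMatrix::try_from_csr_data(3, 6, offsets, indices, values).unwrap();
    assert_eq!(matrix.nnz(), 5);
}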
@ -32,6 +32,10 @@ use std::mem;
/// A array-based statically sized matrix data storage.
#[repr(transparent)]
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
#[cfg_attr(
    all(not(target_os = "cuda"), feature = "cuda"),
    derive(cust::DeviceCopy)
)]
pub struct ArrayStorage<T, const R: usize, const C: usize>(pub [[T; R]; C]);

impl<T, const R: usize, const C: usize> ArrayStorage<T, R, C> {
@ -20,10 +20,10 @@ use crate::base::{
|
||||||
MatrixSliceMut, OMatrix, Scalar,
|
MatrixSliceMut, OMatrix, Scalar,
|
||||||
};
|
};
|
||||||
#[cfg(any(feature = "std", feature = "alloc"))]
|
#[cfg(any(feature = "std", feature = "alloc"))]
|
||||||
use crate::base::{DVector, VecStorage};
|
use crate::base::{DVector, RowDVector, VecStorage};
|
||||||
use crate::base::{SliceStorage, SliceStorageMut};
|
use crate::base::{SliceStorage, SliceStorageMut};
|
||||||
use crate::constraint::DimEq;
|
use crate::constraint::DimEq;
|
||||||
use crate::{IsNotStaticOne, RowSVector, SMatrix, SVector};
|
use crate::{IsNotStaticOne, RowSVector, SMatrix, SVector, VectorSlice, VectorSliceMut};
|
||||||
use std::mem::MaybeUninit;
|
use std::mem::MaybeUninit;
|
||||||
|
|
||||||
// TODO: too bad this won't work for slice conversions.
|
// TODO: too bad this won't work for slice conversions.
|
||||||
|
@ -125,6 +125,24 @@ impl<T: Scalar, const D: usize> From<SVector<T, D>> for [T; D] {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl<'a, T: Scalar, RStride: Dim, CStride: Dim, const D: usize>
|
||||||
|
From<VectorSlice<'a, T, Const<D>, RStride, CStride>> for [T; D]
|
||||||
|
{
|
||||||
|
#[inline]
|
||||||
|
fn from(vec: VectorSlice<'a, T, Const<D>, RStride, CStride>) -> Self {
|
||||||
|
vec.into_owned().into()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<'a, T: Scalar, RStride: Dim, CStride: Dim, const D: usize>
|
||||||
|
From<VectorSliceMut<'a, T, Const<D>, RStride, CStride>> for [T; D]
|
||||||
|
{
|
||||||
|
#[inline]
|
||||||
|
fn from(vec: VectorSliceMut<'a, T, Const<D>, RStride, CStride>) -> Self {
|
||||||
|
vec.into_owned().into()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl<T: Scalar, const D: usize> From<[T; D]> for RowSVector<T, D>
|
impl<T: Scalar, const D: usize> From<[T; D]> for RowSVector<T, D>
|
||||||
where
|
where
|
||||||
Const<D>: IsNotStaticOne,
|
Const<D>: IsNotStaticOne,
|
||||||
|
@ -197,8 +215,26 @@ impl<T: Scalar, const R: usize, const C: usize> From<[[T; R]; C]> for SMatrix<T,
|
||||||
|
|
||||||
impl<T: Scalar, const R: usize, const C: usize> From<SMatrix<T, R, C>> for [[T; R]; C] {
|
impl<T: Scalar, const R: usize, const C: usize> From<SMatrix<T, R, C>> for [[T; R]; C] {
|
||||||
#[inline]
|
#[inline]
|
||||||
fn from(vec: SMatrix<T, R, C>) -> Self {
|
fn from(mat: SMatrix<T, R, C>) -> Self {
|
||||||
vec.data.0
|
mat.data.0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<'a, T: Scalar, RStride: Dim, CStride: Dim, const R: usize, const C: usize>
|
||||||
|
From<MatrixSlice<'a, T, Const<R>, Const<C>, RStride, CStride>> for [[T; R]; C]
|
||||||
|
{
|
||||||
|
#[inline]
|
||||||
|
fn from(mat: MatrixSlice<'a, T, Const<R>, Const<C>, RStride, CStride>) -> Self {
|
||||||
|
mat.into_owned().into()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<'a, T: Scalar, RStride: Dim, CStride: Dim, const R: usize, const C: usize>
|
||||||
|
From<MatrixSliceMut<'a, T, Const<R>, Const<C>, RStride, CStride>> for [[T; R]; C]
|
||||||
|
{
|
||||||
|
#[inline]
|
||||||
|
fn from(mat: MatrixSliceMut<'a, T, Const<R>, Const<C>, RStride, CStride>) -> Self {
|
||||||
|
mat.into_owned().into()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -453,6 +489,14 @@ impl<'a, T: Scalar> From<Vec<T>> for DVector<T> {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(any(feature = "std", feature = "alloc"))]
|
||||||
|
impl<'a, T: Scalar> From<Vec<T>> for RowDVector<T> {
|
||||||
|
#[inline]
|
||||||
|
fn from(vec: Vec<T>) -> Self {
|
||||||
|
Self::from_vec(vec)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl<'a, T: Scalar + Copy, R: Dim, C: Dim, S: RawStorage<T, R, C> + IsContiguous>
|
impl<'a, T: Scalar + Copy, R: Dim, C: Dim, S: RawStorage<T, R, C> + IsContiguous>
|
||||||
From<&'a Matrix<T, R, C, S>> for &'a [T]
|
From<&'a Matrix<T, R, C, S>> for &'a [T]
|
||||||
{
|
{
|
||||||
|
|
|
@ -195,7 +195,7 @@ where
|
||||||
unsafe fn reallocate_copy(
|
unsafe fn reallocate_copy(
|
||||||
rto: Const<RTO>,
|
rto: Const<RTO>,
|
||||||
cto: Const<CTO>,
|
cto: Const<CTO>,
|
||||||
mut buf: <Self as Allocator<T, RFrom, CFrom>>::Buffer,
|
buf: <Self as Allocator<T, RFrom, CFrom>>::Buffer,
|
||||||
) -> ArrayStorage<MaybeUninit<T>, RTO, CTO> {
|
) -> ArrayStorage<MaybeUninit<T>, RTO, CTO> {
|
||||||
let mut res = <Self as Allocator<T, Const<RTO>, Const<CTO>>>::allocate_uninit(rto, cto);
|
let mut res = <Self as Allocator<T, Const<RTO>, Const<CTO>>>::allocate_uninit(rto, cto);
|
||||||
|
|
||||||
|
@ -226,7 +226,7 @@ where
|
||||||
unsafe fn reallocate_copy(
|
unsafe fn reallocate_copy(
|
||||||
rto: Dynamic,
|
rto: Dynamic,
|
||||||
cto: CTo,
|
cto: CTo,
|
||||||
mut buf: ArrayStorage<T, RFROM, CFROM>,
|
buf: ArrayStorage<T, RFROM, CFROM>,
|
||||||
) -> VecStorage<MaybeUninit<T>, Dynamic, CTo> {
|
) -> VecStorage<MaybeUninit<T>, Dynamic, CTo> {
|
||||||
let mut res = <Self as Allocator<T, Dynamic, CTo>>::allocate_uninit(rto, cto);
|
let mut res = <Self as Allocator<T, Dynamic, CTo>>::allocate_uninit(rto, cto);
|
||||||
|
|
||||||
|
@ -257,7 +257,7 @@ where
|
||||||
unsafe fn reallocate_copy(
|
unsafe fn reallocate_copy(
|
||||||
rto: RTo,
|
rto: RTo,
|
||||||
cto: Dynamic,
|
cto: Dynamic,
|
||||||
mut buf: ArrayStorage<T, RFROM, CFROM>,
|
buf: ArrayStorage<T, RFROM, CFROM>,
|
||||||
) -> VecStorage<MaybeUninit<T>, RTo, Dynamic> {
|
) -> VecStorage<MaybeUninit<T>, RTo, Dynamic> {
|
||||||
let mut res = <Self as Allocator<T, RTo, Dynamic>>::allocate_uninit(rto, cto);
|
let mut res = <Self as Allocator<T, RTo, Dynamic>>::allocate_uninit(rto, cto);
|
||||||
|
|
||||||
|
|
|
@ -13,6 +13,10 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
||||||
|
|
||||||
/// Dim of dynamically-sized algebraic entities.
|
/// Dim of dynamically-sized algebraic entities.
|
||||||
#[derive(Clone, Copy, Eq, PartialEq, Debug)]
|
#[derive(Clone, Copy, Eq, PartialEq, Debug)]
|
||||||
|
#[cfg_attr(
|
||||||
|
all(not(target_os = "cuda"), feature = "cuda"),
|
||||||
|
derive(cust::DeviceCopy)
|
||||||
|
)]
|
||||||
pub struct Dynamic {
|
pub struct Dynamic {
|
||||||
value: usize,
|
value: usize,
|
||||||
}
|
}
|
||||||
|
@ -55,7 +59,7 @@ impl IsNotStaticOne for Dynamic {}
|
||||||
|
|
||||||
/// Trait implemented by any type that can be used as a dimension. This includes type-level
|
/// Trait implemented by any type that can be used as a dimension. This includes type-level
|
||||||
/// integers and `Dynamic` (for dimensions not known at compile-time).
|
/// integers and `Dynamic` (for dimensions not known at compile-time).
|
||||||
pub trait Dim: Any + Debug + Copy + PartialEq + Send + Sync {
|
pub unsafe trait Dim: Any + Debug + Copy + PartialEq + Send + Sync {
|
||||||
#[inline(always)]
|
#[inline(always)]
|
||||||
fn is<D: Dim>() -> bool {
|
fn is<D: Dim>() -> bool {
|
||||||
TypeId::of::<Self>() == TypeId::of::<D>()
|
TypeId::of::<Self>() == TypeId::of::<D>()
|
||||||
|
@ -74,7 +78,7 @@ pub trait Dim: Any + Debug + Copy + PartialEq + Send + Sync {
|
||||||
fn from_usize(dim: usize) -> Self;
|
fn from_usize(dim: usize) -> Self;
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Dim for Dynamic {
|
unsafe impl Dim for Dynamic {
|
||||||
#[inline]
|
#[inline]
|
||||||
fn try_to_usize() -> Option<usize> {
|
fn try_to_usize() -> Option<usize> {
|
||||||
None
|
None
|
||||||
|
@ -197,6 +201,10 @@ dim_ops!(
|
||||||
);
|
);
|
||||||
|
|
||||||
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
|
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
|
||||||
|
#[cfg_attr(
|
||||||
|
all(not(target_os = "cuda"), feature = "cuda"),
|
||||||
|
derive(cust::DeviceCopy)
|
||||||
|
)]
|
||||||
pub struct Const<const R: usize>;
|
pub struct Const<const R: usize>;
|
||||||
|
|
||||||
/// Trait implemented exclusively by type-level integers.
|
/// Trait implemented exclusively by type-level integers.
|
||||||
|
@ -270,7 +278,7 @@ pub trait ToTypenum {
|
||||||
type Typenum: Unsigned;
|
type Typenum: Unsigned;
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<const T: usize> Dim for Const<T> {
|
unsafe impl<const T: usize> Dim for Const<T> {
|
||||||
fn try_to_usize() -> Option<usize> {
|
fn try_to_usize() -> Option<usize> {
|
||||||
Some(T)
|
Some(T)
|
||||||
}
|
}
|
||||||
|
|
|
@ -14,7 +14,7 @@ use crate::base::{DefaultAllocator, Matrix, OMatrix, RowVector, Scalar, Vector};
use crate::{Storage, UninitMatrix};
use std::mem::MaybeUninit;

/// # Rows and columns extraction
/// # Triangular matrix extraction
impl<T: Scalar + Zero, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
    /// Extracts the upper triangular part of this matrix (including the diagonal).
    #[inline]
@ -41,7 +41,10 @@ impl<T: Scalar + Zero, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
        res
    }
}

/// # Rows and columns extraction
impl<T: Scalar, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
    /// Creates a new matrix by extracting the given set of rows from `self`.
    #[cfg(any(feature = "std", feature = "alloc"))]
    #[must_use]
@ -95,9 +98,7 @@ impl<T: Scalar + Zero, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
        for (destination, source) in icols.enumerate() {
            // NOTE: this is basically a copy_frow but wrapping the values insnide of MaybeUninit.
            res.column_mut(destination)
                .zip_apply(&self.column(*source), |out, e| {
                    *out = MaybeUninit::new(e.clone())
                });
                .zip_apply(&self.column(*source), |out, e| *out = MaybeUninit::new(e));
        }

        // Safety: res is now fully initialized.
@ -1094,7 +1095,7 @@ unsafe fn compress_rows<T: Scalar>(
    if new_nrows == 0 || ncols == 0 {
        // The output matrix is empty, drop everything.
        ptr::drop_in_place(data.as_mut());
        ptr::drop_in_place(data);
        return;
    }
@ -1,4 +1,5 @@
|
||||||
//! Indexing
|
//! Indexing
|
||||||
|
#![allow(clippy::reversed_empty_ranges)]
|
||||||
|
|
||||||
use crate::base::storage::{RawStorage, RawStorageMut};
|
use crate::base::storage::{RawStorage, RawStorageMut};
|
||||||
use crate::base::{
|
use crate::base::{
|
||||||
|
@ -43,7 +44,7 @@ impl<D: Dim> DimRange<D> for usize {
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn dimrange_usize() {
|
fn dimrange_usize() {
|
||||||
assert_eq!(DimRange::contained_by(&0, Const::<0>), false);
|
assert!(!DimRange::contained_by(&0, Const::<0>));
|
||||||
assert!(DimRange::contained_by(&0, Const::<1>));
|
assert!(DimRange::contained_by(&0, Const::<1>));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -68,8 +69,8 @@ impl<D: Dim> DimRange<D> for ops::Range<usize> {
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn dimrange_range_usize() {
|
fn dimrange_range_usize() {
|
||||||
assert_eq!(DimRange::contained_by(&(0..0), Const::<0>), false);
|
assert!(!DimRange::contained_by(&(0..0), Const::<0>));
|
||||||
assert_eq!(DimRange::contained_by(&(0..1), Const::<0>), false);
|
assert!(!DimRange::contained_by(&(0..1), Const::<0>));
|
||||||
assert!(DimRange::contained_by(&(0..1), Const::<1>));
|
assert!(DimRange::contained_by(&(0..1), Const::<1>));
|
||||||
assert!(DimRange::contained_by(
|
assert!(DimRange::contained_by(
|
||||||
&((usize::MAX - 1)..usize::MAX),
|
&((usize::MAX - 1)..usize::MAX),
|
||||||
|
@ -110,8 +111,8 @@ impl<D: Dim> DimRange<D> for ops::RangeFrom<usize> {
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn dimrange_rangefrom_usize() {
|
fn dimrange_rangefrom_usize() {
|
||||||
assert_eq!(DimRange::contained_by(&(0..), Const::<0>), false);
|
assert!(!DimRange::contained_by(&(0..), Const::<0>));
|
||||||
assert_eq!(DimRange::contained_by(&(0..), Const::<0>), false);
|
assert!(!DimRange::contained_by(&(0..), Const::<0>));
|
||||||
assert!(DimRange::contained_by(&(0..), Const::<1>));
|
assert!(DimRange::contained_by(&(0..), Const::<1>));
|
||||||
assert!(DimRange::contained_by(
|
assert!(DimRange::contained_by(
|
||||||
&((usize::MAX - 1)..),
|
&((usize::MAX - 1)..),
|
||||||
|
@ -204,16 +205,16 @@ impl<D: Dim> DimRange<D> for ops::RangeInclusive<usize> {
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn dimrange_rangeinclusive_usize() {
|
fn dimrange_rangeinclusive_usize() {
|
||||||
assert_eq!(DimRange::contained_by(&(0..=0), Const::<0>), false);
|
assert!(!DimRange::contained_by(&(0..=0), Const::<0>));
|
||||||
assert!(DimRange::contained_by(&(0..=0), Const::<1>));
|
assert!(DimRange::contained_by(&(0..=0), Const::<1>));
|
||||||
assert_eq!(
|
assert!(!DimRange::contained_by(
|
||||||
DimRange::contained_by(&(usize::MAX..=usize::MAX), Dynamic::new(usize::MAX)),
|
&(usize::MAX..=usize::MAX),
|
||||||
false
|
Dynamic::new(usize::MAX)
|
||||||
);
|
));
|
||||||
assert_eq!(
|
assert!(!DimRange::contained_by(
|
||||||
DimRange::contained_by(&((usize::MAX - 1)..=usize::MAX), Dynamic::new(usize::MAX)),
|
&((usize::MAX - 1)..=usize::MAX),
|
||||||
false
|
Dynamic::new(usize::MAX)
|
||||||
);
|
));
|
||||||
assert!(DimRange::contained_by(
|
assert!(DimRange::contained_by(
|
||||||
&((usize::MAX - 1)..=(usize::MAX - 1)),
|
&((usize::MAX - 1)..=(usize::MAX - 1)),
|
||||||
Dynamic::new(usize::MAX)
|
Dynamic::new(usize::MAX)
|
||||||
|
@ -255,7 +256,7 @@ impl<D: Dim> DimRange<D> for ops::RangeTo<usize> {
|
||||||
#[test]
|
#[test]
|
||||||
fn dimrange_rangeto_usize() {
|
fn dimrange_rangeto_usize() {
|
||||||
assert!(DimRange::contained_by(&(..0), Const::<0>));
|
assert!(DimRange::contained_by(&(..0), Const::<0>));
|
||||||
assert_eq!(DimRange::contained_by(&(..1), Const::<0>), false);
|
assert!(!DimRange::contained_by(&(..1), Const::<0>));
|
||||||
assert!(DimRange::contained_by(&(..0), Const::<1>));
|
assert!(DimRange::contained_by(&(..0), Const::<1>));
|
||||||
assert!(DimRange::contained_by(
|
assert!(DimRange::contained_by(
|
||||||
&(..(usize::MAX - 1)),
|
&(..(usize::MAX - 1)),
|
||||||
|
@ -292,13 +293,13 @@ impl<D: Dim> DimRange<D> for ops::RangeToInclusive<usize> {
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn dimrange_rangetoinclusive_usize() {
|
fn dimrange_rangetoinclusive_usize() {
|
||||||
assert_eq!(DimRange::contained_by(&(..=0), Const::<0>), false);
|
assert!(!DimRange::contained_by(&(..=0), Const::<0>));
|
||||||
assert_eq!(DimRange::contained_by(&(..=1), Const::<0>), false);
|
assert!(!DimRange::contained_by(&(..=1), Const::<0>));
|
||||||
assert!(DimRange::contained_by(&(..=0), Const::<1>));
|
assert!(DimRange::contained_by(&(..=0), Const::<1>));
|
||||||
assert_eq!(
|
assert!(!DimRange::contained_by(
|
||||||
DimRange::contained_by(&(..=(usize::MAX)), Dynamic::new(usize::MAX)),
|
&(..=(usize::MAX)),
|
||||||
false
|
Dynamic::new(usize::MAX)
|
||||||
);
|
));
|
||||||
assert!(DimRange::contained_by(
|
assert!(DimRange::contained_by(
|
||||||
&(..=(usize::MAX - 1)),
|
&(..=(usize::MAX - 1)),
|
||||||
Dynamic::new(usize::MAX)
|
Dynamic::new(usize::MAX)
|
||||||
|
@ -566,7 +567,10 @@ where
    #[doc(hidden)]
    #[inline(always)]
    unsafe fn get_unchecked(self, matrix: &'a Matrix<T, R, C, S>) -> Self::Output {
        matrix.data.get_unchecked_linear(self)
        let nrows = matrix.shape().0;
        let row = self % nrows;
        let col = self / nrows;
        matrix.data.get_unchecked(row, col)
    }
}

@ -585,7 +589,10 @@ where
    where
        S: RawStorageMut<T, R, C>,
    {
        matrix.data.get_unchecked_linear_mut(self)
        let nrows = matrix.shape().0;
        let row = self % nrows;
        let col = self / nrows;
        matrix.data.get_unchecked_mut(row, col)
    }
}
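The new `get_unchecked`/`get_unchecked_mut` bodies above turn a linear index into a (row, column) pair instead of indexing the storage linearly. A tiny standalone check (not from the diff) of the column-major arithmetic they rely on:

// Column-major layout stores element (row, col) at linear index col * nrows + row,
// so row = index % nrows and col = index / nrows.
fn main() {
    let (nrows, ncols) = (3usize, 2usize);
    for col in 0..ncols {
        for row in 0..nrows {
            let index = col * nrows + row;
            assert_eq!((index % nrows, index / nrows), (row, col));
        }
    }
}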
@ -32,7 +32,7 @@ use crate::{ArrayStorage, SMatrix, SimdComplexField, Storage, UninitMatrix};
|
||||||
use crate::storage::IsContiguous;
|
use crate::storage::IsContiguous;
|
||||||
use crate::uninit::{Init, InitStatus, Uninit};
|
use crate::uninit::{Init, InitStatus, Uninit};
|
||||||
#[cfg(any(feature = "std", feature = "alloc"))]
|
#[cfg(any(feature = "std", feature = "alloc"))]
|
||||||
use crate::{DMatrix, DVector, Dynamic, VecStorage};
|
use crate::{DMatrix, DVector, Dynamic, RowDVector, VecStorage};
|
||||||
use std::mem::MaybeUninit;
|
use std::mem::MaybeUninit;
|
||||||
|
|
||||||
/// A square matrix.
|
/// A square matrix.
|
||||||
|
@ -92,6 +92,7 @@ pub type MatrixCross<T, R1, C1, R2, C2> =
|
||||||
/// - [Interpolation <span style="float:right;">`lerp`, `slerp`…</span>](#interpolation)
|
/// - [Interpolation <span style="float:right;">`lerp`, `slerp`…</span>](#interpolation)
|
||||||
/// - [BLAS functions <span style="float:right;">`gemv`, `gemm`, `syger`…</span>](#blas-functions)
|
/// - [BLAS functions <span style="float:right;">`gemv`, `gemm`, `syger`…</span>](#blas-functions)
|
||||||
/// - [Swizzling <span style="float:right;">`xx`, `yxz`…</span>](#swizzling)
|
/// - [Swizzling <span style="float:right;">`xx`, `yxz`…</span>](#swizzling)
|
||||||
|
/// - [Triangular matrix extraction <span style="float:right;">`upper_triangle`, `lower_triangle`</span>](#triangular-matrix-extraction)
|
||||||
///
|
///
|
||||||
/// #### Statistics
|
/// #### Statistics
|
||||||
/// - [Common operations <span style="float:right;">`row_sum`, `column_mean`, `variance`…</span>](#common-statistics-operations)
|
/// - [Common operations <span style="float:right;">`row_sum`, `column_mean`, `variance`…</span>](#common-statistics-operations)
|
||||||
|
@ -154,6 +155,10 @@ pub type MatrixCross<T, R1, C1, R2, C2> =
|
||||||
/// some concrete types for `T` and a compatible data storage type `S`).
|
/// some concrete types for `T` and a compatible data storage type `S`).
|
||||||
#[repr(C)]
|
#[repr(C)]
|
||||||
#[derive(Clone, Copy)]
|
#[derive(Clone, Copy)]
|
||||||
|
#[cfg_attr(
|
||||||
|
all(not(target_os = "cuda"), feature = "cuda"),
|
||||||
|
derive(cust::DeviceCopy)
|
||||||
|
)]
|
||||||
pub struct Matrix<T, R, C, S> {
|
pub struct Matrix<T, R, C, S> {
|
||||||
/// The data storage that contains all the matrix components. Disappointed?
|
/// The data storage that contains all the matrix components. Disappointed?
|
||||||
///
|
///
|
||||||
|
@@ -188,17 +193,14 @@ pub struct Matrix<T, R, C, S> {
     // Note that it would probably make sense to just have
     // the type `Matrix<S>`, and have `T, R, C` be associated-types
     // of the `RawStorage` trait. However, because we don't have
-    // specialization, this is not bossible because these `T, R, C`
+    // specialization, this is not possible because these `T, R, C`
     // allows us to desambiguate a lot of configurations.
     _phantoms: PhantomData<(T, R, C)>,
 }

 impl<T, R: Dim, C: Dim, S: fmt::Debug> fmt::Debug for Matrix<T, R, C, S> {
     fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
-        formatter
-            .debug_struct("Matrix")
-            .field("data", &self.data)
-            .finish()
+        self.data.fmt(formatter)
     }
 }

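With this change, `{:?}` on a matrix prints the underlying storage directly instead of a `Matrix { data: .. }` wrapper. A minimal sketch of how this surfaces to callers (illustrative only, not part of the commit; the exact output text depends on the storage type's own Debug impl and is not asserted here):

    use nalgebra::Matrix2;

    let m = Matrix2::new(1.0, 2.0,
                         3.0, 4.0);
    // Prints the storage contents (e.g. nested arrays for ArrayStorage).
    println!("{:?}", m);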
@@ -411,6 +413,21 @@ impl<T> DVector<T> {
     }
 }

+// TODO: Consider removing/deprecating `from_vec_storage` once we are able to make
+// `from_data` const fn compatible
+#[cfg(any(feature = "std", feature = "alloc"))]
+impl<T> RowDVector<T> {
+    /// Creates a new heap-allocated matrix from the given [`VecStorage`].
+    ///
+    /// This method exists primarily as a workaround for the fact that `from_data` can not
+    /// work in `const fn` contexts.
+    pub const fn from_vec_storage(storage: VecStorage<T, U1, Dynamic>) -> Self {
+        // This is sound because the dimensions of the matrix and the storage are guaranteed
+        // to be the same
+        unsafe { Self::from_data_statically_unchecked(storage) }
+    }
+}
+
 impl<T, R: Dim, C: Dim> UninitMatrix<T, R, C>
 where
     DefaultAllocator: Allocator<T, R, C>,
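The new `RowDVector::from_vec_storage` mirrors the existing `DVector` constructor and skips the runtime dimension check because the storage type already guarantees the shape. An illustrative use, under the assumption that the storage is built with `VecStorage::new` using `Const::<1>` and `Dynamic::new` as the shape values:

    use nalgebra::{Const, Dynamic, RowDVector, VecStorage};

    // Wrap an existing Vec in a storage with one row and a dynamic column count,
    // then build the row vector from it without re-checking the dimensions.
    let storage = VecStorage::new(Const::<1>, Dynamic::new(3), vec![1.0, 2.0, 3.0]);
    let row = RowDVector::from_vec_storage(storage);
    assert_eq!(row.len(), 3);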
@@ -681,7 +698,7 @@ impl<T, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
     #[inline]
     fn transpose_to_uninit<Status, R2, C2, SB>(
         &self,
-        status: Status,
+        _status: Status,
         out: &mut Matrix<Status::Value, R2, C2, SB>,
     ) where
         Status: InitStatus<T>,

@@ -1377,7 +1394,7 @@ impl<T: SimdComplexField, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C
     #[inline]
     fn adjoint_to_uninit<Status, R2, C2, SB>(
         &self,
-        status: Status,
+        _status: Status,
         out: &mut Matrix<Status::Value, R2, C2, SB>,
     ) where
         Status: InitStatus<T>,
@@ -1777,7 +1794,7 @@ where
         assert!(self.shape() == other.shape());
         self.iter()
             .zip(other.iter())
-            .all(|(a, b)| a.ulps_eq(b, epsilon.clone(), max_ulps.clone()))
+            .all(|(a, b)| a.ulps_eq(b, epsilon.clone(), max_ulps))
     }
 }
@@ -2021,16 +2038,26 @@ impl<T: Scalar + ClosedAdd + ClosedSub + ClosedMul, R: Dim, C: Dim, S: RawStorag
             + SameNumberOfRows<R2, U2>
             + SameNumberOfColumns<C2, U1>,
     {
-        assert!(
-            self.shape() == (2, 1),
-            "2D perpendicular product requires (2, 1) vector but found {:?}",
-            self.shape()
+        let shape = self.shape();
+        assert_eq!(
+            shape,
+            b.shape(),
+            "2D vector perpendicular product dimension mismatch."
+        );
+        assert_eq!(
+            shape,
+            (2, 1),
+            "2D perpendicular product requires (2, 1) vectors {:?}",
+            shape
         );

-        unsafe {
-            self.get_unchecked((0, 0)).clone() * b.get_unchecked((1, 0)).clone()
-                - self.get_unchecked((1, 0)).clone() * b.get_unchecked((0, 0)).clone()
-        }
+        // SAFETY: assertion above ensures correct shape
+        let ax = unsafe { self.get_unchecked((0, 0)).clone() };
+        let ay = unsafe { self.get_unchecked((1, 0)).clone() };
+        let bx = unsafe { b.get_unchecked((0, 0)).clone() };
+        let by = unsafe { b.get_unchecked((1, 0)).clone() };
+
+        ax * by - ay * bx
     }

     // TODO: use specialization instead of an assertion.
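The rewritten 2D perpendicular product still computes `a.x * b.y - a.y * b.x`; only the shape assertions and the unsafe accesses changed. A quick numeric check (illustrative, not from the commit):

    use nalgebra::Vector2;

    let a = Vector2::new(1.0, 2.0);
    let b = Vector2::new(3.0, 4.0);
    // a.x * b.y - a.y * b.x = 1*4 - 2*3 = -2
    assert_eq!(a.perp(&b), -2.0);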
@@ -2051,17 +2078,14 @@ impl<T: Scalar + ClosedAdd + ClosedSub + ClosedMul, R: Dim, C: Dim, S: RawStorag
         let shape = self.shape();
         assert_eq!(shape, b.shape(), "Vector cross product dimension mismatch.");
         assert!(
-            (shape.0 == 3 && shape.1 == 1) || (shape.0 == 1 && shape.1 == 3),
+            shape == (3, 1) || shape == (1, 3),
             "Vector cross product dimension mismatch: must be (3, 1) or (1, 3) but found {:?}.",
             shape
         );

         if shape.0 == 3 {
             unsafe {
-                // TODO: soooo ugly!
-                let nrows = SameShapeR::<R, R2>::from_usize(3);
-                let ncols = SameShapeC::<C, C2>::from_usize(1);
-                let mut res = Matrix::uninit(nrows, ncols);
+                let mut res = Matrix::uninit(Dim::from_usize(3), Dim::from_usize(1));

                 let ax = self.get_unchecked((0, 0));
                 let ay = self.get_unchecked((1, 0));

@@ -2083,10 +2107,7 @@ impl<T: Scalar + ClosedAdd + ClosedSub + ClosedMul, R: Dim, C: Dim, S: RawStorag
             }
         } else {
             unsafe {
-                // TODO: ugly!
-                let nrows = SameShapeR::<R, R2>::from_usize(1);
-                let ncols = SameShapeC::<C, C2>::from_usize(3);
-                let mut res = Matrix::uninit(nrows, ncols);
+                let mut res = Matrix::uninit(Dim::from_usize(1), Dim::from_usize(3));

                 let ax = self.get_unchecked((0, 0));
                 let ay = self.get_unchecked((0, 1));
@@ -42,14 +42,14 @@ where

     #[inline]
     fn replace(&mut self, i: usize, val: Self::Element) {
-        self.zip_apply(&val, |mut a, b| {
+        self.zip_apply(&val, |a, b| {
             a.replace(i, b);
         })
     }

     #[inline]
     unsafe fn replace_unchecked(&mut self, i: usize, val: Self::Element) {
-        self.zip_apply(&val, |mut a, b| {
+        self.zip_apply(&val, |a, b| {
             a.replace_unchecked(i, b);
         })
     }
@@ -73,7 +73,6 @@ macro_rules! slice_storage_impl(
                 S: $Storage<T, RStor, CStor>,
                 RStride: Dim,
                 CStride: Dim {
-
             $T::from_raw_parts(storage.$get_addr(start.0, start.1), shape, strides)
         }
     }
@@ -60,7 +60,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
         T: SimdPartialOrd + Zero,
     {
         self.fold_with(
-            |e| e.map(|e| e.clone()).unwrap_or_else(T::zero),
+            |e| e.cloned().unwrap_or_else(T::zero),
             |a, b| a.simd_max(b.clone()),
         )
     }

@@ -123,7 +123,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
         T: SimdPartialOrd + Zero,
     {
         self.fold_with(
-            |e| e.map(|e| e.clone()).unwrap_or_else(T::zero),
+            |e| e.cloned().unwrap_or_else(T::zero),
             |a, b| a.simd_min(b.clone()),
         )
     }
@@ -434,12 +434,7 @@ impl<T: Scalar, R: Dim, C: Dim, S: StorageMut<T, R, C>> Matrix<T, R, C, S> {
     {
         let n = self.norm();
         let le = n.clone().simd_le(min_norm);
-        self.apply(|e| {
-            *e = e
-                .clone()
-                .simd_unscale(n.clone())
-                .select(le.clone(), e.clone())
-        });
+        self.apply(|e| *e = e.clone().simd_unscale(n.clone()).select(le, e.clone()));
         SimdOption::new(n, le)
     }
@@ -146,7 +146,7 @@ macro_rules! componentwise_binop_impl(
         #[inline]
         fn $method_to_statically_unchecked_uninit<Status, R2: Dim, C2: Dim, SB,
                                                   R3: Dim, C3: Dim, SC>(&self,
-                                                  status: Status,
+                                                  _status: Status,
                                                   rhs: &Matrix<T, R2, C2, SB>,
                                                   out: &mut Matrix<Status::Value, R3, C3, SC>)
                                                   where Status: InitStatus<T>,

@@ -699,7 +699,7 @@ where
     #[inline(always)]
     fn xx_mul_to_uninit<Status, R2: Dim, C2: Dim, SB, R3: Dim, C3: Dim, SC>(
         &self,
-        status: Status,
+        _status: Status,
         rhs: &Matrix<T, R2, C2, SB>,
         out: &mut Matrix<Status::Value, R3, C3, SC>,
         dot: impl Fn(
@@ -7,10 +7,10 @@ use simba::scalar::{ClosedAdd, ClosedMul, ComplexField, RealField};
 use crate::base::allocator::Allocator;
 use crate::base::dimension::{Dim, DimMin};
 use crate::base::storage::Storage;
-use crate::base::{DefaultAllocator, Matrix, Scalar, SquareMatrix};
+use crate::base::{DefaultAllocator, Matrix, SquareMatrix};
 use crate::RawStorage;

-impl<T: Scalar, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
+impl<T, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
     /// The total number of elements of this matrix.
     ///
     /// # Examples:
@@ -63,50 +63,18 @@ impl<T: Scalar, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
         T::Epsilon: Clone,
     {
         let (nrows, ncols) = self.shape();
-        let d;

-        if nrows > ncols {
-            d = ncols;
-
-            for i in d..nrows {
-                for j in 0..ncols {
-                    if !relative_eq!(self[(i, j)], T::zero(), epsilon = eps.clone()) {
-                        return false;
-                    }
-                }
-            }
-        } else {
-            // nrows <= ncols
-            d = nrows;
-
+        for j in 0..ncols {
             for i in 0..nrows {
-                for j in d..ncols {
-                    if !relative_eq!(self[(i, j)], T::zero(), epsilon = eps.clone()) {
-                        return false;
-                    }
-                }
-            }
-        }
-
-        // Off-diagonal elements of the sub-square matrix.
-        for i in 1..d {
-            for j in 0..i {
-                // TODO: use unsafe indexing.
-                if !relative_eq!(self[(i, j)], T::zero(), epsilon = eps.clone())
-                    || !relative_eq!(self[(j, i)], T::zero(), epsilon = eps.clone())
+                let el = unsafe { self.get_unchecked((i, j)) };
+                if (i == j && !relative_eq!(*el, T::one(), epsilon = eps.clone()))
+                    || (i != j && !relative_eq!(*el, T::zero(), epsilon = eps.clone()))
                 {
                     return false;
                 }
             }
         }

-        // Diagonal elements of the sub-square matrix.
-        for i in 0..d {
-            if !relative_eq!(self[(i, i)], T::one(), epsilon = eps.clone()) {
-                return false;
-            }
-        }
-
         true
     }
 }
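The new `is_identity` walks every entry once and checks diagonal entries against one and off-diagonal entries against zero, which also covers the rectangular case the old two-pass code handled separately. An illustrative check (not part of the commit):

    use nalgebra::Matrix3x4;

    // A rectangular "identity": the leading square block is I and the extra column is zero.
    let m = Matrix3x4::new(1.0, 0.0, 0.0, 0.0,
                           0.0, 1.0, 0.0, 0.0,
                           0.0, 0.0, 1.0, 0.0);
    assert!(m.is_identity(1.0e-7));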
@@ -1,8 +1,8 @@
 use crate::allocator::Allocator;
 use crate::storage::RawStorage;
 use crate::{Const, DefaultAllocator, Dim, Matrix, OVector, RowOVector, Scalar, VectorSlice, U1};
-use num::Zero;
-use simba::scalar::{ClosedAdd, Field, SupersetOf};
+use num::{One, Zero};
+use simba::scalar::{ClosedAdd, ClosedMul, Field, SupersetOf};
 use std::mem::MaybeUninit;

 /// # Folding on columns and rows
@@ -123,7 +123,9 @@ impl<T: Scalar, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
     ///                         4.0, 5.0, 6.0);
     /// assert_eq!(m.row_sum(), RowVector3::new(5.0, 7.0, 9.0));
     ///
-    /// let mint = Matrix3x2::new(1,2,3,4,5,6);
+    /// let mint = Matrix3x2::new(1, 2,
+    ///                           3, 4,
+    ///                           5, 6);
     /// assert_eq!(mint.row_sum(), RowVector2::new(9,12));
     /// ```
     #[inline]

@@ -148,8 +150,10 @@ impl<T: Scalar, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
     ///                         4.0, 5.0, 6.0);
     /// assert_eq!(m.row_sum_tr(), Vector3::new(5.0, 7.0, 9.0));
     ///
-    /// let mint = Matrix3x2::new(1,2,3,4,5,6);
-    /// assert_eq!(mint.row_sum_tr(), Vector2::new(9,12));
+    /// let mint = Matrix3x2::new(1, 2,
+    ///                           3, 4,
+    ///                           5, 6);
+    /// assert_eq!(mint.row_sum_tr(), Vector2::new(9, 12));
     /// ```
     #[inline]
     #[must_use]

@@ -173,8 +177,10 @@ impl<T: Scalar, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
     ///                         4.0, 5.0, 6.0);
     /// assert_eq!(m.column_sum(), Vector2::new(6.0, 15.0));
     ///
-    /// let mint = Matrix3x2::new(1,2,3,4,5,6);
-    /// assert_eq!(mint.column_sum(), Vector3::new(3,7,11));
+    /// let mint = Matrix3x2::new(1, 2,
+    ///                           3, 4,
+    ///                           5, 6);
+    /// assert_eq!(mint.column_sum(), Vector3::new(3, 7, 11));
     /// ```
     #[inline]
     #[must_use]
@@ -189,6 +195,120 @@ impl<T: Scalar, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
         })
     }

+    /*
+     *
+     * Product computation.
+     *
+     */
+    /// The product of all the elements of this matrix.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// # use nalgebra::Matrix2x3;
+    ///
+    /// let m = Matrix2x3::new(1.0, 2.0, 3.0,
+    ///                        4.0, 5.0, 6.0);
+    /// assert_eq!(m.product(), 720.0);
+    /// ```
+    #[inline]
+    #[must_use]
+    pub fn product(&self) -> T
+    where
+        T: ClosedMul + One,
+    {
+        self.iter().cloned().fold(T::one(), |a, b| a * b)
+    }
+
+    /// The product of all the rows of this matrix.
+    ///
+    /// Use `.row_sum_tr` if you need the result in a column vector instead.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// # use nalgebra::{Matrix2x3, Matrix3x2};
+    /// # use nalgebra::{RowVector2, RowVector3};
+    ///
+    /// let m = Matrix2x3::new(1.0, 2.0, 3.0,
+    ///                        4.0, 5.0, 6.0);
+    /// assert_eq!(m.row_product(), RowVector3::new(4.0, 10.0, 18.0));
+    ///
+    /// let mint = Matrix3x2::new(1, 2,
+    ///                           3, 4,
+    ///                           5, 6);
+    /// assert_eq!(mint.row_product(), RowVector2::new(15, 48));
+    /// ```
+    #[inline]
+    #[must_use]
+    pub fn row_product(&self) -> RowOVector<T, C>
+    where
+        T: ClosedMul + One,
+        DefaultAllocator: Allocator<T, U1, C>,
+    {
+        self.compress_rows(|col| col.product())
+    }
+
+    /// The product of all the rows of this matrix. The result is transposed and returned as a column vector.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// # use nalgebra::{Matrix2x3, Matrix3x2};
+    /// # use nalgebra::{Vector2, Vector3};
+    ///
+    /// let m = Matrix2x3::new(1.0, 2.0, 3.0,
+    ///                        4.0, 5.0, 6.0);
+    /// assert_eq!(m.row_product_tr(), Vector3::new(4.0, 10.0, 18.0));
+    ///
+    /// let mint = Matrix3x2::new(1, 2,
+    ///                           3, 4,
+    ///                           5, 6);
+    /// assert_eq!(mint.row_product_tr(), Vector2::new(15, 48));
+    /// ```
+    #[inline]
+    #[must_use]
+    pub fn row_product_tr(&self) -> OVector<T, C>
+    where
+        T: ClosedMul + One,
+        DefaultAllocator: Allocator<T, C>,
+    {
+        self.compress_rows_tr(|col| col.product())
+    }
+
+    /// The product of all the columns of this matrix.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// # use nalgebra::{Matrix2x3, Matrix3x2};
+    /// # use nalgebra::{Vector2, Vector3};
+    ///
+    /// let m = Matrix2x3::new(1.0, 2.0, 3.0,
+    ///                        4.0, 5.0, 6.0);
+    /// assert_eq!(m.column_product(), Vector2::new(6.0, 120.0));
+    ///
+    /// let mint = Matrix3x2::new(1, 2,
+    ///                           3, 4,
+    ///                           5, 6);
+    /// assert_eq!(mint.column_product(), Vector3::new(2, 12, 30));
+    /// ```
+    #[inline]
+    #[must_use]
+    pub fn column_product(&self) -> OVector<T, R>
+    where
+        T: ClosedMul + One,
+        DefaultAllocator: Allocator<T, R>,
+    {
+        let nrows = self.shape_generic().0;
+        self.compress_columns(
+            OVector::repeat_generic(nrows, Const::<1>, T::one()),
+            |out, col| {
+                out.component_mul_assign(&col);
+            },
+        )
+    }
+
     /*
      *
      * Variance computation.
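The new product helpers follow the same row/column conventions as the existing sum methods: `row_product` folds down each column into a row vector, `column_product` folds across each row into a column vector. A sketch on a dynamically sized matrix, assuming the methods added above:

    use nalgebra::DMatrix;

    let m = DMatrix::from_row_slice(2, 3, &[1.0, 2.0, 3.0,
                                            4.0, 5.0, 6.0]);
    assert_eq!(m.product(), 720.0);          // product of every entry
    assert_eq!(m.row_product()[2], 18.0);    // per-column products: [4, 10, 18]
    assert_eq!(m.column_product()[0], 6.0);  // per-row products: [6, 120]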
@@ -66,11 +66,11 @@ unsafe impl<T> InitStatus<T> for Uninit {

     #[inline(always)]
     unsafe fn assume_init_ref(t: &MaybeUninit<T>) -> &T {
-        std::mem::transmute(t.as_ptr()) // TODO: use t.assume_init_ref()
+        &*t.as_ptr() // TODO: use t.assume_init_ref()
     }

     #[inline(always)]
     unsafe fn assume_init_mut(t: &mut MaybeUninit<T>) -> &mut T {
-        std::mem::transmute(t.as_mut_ptr()) // TODO: use t.assume_init_mut()
+        &mut *t.as_mut_ptr() // TODO: use t.assume_init_mut()
     }
 }
@@ -1,3 +1,4 @@
+use std::fmt;
 #[cfg(feature = "abomonation-serialize")]
 use std::io::{Result as IOResult, Write};
 use std::ops::Deref;
@@ -24,11 +25,21 @@ use crate::{Dim, Matrix, OMatrix, RealField, Scalar, SimdComplexField, SimdRealF
 /// and [`UnitQuaternion`](crate::UnitQuaternion); both built on top of `Unit`. If you are interested
 /// in their documentation, read their dedicated pages directly.
 #[repr(transparent)]
-#[derive(Clone, Hash, Debug, Copy)]
+#[derive(Clone, Hash, Copy)]
+// #[cfg_attr(
+//     all(not(target_os = "cuda"), feature = "cuda"),
+//     derive(cust::DeviceCopy)
+// )]
 pub struct Unit<T> {
     pub(crate) value: T,
 }

+impl<T: fmt::Debug> fmt::Debug for Unit<T> {
+    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
+        self.value.fmt(formatter)
+    }
+}
+
 #[cfg(feature = "bytemuck")]
 unsafe impl<T> bytemuck::Zeroable for Unit<T> where T: bytemuck::Zeroable {}

@@ -111,6 +122,17 @@ mod rkyv_impl {
     }
 }

+#[cfg(all(not(target_os = "cuda"), feature = "cuda"))]
+unsafe impl<T: cust::memory::DeviceCopy, R, C, S> cust::memory::DeviceCopy
+    for Unit<Matrix<T, R, C, S>>
+where
+    T: Scalar,
+    R: Dim,
+    C: Dim,
+    S: RawStorage<T, R, C> + Copy,
+{
+}
+
 impl<T, R, C, S> PartialEq for Unit<Matrix<T, R, C, S>>
 where
     T: Scalar + PartialEq,
@@ -39,6 +39,10 @@ use simba::scalar::{ClosedNeg, RealField};
 /// See <https://github.com/dimforge/nalgebra/issues/487>
 #[repr(C)]
 #[derive(Debug, Copy, Clone)]
+#[cfg_attr(
+    all(not(target_os = "cuda"), feature = "cuda"),
+    derive(cust::DeviceCopy)
+)]
 pub struct DualQuaternion<T> {
     /// The real component of the quaternion
     pub real: Quaternion<T>,
@@ -351,13 +355,14 @@ impl<T: RealField + UlpsEq<Epsilon = T>> UlpsEq for DualQuaternion<T> {

     #[inline]
     fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool {
-        self.clone().to_vector().ulps_eq(&other.clone().to_vector(), epsilon.clone(), max_ulps.clone()) ||
+        self.clone().to_vector().ulps_eq(&other.clone().to_vector(), epsilon.clone(), max_ulps) ||
         // Account for the double-covering of S², i.e. q = -q.
-        self.clone().to_vector().iter().zip(other.clone().to_vector().iter()).all(|(a, b)| a.ulps_eq(&-b.clone(), epsilon.clone(), max_ulps.clone()))
+        self.clone().to_vector().iter().zip(other.clone().to_vector().iter()).all(|(a, b)| a.ulps_eq(&-b.clone(), epsilon.clone(), max_ulps))
     }
 }

-/// A unit quaternions. May be used to represent a rotation followed by a translation.
+/// A unit dual quaternion. May be used to represent a rotation followed by a
+/// translation.
 pub type UnitDualQuaternion<T> = Unit<DualQuaternion<T>>;

 impl<T: Scalar + ClosedNeg + PartialEq + SimdRealField> PartialEq for UnitDualQuaternion<T> {
@@ -593,8 +598,9 @@ where
     /// Screw linear interpolation between two unit quaternions. This creates a
     /// smooth arc from one dual-quaternion to another.
     ///
-    /// Panics if the angle between both quaternion is 180 degrees (in which case the interpolation
-    /// is not well-defined). Use `.try_sclerp` instead to avoid the panic.
+    /// Panics if the angle between both quaternion is 180 degrees (in which
+    /// case the interpolation is not well-defined). Use `.try_sclerp`
+    /// instead to avoid the panic.
     ///
     /// # Example
     /// ```

@@ -627,15 +633,16 @@ where
             .expect("DualQuaternion sclerp: ambiguous configuration.")
     }

-    /// Computes the screw-linear interpolation between two unit quaternions or returns `None`
-    /// if both quaternions are approximately 180 degrees apart (in which case the interpolation is
-    /// not well-defined).
+    /// Computes the screw-linear interpolation between two unit quaternions or
+    /// returns `None` if both quaternions are approximately 180 degrees
+    /// apart (in which case the interpolation is not well-defined).
     ///
     /// # Arguments
     /// * `self`: the first quaternion to interpolate from.
     /// * `other`: the second quaternion to interpolate toward.
     /// * `t`: the interpolation parameter. Should be between 0 and 1.
-    /// * `epsilon`: the value below which the sinus of the angle separating both quaternion
+    /// * `epsilon`: the value below which the sinus of the angle separating
+    /// both quaternion
     /// must be to return `None`.
     #[inline]
     #[must_use]
@@ -650,6 +657,10 @@ where
         // interpolation.
         let other = {
             let dot_product = self.as_ref().real.coords.dot(&other.as_ref().real.coords);
+            if relative_eq!(dot_product, T::zero(), epsilon = epsilon.clone()) {
+                return None;
+            }
+
             if dot_product < T::zero() {
                 -other.clone()
             } else {

@@ -660,13 +671,21 @@ where
         let difference = self.as_ref().conjugate() * other.as_ref();
         let norm_squared = difference.real.vector().norm_squared();
         if relative_eq!(norm_squared, T::zero(), epsilon = epsilon) {
-            return None;
+            return Some(Self::from_parts(
+                self.translation()
+                    .vector
+                    .lerp(&other.translation().vector, t)
+                    .into(),
+                self.rotation(),
+            ));
         }

-        let inverse_norm_squared = T::one() / norm_squared;
+        let scalar: T = difference.real.scalar();
+        let mut angle = two.clone() * scalar.acos();
+
+        let inverse_norm_squared: T = T::one() / norm_squared;
         let inverse_norm = inverse_norm_squared.sqrt();

-        let mut angle = two.clone() * difference.real.scalar().acos();
         let mut pitch = -two * difference.dual.scalar() * inverse_norm.clone();
         let direction = difference.real.vector() * inverse_norm.clone();
         let moment = (difference.dual.vector()

@@ -678,6 +697,7 @@ where

         let sin = (half.clone() * angle.clone()).sin();
         let cos = (half.clone() * angle).cos();

         let real = Quaternion::from_parts(cos.clone(), direction.clone() * sin.clone());
         let dual = Quaternion::from_parts(
             -pitch.clone() * half.clone() * sin.clone(),
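With the early `Some(..)` return above, two dual quaternions that share the same rotation no longer make `try_sclerp` give up; the translation is simply interpolated. A sketch of that path (illustrative only; constructors from the public API, values chosen so the interpolation is exact):

    use nalgebra::{Translation3, UnitDualQuaternion, UnitQuaternion, Vector3};

    let a = UnitDualQuaternion::from_parts(Translation3::new(0.0, 0.0, 0.0), UnitQuaternion::identity());
    let b = UnitDualQuaternion::from_parts(Translation3::new(1.0, 2.0, 3.0), UnitQuaternion::identity());

    // Identical rotations: the relative rotation is ~0, so the result is a pure translation lerp.
    let mid = a.try_sclerp(&b, 0.5, 1.0e-6).unwrap();
    assert!((mid.translation().vector - Vector3::new(0.5, 1.0, 1.5)).norm() < 1.0e-9);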
@@ -54,7 +54,11 @@ use crate::geometry::{AbstractRotation, Point, Translation};
 /// * [Conversion to a matrix <span style="float:right;">`to_matrix`…</span>](#conversion-to-a-matrix)
 ///
 #[repr(C)]
-#[derive(Debug)]
+#[derive(Debug, Copy, Clone)]
+#[cfg_attr(
+    all(not(target_os = "cuda"), feature = "cuda"),
+    derive(cust::DeviceCopy)
+)]
 #[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))]
 #[cfg_attr(
     feature = "serde-serialize-no-std",

@@ -170,20 +174,6 @@ where
     }
 }

-impl<T: Scalar + Copy, R: Copy, const D: usize> Copy for Isometry<T, R, D> where
-    Owned<T, Const<D>>: Copy
-{
-}
-
-impl<T: Scalar, R: Clone, const D: usize> Clone for Isometry<T, R, D> {
-    #[inline]
-    fn clone(&self) -> Self {
-        Self {
-            rotation: self.rotation.clone(),
-            translation: self.translation.clone(),
-        }
-    }
-}
-
 /// # From the translation and rotation parts
 impl<T: Scalar, R: AbstractRotation<T, D>, const D: usize> Isometry<T, R, D> {
     /// Creates a new isometry from its rotational and translational parts.
@@ -629,7 +619,7 @@ where
     #[inline]
     fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool {
         self.translation
-            .ulps_eq(&other.translation, epsilon.clone(), max_ulps.clone())
+            .ulps_eq(&other.translation, epsilon.clone(), max_ulps)
             && self.rotation.ulps_eq(&other.rotation, epsilon, max_ulps)
     }
 }
@@ -21,6 +21,15 @@ use crate::{
     UnitQuaternion,
 };

+impl<T: SimdRealField, R: AbstractRotation<T, D>, const D: usize> Default for Isometry<T, R, D>
+where
+    T::Element: SimdRealField,
+{
+    fn default() -> Self {
+        Self::identity()
+    }
+}
+
 impl<T: SimdRealField, R: AbstractRotation<T, D>, const D: usize> Isometry<T, R, D>
 where
     T::Element: SimdRealField,
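The new `Default` impl just forwards to `identity()`, which makes isometries usable in `#[derive(Default)]` structs and `mem::take`-style code. A one-line check (illustrative):

    use nalgebra::Isometry2;

    let iso = Isometry2::<f64>::default();
    assert_eq!(iso, Isometry2::identity());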
@@ -48,6 +48,14 @@ mod translation_coordinates;
 mod translation_ops;
 mod translation_simba;

+mod scale;
+mod scale_alias;
+mod scale_construction;
+mod scale_conversion;
+mod scale_coordinates;
+mod scale_ops;
+mod scale_simba;
+
 mod isometry;
 mod isometry_alias;
 mod isometry_construction;

@@ -95,6 +103,9 @@ pub use self::unit_complex::*;
 pub use self::translation::*;
 pub use self::translation_alias::*;

+pub use self::scale::*;
+pub use self::scale_alias::*;
+
 pub use self::isometry::*;
 pub use self::isometry_alias::*;
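The `scale*` modules wired up above introduce the new non-uniform `Scale` transform (the type itself lives in the new `scale.rs` shown near the end of this diff). A small sketch using only what that file exposes, namely the public `vector` field and `pseudo_inverse`; the `Scale3::new`-style constructors are assumed to come from the accompanying `scale_construction` module:

    use nalgebra::{Scale, Vector3};

    let s = Scale { vector: Vector3::new(1.0, 2.0, 4.0) };
    assert_eq!(s.pseudo_inverse().vector, Vector3::new(1.0, 0.5, 0.25));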
@@ -19,19 +19,15 @@ use crate::geometry::{Point3, Projective3};

 /// A 3D orthographic projection stored as a homogeneous 4x4 matrix.
 #[repr(C)]
+#[cfg_attr(
+    all(not(target_os = "cuda"), feature = "cuda"),
+    derive(cust::DeviceCopy)
+)]
+#[derive(Copy, Clone)]
 pub struct Orthographic3<T> {
     matrix: Matrix4<T>,
 }

-impl<T: RealField + Copy> Copy for Orthographic3<T> {}
-
-impl<T: RealField> Clone for Orthographic3<T> {
-    #[inline]
-    fn clone(&self) -> Self {
-        Self::from_matrix_unchecked(self.matrix.clone())
-    }
-}
-
 impl<T: RealField> fmt::Debug for Orthographic3<T> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
         self.matrix.fmt(f)

@@ -175,7 +171,7 @@ impl<T: RealField> Orthographic3<T> {
         );

         let half: T = crate::convert(0.5);
-        let width = zfar.clone() * (vfov.clone() * half.clone()).tan();
+        let width = zfar.clone() * (vfov * half.clone()).tan();
         let height = width.clone() / aspect;

         Self::new(
@@ -20,19 +20,15 @@ use crate::geometry::{Point3, Projective3};

 /// A 3D perspective projection stored as a homogeneous 4x4 matrix.
 #[repr(C)]
+#[cfg_attr(
+    all(not(target_os = "cuda"), feature = "cuda"),
+    derive(cust::DeviceCopy)
+)]
+#[derive(Copy, Clone)]
 pub struct Perspective3<T> {
     matrix: Matrix4<T>,
 }

-impl<T: RealField + Copy> Copy for Perspective3<T> {}
-
-impl<T: RealField> Clone for Perspective3<T> {
-    #[inline]
-    fn clone(&self) -> Self {
-        Self::from_matrix_unchecked(self.matrix.clone())
-    }
-}
-
 impl<T: RealField> fmt::Debug for Perspective3<T> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
         self.matrix.fmt(f)
@@ -40,7 +40,7 @@ use std::mem::MaybeUninit;
 /// may have some other methods, e.g., `isometry.inverse_transform_point(&point)`. See the documentation
 /// of said transformations for details.
 #[repr(C)]
-#[derive(Debug, Clone)]
+#[derive(Clone)]
 pub struct OPoint<T: Scalar, D: DimName>
 where
     DefaultAllocator: Allocator<T, D>,

@@ -49,6 +49,15 @@ where
     pub coords: OVector<T, D>,
 }

+impl<T: Scalar + fmt::Debug, D: DimName> fmt::Debug for OPoint<T, D>
+where
+    DefaultAllocator: Allocator<T, D>,
+{
+    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
+        self.coords.as_slice().fmt(formatter)
+    }
+}
+
 impl<T: Scalar + hash::Hash, D: DimName> hash::Hash for OPoint<T, D>
 where
     DefaultAllocator: Allocator<T, D>,

@@ -65,6 +74,15 @@ where
 {
 }

+#[cfg(all(not(target_os = "cuda"), feature = "cuda"))]
+unsafe impl<T: Scalar + cust::memory::DeviceCopy, D: DimName> cust::memory::DeviceCopy
+    for OPoint<T, D>
+where
+    DefaultAllocator: Allocator<T, D>,
+    OVector<T, D>: cust::memory::DeviceCopy,
+{
+}
+
 #[cfg(feature = "bytemuck")]
 unsafe impl<T: Scalar, D: DimName> bytemuck::Zeroable for OPoint<T, D>
 where
@@ -19,6 +19,15 @@ use simba::scalar::{ClosedDiv, SupersetOf};

 use crate::geometry::Point;

+impl<T: Scalar + Zero, D: DimName> Default for OPoint<T, D>
+where
+    DefaultAllocator: Allocator<T, D>,
+{
+    fn default() -> Self {
+        Self::origin()
+    }
+}
+
 /// # Other construction methods
 impl<T: Scalar, D: DimName> OPoint<T, D>
 where
@@ -27,12 +27,22 @@ use crate::geometry::{Point3, Rotation};
 /// A quaternion. See the type alias `UnitQuaternion = Unit<Quaternion>` for a quaternion
 /// that may be used as a rotation.
 #[repr(C)]
-#[derive(Debug, Copy, Clone)]
+#[derive(Copy, Clone)]
+#[cfg_attr(
+    all(not(target_os = "cuda"), feature = "cuda"),
+    derive(cust::DeviceCopy)
+)]
 pub struct Quaternion<T> {
     /// This quaternion as a 4D vector of coordinates in the `[ x, y, z, w ]` storage order.
     pub coords: Vector4<T>,
 }

+impl<T: fmt::Debug> fmt::Debug for Quaternion<T> {
+    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
+        self.coords.as_slice().fmt(formatter)
+    }
+}
+
 impl<T: Scalar + Hash> Hash for Quaternion<T> {
     fn hash<H: Hasher>(&self, state: &mut H) {
         self.coords.hash(state)
@@ -1039,9 +1049,9 @@ impl<T: RealField + UlpsEq<Epsilon = T>> UlpsEq for Quaternion<T> {

     #[inline]
     fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool {
-        self.as_vector().ulps_eq(other.as_vector(), epsilon.clone(), max_ulps.clone()) ||
+        self.as_vector().ulps_eq(other.as_vector(), epsilon.clone(), max_ulps) ||
         // Account for the double-covering of S², i.e. q = -q.
-        self.as_vector().iter().zip(other.as_vector().iter()).all(|(a, b)| a.ulps_eq(&-b.clone(), epsilon.clone(), max_ulps.clone()))
+        self.as_vector().iter().zip(other.as_vector().iter()).all(|(a, b)| a.ulps_eq(&-b.clone(), epsilon.clone(), max_ulps))
     }
 }
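The `// Account for the double-covering` branch means a quaternion and its negation compare as approximately equal, since both encode the same rotation. A short check using the `approx` macros (assumed available as a dependency of the example crate):

    use approx::assert_ulps_eq;
    use nalgebra::Quaternion;

    let q = Quaternion::new(1.0, 2.0, 3.0, 4.0);
    // q and -q are accepted as equal by the UlpsEq impl above.
    assert_ulps_eq!(q, -q);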
@@ -1058,6 +1068,9 @@ impl<T: RealField + fmt::Display> fmt::Display for Quaternion<T> {
 /// A unit quaternions. May be used to represent a rotation.
 pub type UnitQuaternion<T> = Unit<Quaternion<T>>;

+#[cfg(all(not(target_os = "cuda"), feature = "cuda"))]
+unsafe impl<T: cust::memory::DeviceCopy> cust::memory::DeviceCopy for UnitQuaternion<T> {}
+
 impl<T: Scalar + ClosedNeg + PartialEq> PartialEq for UnitQuaternion<T> {
     #[inline]
     fn eq(&self, rhs: &Self) -> bool {
@@ -1492,18 +1505,18 @@ where
         let wk = w.clone() * k.clone() * crate::convert(2.0f64);
         let wj = w.clone() * j.clone() * crate::convert(2.0f64);
         let ik = i.clone() * k.clone() * crate::convert(2.0f64);
-        let jk = j.clone() * k.clone() * crate::convert(2.0f64);
-        let wi = w.clone() * i.clone() * crate::convert(2.0f64);
+        let jk = j * k * crate::convert(2.0f64);
+        let wi = w * i * crate::convert(2.0f64);

         Rotation::from_matrix_unchecked(Matrix3::new(
             ww.clone() + ii.clone() - jj.clone() - kk.clone(),
             ij.clone() - wk.clone(),
             wj.clone() + ik.clone(),
-            wk.clone() + ij.clone(),
+            wk + ij,
             ww.clone() - ii.clone() + jj.clone() - kk.clone(),
             jk.clone() - wi.clone(),
-            ik.clone() - wj.clone(),
-            wi.clone() + jk.clone(),
+            ik - wj,
+            wi + jk,
             ww - ii - jj + kk,
         ))
     }
@@ -54,11 +54,21 @@ use crate::geometry::Point;
 /// * [Conversion to a matrix <span style="float:right;">`matrix`, `to_homogeneous`…</span>](#conversion-to-a-matrix)
 ///
 #[repr(C)]
-#[derive(Debug)]
+#[cfg_attr(
+    all(not(target_os = "cuda"), feature = "cuda"),
+    derive(cust::DeviceCopy)
+)]
+#[derive(Copy, Clone)]
 pub struct Rotation<T, const D: usize> {
     matrix: SMatrix<T, D, D>,
 }

+impl<T: fmt::Debug, const D: usize> fmt::Debug for Rotation<T, D> {
+    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
+        self.matrix.fmt(formatter)
+    }
+}
+
 impl<T: Scalar + hash::Hash, const D: usize> hash::Hash for Rotation<T, D>
 where
     <DefaultAllocator as Allocator<T, Const<D>, Const<D>>>::Buffer: hash::Hash,
@@ -68,21 +78,6 @@ where
     }
 }

-impl<T: Scalar + Copy, const D: usize> Copy for Rotation<T, D> where
-    <DefaultAllocator as Allocator<T, Const<D>, Const<D>>>::Buffer: Copy
-{
-}
-
-impl<T: Scalar, const D: usize> Clone for Rotation<T, D>
-where
-    <DefaultAllocator as Allocator<T, Const<D>, Const<D>>>::Buffer: Clone,
-{
-    #[inline]
-    fn clone(&self) -> Self {
-        Self::from_matrix_unchecked(self.matrix.clone())
-    }
-}
-
 #[cfg(feature = "bytemuck")]
 unsafe impl<T, const D: usize> bytemuck::Zeroable for Rotation<T, D>
 where
@@ -6,6 +6,15 @@ use crate::base::{SMatrix, Scalar};

 use crate::geometry::Rotation;

+impl<T, const D: usize> Default for Rotation<T, D>
+where
+    T: Scalar + Zero + One,
+{
+    fn default() -> Self {
+        Self::identity()
+    }
+}
+
 /// # Identity
 impl<T, const D: usize> Rotation<T, D>
 where
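As with `Isometry` and `OPoint` above, `Default` for `Rotation` is just the identity. Illustrative check:

    use nalgebra::Rotation3;

    let r = Rotation3::<f64>::default();
    assert_eq!(r, Rotation3::identity());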
@@ -15,9 +24,16 @@ where
     ///
     /// # Example
     /// ```
-    /// # use nalgebra::Quaternion;
-    /// let rot1 = Quaternion::identity();
-    /// let rot2 = Quaternion::new(1.0, 2.0, 3.0, 4.0);
+    /// # use nalgebra::{Rotation2, Rotation3};
+    /// # use nalgebra::Vector3;
+    /// let rot1 = Rotation2::identity();
+    /// let rot2 = Rotation2::new(std::f32::consts::FRAC_PI_2);
+    ///
+    /// assert_eq!(rot1 * rot2, rot2);
+    /// assert_eq!(rot2 * rot1, rot2);
+    ///
+    /// let rot1 = Rotation3::identity();
+    /// let rot2 = Rotation3::from_axis_angle(&Vector3::z_axis(), std::f32::consts::FRAC_PI_2);
     ///
     /// assert_eq!(rot1 * rot2, rot2);
     /// assert_eq!(rot2 * rot1, rot2);
@@ -60,7 +60,7 @@ impl<T: SimdRealField> Rotation2<T> {
 impl<T: SimdRealField> Rotation2<T> {
     /// Builds a rotation from a basis assumed to be orthonormal.
     ///
-    /// In order to get a valid unit-quaternion, the input must be an
+    /// In order to get a valid rotation matrix, the input must be an
     /// orthonormal basis, i.e., all vectors are normalized, and the are
     /// all orthogonal to each other. These invariants are not checked
     /// by this method.

@@ -204,7 +204,7 @@ impl<T: SimdRealField> Rotation2<T> {
         *self = Self::from_matrix_eps(self.matrix(), T::default_epsilon(), 0, c.into())
     }

-    /// Raise the quaternion to a given floating power, i.e., returns the rotation with the angle
+    /// Raise the rotation to a given floating power, i.e., returns the rotation with the angle
     /// of `self` multiplied by `n`.
     ///
     /// # Example

@@ -660,7 +660,7 @@ where
         other * self.inverse()
     }

-    /// Raise the quaternion to a given floating power, i.e., returns the rotation with the same
+    /// Raise the rotation to a given floating power, i.e., returns the rotation with the same
     /// axis as `self` and an angle equal to `self.angle()` multiplied by `n`.
     ///
     /// # Example

@@ -692,7 +692,7 @@ where

     /// Builds a rotation from a basis assumed to be orthonormal.
     ///
-    /// In order to get a valid unit-quaternion, the input must be an
+    /// In order to get a valid rotation matrix, the input must be an
     /// orthonormal basis, i.e., all vectors are normalized, and the are
     /// all orthogonal to each other. These invariants are not checked
     /// by this method.

@@ -846,7 +846,7 @@ impl<T: SimdRealField> Rotation3<T> {
         }
     }

-    /// The rotation axis and angle in ]0, pi] of this unit quaternion.
+    /// The rotation axis and angle in ]0, pi] of this rotation matrix.
     ///
     /// Returns `None` if the angle is zero.
     ///
@@ -0,0 +1,443 @@
+use approx::{AbsDiffEq, RelativeEq, UlpsEq};
+use num::{One, Zero};
+use std::fmt;
+use std::hash;
+#[cfg(feature = "abomonation-serialize")]
+use std::io::{Result as IOResult, Write};
+
+#[cfg(feature = "serde-serialize-no-std")]
+use serde::{Deserialize, Deserializer, Serialize, Serializer};
+
+#[cfg(feature = "abomonation-serialize")]
+use abomonation::Abomonation;
+
+use crate::base::allocator::Allocator;
+use crate::base::dimension::{DimNameAdd, DimNameSum, U1};
+use crate::base::storage::Owned;
+use crate::base::{Const, DefaultAllocator, OMatrix, OVector, SVector, Scalar};
+use crate::ClosedDiv;
+use crate::ClosedMul;
+
+use crate::geometry::Point;
+
+/// A scale which supports non-uniform scaling.
+#[repr(C)]
+#[cfg_attr(
+    all(not(target_os = "cuda"), feature = "cuda"),
+    derive(cust::DeviceCopy)
+)]
+#[derive(Copy, Clone)]
+pub struct Scale<T, const D: usize> {
+    /// The scale coordinates, i.e., how much is multiplied to a point's coordinates when it is
+    /// scaled.
+    pub vector: SVector<T, D>,
+}
+
+impl<T: fmt::Debug, const D: usize> fmt::Debug for Scale<T, D> {
+    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
+        self.vector.as_slice().fmt(formatter)
+    }
+}
+
+impl<T: Scalar + hash::Hash, const D: usize> hash::Hash for Scale<T, D>
+where
+    Owned<T, Const<D>>: hash::Hash,
+{
+    fn hash<H: hash::Hasher>(&self, state: &mut H) {
+        self.vector.hash(state)
+    }
+}
+
+#[cfg(feature = "bytemuck")]
+unsafe impl<T, const D: usize> bytemuck::Zeroable for Scale<T, D>
+where
+    T: Scalar + bytemuck::Zeroable,
+    SVector<T, D>: bytemuck::Zeroable,
+{
+}
+
+#[cfg(feature = "bytemuck")]
+unsafe impl<T, const D: usize> bytemuck::Pod for Scale<T, D>
+where
+    T: Scalar + bytemuck::Pod,
+    SVector<T, D>: bytemuck::Pod,
+{
+}
+
+#[cfg(feature = "abomonation-serialize")]
+impl<T, const D: usize> Abomonation for Scale<T, D>
+where
+    T: Scalar,
+    SVector<T, D>: Abomonation,
+{
+    unsafe fn entomb<W: Write>(&self, writer: &mut W) -> IOResult<()> {
+        self.vector.entomb(writer)
+    }
+
+    fn extent(&self) -> usize {
+        self.vector.extent()
+    }
+
+    unsafe fn exhume<'a, 'b>(&'a mut self, bytes: &'b mut [u8]) -> Option<&'b mut [u8]> {
+        self.vector.exhume(bytes)
+    }
+}
+
+#[cfg(feature = "serde-serialize-no-std")]
+impl<T: Scalar, const D: usize> Serialize for Scale<T, D>
+where
+    Owned<T, Const<D>>: Serialize,
+{
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        self.vector.serialize(serializer)
+    }
+}
+
+#[cfg(feature = "serde-serialize-no-std")]
+impl<'a, T: Scalar, const D: usize> Deserialize<'a> for Scale<T, D>
+where
+    Owned<T, Const<D>>: Deserialize<'a>,
+{
+    fn deserialize<Des>(deserializer: Des) -> Result<Self, Des::Error>
+    where
+        Des: Deserializer<'a>,
+    {
+        let matrix = SVector::<T, D>::deserialize(deserializer)?;
+
+        Ok(Scale::from(matrix))
+    }
+}
+
+#[cfg(feature = "rkyv-serialize-no-std")]
+mod rkyv_impl {
+    use super::Scale;
+    use crate::base::SVector;
+    use rkyv::{offset_of, project_struct, Archive, Deserialize, Fallible, Serialize};
+
+    impl<T: Archive, const D: usize> Archive for Scale<T, D> {
+        type Archived = Scale<T::Archived, D>;
+        type Resolver = <SVector<T, D> as Archive>::Resolver;
+
+        fn resolve(
+            &self,
+            pos: usize,
+            resolver: Self::Resolver,
+            out: &mut core::mem::MaybeUninit<Self::Archived>,
+        ) {
+            self.vector.resolve(
+                pos + offset_of!(Self::Archived, vector),
+                resolver,
+                project_struct!(out: Self::Archived => vector),
+            );
+        }
+    }
+
+    impl<T: Serialize<S>, S: Fallible + ?Sized, const D: usize> Serialize<S> for Scale<T, D> {
+        fn serialize(&self, serializer: &mut S) -> Result<Self::Resolver, S::Error> {
+            self.vector.serialize(serializer)
+        }
+    }
+
+    impl<T: Archive, _D: Fallible + ?Sized, const D: usize> Deserialize<Scale<T, D>, _D>
+        for Scale<T::Archived, D>
+    where
+        T::Archived: Deserialize<T, _D>,
+    {
+        fn deserialize(&self, deserializer: &mut _D) -> Result<Scale<T, D>, _D::Error> {
+            Ok(Scale {
+                vector: self.vector.deserialize(deserializer)?,
+            })
+        }
+    }
+}
+
+impl<T: Scalar, const D: usize> Scale<T, D> {
+    /// Inverts `self`.
+    ///
+    /// # Example
+    /// ```
+    /// # use nalgebra::{Scale2, Scale3};
+    /// let t = Scale3::new(1.0, 2.0, 3.0);
+    /// assert_eq!(t * t.try_inverse().unwrap(), Scale3::identity());
+    /// assert_eq!(t.try_inverse().unwrap() * t, Scale3::identity());
+    ///
+    /// // Work in all dimensions.
+    /// let t = Scale2::new(1.0, 2.0);
+    /// assert_eq!(t * t.try_inverse().unwrap(), Scale2::identity());
+    /// assert_eq!(t.try_inverse().unwrap() * t, Scale2::identity());
+    ///
+    /// // Returns None if any coordinate is 0.
+    /// let t = Scale2::new(0.0, 2.0);
+    /// assert_eq!(t.try_inverse(), None);
+    /// ```
+    #[inline]
+    #[must_use = "Did you mean to use try_inverse_mut()?"]
+    pub fn try_inverse(&self) -> Option<Scale<T, D>>
+    where
+        T: ClosedDiv + One + Zero,
+    {
+        for i in 0..D {
+            if self.vector[i] == T::zero() {
+                return None;
+            }
+        }
+        return Some(self.vector.map(|e| T::one() / e).into());
+    }
+
+    /// Inverts `self`.
+    ///
+    /// # Example
+    /// ```
+    /// # use nalgebra::{Scale2, Scale3};
+    ///
+    /// unsafe {
+    ///     let t = Scale3::new(1.0, 2.0, 3.0);
+    ///     assert_eq!(t * t.inverse_unchecked(), Scale3::identity());
+    ///     assert_eq!(t.inverse_unchecked() * t, Scale3::identity());
+    ///
+    ///     // Work in all dimensions.
+    ///     let t = Scale2::new(1.0, 2.0);
+    ///     assert_eq!(t * t.inverse_unchecked(), Scale2::identity());
+    ///     assert_eq!(t.inverse_unchecked() * t, Scale2::identity());
+    /// }
+    /// ```
+    #[inline]
+    #[must_use]
+    pub unsafe fn inverse_unchecked(&self) -> Scale<T, D>
+    where
+        T: ClosedDiv + One,
+    {
+        return self.vector.map(|e| T::one() / e).into();
+    }
+
+    /// Inverts `self`.
+    ///
+    /// # Example
+    /// ```
+    /// # use nalgebra::{Scale2, Scale3};
+    /// let t = Scale3::new(1.0, 2.0, 3.0);
+    /// assert_eq!(t * t.pseudo_inverse(), Scale3::identity());
+    /// assert_eq!(t.pseudo_inverse() * t, Scale3::identity());
+    ///
+    /// // Work in all dimensions.
+    /// let t = Scale2::new(1.0, 2.0);
+    /// assert_eq!(t * t.pseudo_inverse(), Scale2::identity());
+    /// assert_eq!(t.pseudo_inverse() * t, Scale2::identity());
+    ///
+    /// // Inverts only non-zero coordinates.
+    /// let t = Scale2::new(0.0, 2.0);
+    /// assert_eq!(t * t.pseudo_inverse(), Scale2::new(0.0, 1.0));
+    /// assert_eq!(t.pseudo_inverse() * t, Scale2::new(0.0, 1.0));
+    /// ```
+    #[inline]
+    #[must_use]
+    pub fn pseudo_inverse(&self) -> Scale<T, D>
+    where
+        T: ClosedDiv + One + Zero,
+    {
+        return self
+            .vector
+            .map(|e| {
+                if e != T::zero() {
+                    T::one() / e
+                } else {
+                    T::zero()
+                }
+            })
+            .into();
+    }
+
+    /// Converts this Scale into its equivalent homogeneous transformation matrix.
+    ///
+    /// # Example
+    /// ```
+    /// # use nalgebra::{Scale2, Scale3, Matrix3, Matrix4};
+    /// let t = Scale3::new(10.0, 20.0, 30.0);
+    /// let expected = Matrix4::new(10.0, 0.0, 0.0, 0.0,
+    ///                             0.0, 20.0, 0.0, 0.0,
+    ///                             0.0, 0.0, 30.0, 0.0,
+    ///                             0.0, 0.0, 0.0, 1.0);
+    /// assert_eq!(t.to_homogeneous(), expected);
+    ///
+    /// let t = Scale2::new(10.0, 20.0);
+    /// let expected = Matrix3::new(10.0, 0.0, 0.0,
+    ///                             0.0, 20.0, 0.0,
+    ///                             0.0, 0.0, 1.0);
+    /// assert_eq!(t.to_homogeneous(), expected);
+    /// ```
+    #[inline]
+    #[must_use]
+    pub fn to_homogeneous(&self) -> OMatrix<T, DimNameSum<Const<D>, U1>, DimNameSum<Const<D>, U1>>
+    where
+        T: Zero + One + Clone,
+        Const<D>: DimNameAdd<U1>,
+        DefaultAllocator: Allocator<T, DimNameSum<Const<D>, U1>, DimNameSum<Const<D>, U1>>
+            + Allocator<T, DimNameSum<Const<D>, U1>, U1>,
+    {
+        // TODO: use self.vector.push() instead. We can’t right now because
||||||
|
// that would require the DimAdd bound (but here we use DimNameAdd).
|
||||||
|
// This should be fixable once Rust gets a more complete support of
|
||||||
|
// const-generics.
|
||||||
|
let mut v = OVector::from_element(T::one());
|
||||||
|
for i in 0..D {
|
||||||
|
v[i] = self.vector[i].clone();
|
||||||
|
}
|
||||||
|
return OMatrix::from_diagonal(&v);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Inverts `self` in-place.
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
/// ```
|
||||||
|
/// # use nalgebra::{Scale2, Scale3};
|
||||||
|
/// let t = Scale3::new(1.0, 2.0, 3.0);
|
||||||
|
/// let mut inv_t = Scale3::new(1.0, 2.0, 3.0);
|
||||||
|
/// assert!(inv_t.try_inverse_mut());
|
||||||
|
/// assert_eq!(t * inv_t, Scale3::identity());
|
||||||
|
/// assert_eq!(inv_t * t, Scale3::identity());
|
||||||
|
///
|
||||||
|
/// // Work in all dimensions.
|
||||||
|
/// let t = Scale2::new(1.0, 2.0);
|
||||||
|
/// let mut inv_t = Scale2::new(1.0, 2.0);
|
||||||
|
/// assert!(inv_t.try_inverse_mut());
|
||||||
|
/// assert_eq!(t * inv_t, Scale2::identity());
|
||||||
|
/// assert_eq!(inv_t * t, Scale2::identity());
|
||||||
|
///
|
||||||
|
/// // Does not perform any operation if a coordinate is 0.
|
||||||
|
/// let mut t = Scale2::new(0.0, 2.0);
|
||||||
|
/// assert!(!t.try_inverse_mut());
|
||||||
|
/// ```
|
||||||
|
#[inline]
|
||||||
|
pub fn try_inverse_mut(&mut self) -> bool
|
||||||
|
where
|
||||||
|
T: ClosedDiv + One + Zero,
|
||||||
|
{
|
||||||
|
if let Some(v) = self.try_inverse() {
|
||||||
|
self.vector = v.vector;
|
||||||
|
true
|
||||||
|
} else {
|
||||||
|
false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Scalar + ClosedMul, const D: usize> Scale<T, D> {
|
||||||
|
/// Translate the given point.
|
||||||
|
///
|
||||||
|
/// This is the same as the multiplication `self * pt`.
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
/// ```
|
||||||
|
/// # use nalgebra::{Scale3, Point3};
|
||||||
|
/// let t = Scale3::new(1.0, 2.0, 3.0);
|
||||||
|
/// let transformed_point = t.transform_point(&Point3::new(4.0, 5.0, 6.0));
|
||||||
|
/// assert_eq!(transformed_point, Point3::new(4.0, 10.0, 18.0));
|
||||||
|
/// ```
|
||||||
|
#[inline]
|
||||||
|
#[must_use]
|
||||||
|
pub fn transform_point(&self, pt: &Point<T, D>) -> Point<T, D> {
|
||||||
|
self * pt
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Scalar + ClosedDiv + ClosedMul + One + Zero, const D: usize> Scale<T, D> {
|
||||||
|
/// Translate the given point by the inverse of this Scale.
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
/// ```
|
||||||
|
/// # use nalgebra::{Scale3, Point3};
|
||||||
|
/// let t = Scale3::new(1.0, 2.0, 3.0);
|
||||||
|
/// let transformed_point = t.try_inverse_transform_point(&Point3::new(4.0, 6.0, 6.0)).unwrap();
|
||||||
|
/// assert_eq!(transformed_point, Point3::new(4.0, 3.0, 2.0));
|
||||||
|
///
|
||||||
|
/// // Returns None if the inverse doesn't exist.
|
||||||
|
/// let t = Scale3::new(1.0, 0.0, 3.0);
|
||||||
|
/// let transformed_point = t.try_inverse_transform_point(&Point3::new(4.0, 6.0, 6.0));
|
||||||
|
/// assert_eq!(transformed_point, None);
|
||||||
|
/// ```
|
||||||
|
#[inline]
|
||||||
|
#[must_use]
|
||||||
|
pub fn try_inverse_transform_point(&self, pt: &Point<T, D>) -> Option<Point<T, D>> {
|
||||||
|
self.try_inverse().map(|s| s * pt)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Scalar + Eq, const D: usize> Eq for Scale<T, D> {}
|
||||||
|
|
||||||
|
impl<T: Scalar + PartialEq, const D: usize> PartialEq for Scale<T, D> {
|
||||||
|
#[inline]
|
||||||
|
fn eq(&self, right: &Scale<T, D>) -> bool {
|
||||||
|
self.vector == right.vector
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Scalar + AbsDiffEq, const D: usize> AbsDiffEq for Scale<T, D>
|
||||||
|
where
|
||||||
|
T::Epsilon: Clone,
|
||||||
|
{
|
||||||
|
type Epsilon = T::Epsilon;
|
||||||
|
|
||||||
|
#[inline]
|
||||||
|
fn default_epsilon() -> Self::Epsilon {
|
||||||
|
T::default_epsilon()
|
||||||
|
}
|
||||||
|
|
||||||
|
#[inline]
|
||||||
|
fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool {
|
||||||
|
self.vector.abs_diff_eq(&other.vector, epsilon)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Scalar + RelativeEq, const D: usize> RelativeEq for Scale<T, D>
|
||||||
|
where
|
||||||
|
T::Epsilon: Clone,
|
||||||
|
{
|
||||||
|
#[inline]
|
||||||
|
fn default_max_relative() -> Self::Epsilon {
|
||||||
|
T::default_max_relative()
|
||||||
|
}
|
||||||
|
|
||||||
|
#[inline]
|
||||||
|
fn relative_eq(
|
||||||
|
&self,
|
||||||
|
other: &Self,
|
||||||
|
epsilon: Self::Epsilon,
|
||||||
|
max_relative: Self::Epsilon,
|
||||||
|
) -> bool {
|
||||||
|
self.vector
|
||||||
|
.relative_eq(&other.vector, epsilon, max_relative)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Scalar + UlpsEq, const D: usize> UlpsEq for Scale<T, D>
|
||||||
|
where
|
||||||
|
T::Epsilon: Clone,
|
||||||
|
{
|
||||||
|
#[inline]
|
||||||
|
fn default_max_ulps() -> u32 {
|
||||||
|
T::default_max_ulps()
|
||||||
|
}
|
||||||
|
|
||||||
|
#[inline]
|
||||||
|
fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool {
|
||||||
|
self.vector.ulps_eq(&other.vector, epsilon, max_ulps)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Display
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
impl<T: Scalar + fmt::Display, const D: usize> fmt::Display for Scale<T, D> {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
|
let precision = f.precision().unwrap_or(3);
|
||||||
|
|
||||||
|
writeln!(f, "Scale {{")?;
|
||||||
|
write!(f, "{:.*}", precision, self.vector)?;
|
||||||
|
writeln!(f, "}}")
|
||||||
|
}
|
||||||
|
}
|
|
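Editor's note: a minimal usage sketch (not part of the diff) tying together the inversion helpers added above, assuming a nalgebra build that ships this new `Scale` type:

```
use nalgebra::{Point3, Scale3};

fn main() {
    // Non-uniform scale along x, y, z.
    let s = Scale3::new(2.0, 4.0, 8.0);

    // Scaling a point multiplies each coordinate component-wise.
    let p = s.transform_point(&Point3::new(1.0, 1.0, 1.0));
    assert_eq!(p, Point3::new(2.0, 4.0, 8.0));

    // The inverse exists because no scale factor is zero.
    let inv = s.try_inverse().unwrap();
    assert_eq!(inv * p, Point3::new(1.0, 1.0, 1.0));

    // A zero factor only has a pseudo-inverse.
    let flat = Scale3::new(2.0, 0.0, 8.0);
    assert!(flat.try_inverse().is_none());
    let pinv = flat.pseudo_inverse();
    assert_eq!(pinv.vector.y, 0.0);
}
```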
@@ -0,0 +1,19 @@
use crate::geometry::Scale;

/// A 1-dimensional scale.
pub type Scale1<T> = Scale<T, 1>;

/// A 2-dimensional scale.
pub type Scale2<T> = Scale<T, 2>;

/// A 3-dimensional scale.
pub type Scale3<T> = Scale<T, 3>;

/// A 4-dimensional scale.
pub type Scale4<T> = Scale<T, 4>;

/// A 5-dimensional scale.
pub type Scale5<T> = Scale<T, 5>;

/// A 6-dimensional scale.
pub type Scale6<T> = Scale<T, 6>;
@@ -0,0 +1,123 @@
#[cfg(feature = "arbitrary")]
use crate::base::storage::Owned;
#[cfg(feature = "arbitrary")]
use quickcheck::{Arbitrary, Gen};

use num::One;
#[cfg(feature = "rand-no-std")]
use rand::{
    distributions::{Distribution, Standard},
    Rng,
};

use simba::scalar::{ClosedMul, SupersetOf};

use crate::base::{SVector, Scalar};
use crate::geometry::Scale;

impl<T: Scalar, const D: usize> Scale<T, D> {
    /// Creates a new identity scale.
    ///
    /// # Example
    /// ```
    /// # use nalgebra::{Point2, Point3, Scale2, Scale3};
    /// let t = Scale2::identity();
    /// let p = Point2::new(1.0, 2.0);
    /// assert_eq!(t * p, p);
    ///
    /// // Works in all dimensions.
    /// let t = Scale3::identity();
    /// let p = Point3::new(1.0, 2.0, 3.0);
    /// assert_eq!(t * p, p);
    /// ```
    #[inline]
    pub fn identity() -> Scale<T, D>
    where
        T: One,
    {
        Scale::from(SVector::from_element(T::one()))
    }

    /// Cast the components of `self` to another type.
    ///
    /// # Example
    /// ```
    /// # use nalgebra::Scale2;
    /// let tra = Scale2::new(1.0f64, 2.0);
    /// let tra2 = tra.cast::<f32>();
    /// assert_eq!(tra2, Scale2::new(1.0f32, 2.0));
    /// ```
    pub fn cast<To: Scalar>(self) -> Scale<To, D>
    where
        Scale<To, D>: SupersetOf<Self>,
    {
        crate::convert(self)
    }
}

impl<T: Scalar + One + ClosedMul, const D: usize> One for Scale<T, D> {
    #[inline]
    fn one() -> Self {
        Self::identity()
    }
}

#[cfg(feature = "rand-no-std")]
impl<T: Scalar, const D: usize> Distribution<Scale<T, D>> for Standard
where
    Standard: Distribution<T>,
{
    /// Generate an arbitrary random variate for testing purposes.
    #[inline]
    fn sample<G: Rng + ?Sized>(&self, rng: &mut G) -> Scale<T, D> {
        Scale::from(rng.gen::<SVector<T, D>>())
    }
}

#[cfg(feature = "arbitrary")]
impl<T: Scalar + Arbitrary + Send, const D: usize> Arbitrary for Scale<T, D>
where
    Owned<T, crate::Const<D>>: Send,
{
    #[inline]
    fn arbitrary(rng: &mut Gen) -> Self {
        let v: SVector<T, D> = Arbitrary::arbitrary(rng);
        Self::from(v)
    }
}

/*
 *
 * Small Scale construction from components.
 *
 */
macro_rules! componentwise_constructors_impl(
    ($($doc: expr; $D: expr, $($args: ident:$irow: expr),*);* $(;)*) => {$(
        impl<T> Scale<T, $D>
        {
            #[doc = "Initializes this Scale from its components."]
            #[doc = "# Example\n```"]
            #[doc = $doc]
            #[doc = "```"]
            #[inline]
            pub const fn new($($args: T),*) -> Self {
                Self { vector: SVector::<T, $D>::new($($args),*) }
            }
        }
    )*}
);

componentwise_constructors_impl!(
    "# use nalgebra::Scale1;\nlet t = Scale1::new(1.0);\nassert!(t.vector.x == 1.0);";
    1, x:0;
    "# use nalgebra::Scale2;\nlet t = Scale2::new(1.0, 2.0);\nassert!(t.vector.x == 1.0 && t.vector.y == 2.0);";
    2, x:0, y:1;
    "# use nalgebra::Scale3;\nlet t = Scale3::new(1.0, 2.0, 3.0);\nassert!(t.vector.x == 1.0 && t.vector.y == 2.0 && t.vector.z == 3.0);";
    3, x:0, y:1, z:2;
    "# use nalgebra::Scale4;\nlet t = Scale4::new(1.0, 2.0, 3.0, 4.0);\nassert!(t.vector.x == 1.0 && t.vector.y == 2.0 && t.vector.z == 3.0 && t.vector.w == 4.0);";
    4, x:0, y:1, z:2, w:3;
    "# use nalgebra::Scale5;\nlet t = Scale5::new(1.0, 2.0, 3.0, 4.0, 5.0);\nassert!(t.vector.x == 1.0 && t.vector.y == 2.0 && t.vector.z == 3.0 && t.vector.w == 4.0 && t.vector.a == 5.0);";
    5, x:0, y:1, z:2, w:3, a:4;
    "# use nalgebra::Scale6;\nlet t = Scale6::new(1.0, 2.0, 3.0, 4.0, 5.0, 6.0);\nassert!(t.vector.x == 1.0 && t.vector.y == 2.0 && t.vector.z == 3.0 && t.vector.w == 4.0 && t.vector.a == 5.0 && t.vector.b == 6.0);";
    6, x:0, y:1, z:2, w:3, a:4, b:5;
);
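Editor's note: the `Distribution<Scale<T, D>>` impl above also makes scales directly samplable from a `rand` generator. A small sketch (not part of the diff), assuming the crate is built with its rand support and that the consumer depends on `rand` itself:

```
use nalgebra::Scale3;
use rand::Rng;

fn main() {
    let mut rng = rand::thread_rng();

    // Each scale factor is drawn component-wise from the Standard distribution.
    let s: Scale3<f64> = rng.gen();
    println!("sampled scale factors: {:?}", s.vector);
}
```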
@@ -0,0 +1,233 @@
use num::{One, Zero};

use simba::scalar::{RealField, SubsetOf, SupersetOf};
use simba::simd::PrimitiveSimdValue;

use crate::base::allocator::Allocator;
use crate::base::dimension::{DimNameAdd, DimNameSum, U1};
use crate::base::{Const, DefaultAllocator, OMatrix, OVector, SVector, Scalar};

use crate::geometry::{Scale, SuperTCategoryOf, TAffine, Transform};
use crate::Point;

/*
 * This file provides the following conversions:
 * =============================================
 *
 * Scale -> Scale
 * Scale -> Transform
 * Scale -> Matrix (homogeneous)
 */

impl<T1, T2, const D: usize> SubsetOf<Scale<T2, D>> for Scale<T1, D>
where
    T1: Scalar,
    T2: Scalar + SupersetOf<T1>,
{
    #[inline]
    fn to_superset(&self) -> Scale<T2, D> {
        Scale::from(self.vector.to_superset())
    }

    #[inline]
    fn is_in_subset(rot: &Scale<T2, D>) -> bool {
        crate::is_convertible::<_, SVector<T1, D>>(&rot.vector)
    }

    #[inline]
    fn from_superset_unchecked(rot: &Scale<T2, D>) -> Self {
        Scale {
            vector: rot.vector.to_subset_unchecked(),
        }
    }
}

impl<T1, T2, C, const D: usize> SubsetOf<Transform<T2, C, D>> for Scale<T1, D>
where
    T1: RealField,
    T2: RealField + SupersetOf<T1>,
    C: SuperTCategoryOf<TAffine>,
    Const<D>: DimNameAdd<U1>,
    DefaultAllocator: Allocator<T1, DimNameSum<Const<D>, U1>, DimNameSum<Const<D>, U1>>
        + Allocator<T1, DimNameSum<Const<D>, U1>, U1>
        + Allocator<T2, DimNameSum<Const<D>, U1>, DimNameSum<Const<D>, U1>>,
{
    #[inline]
    fn to_superset(&self) -> Transform<T2, C, D> {
        Transform::from_matrix_unchecked(self.to_homogeneous().to_superset())
    }

    #[inline]
    fn is_in_subset(t: &Transform<T2, C, D>) -> bool {
        <Self as SubsetOf<_>>::is_in_subset(t.matrix())
    }

    #[inline]
    fn from_superset_unchecked(t: &Transform<T2, C, D>) -> Self {
        Self::from_superset_unchecked(t.matrix())
    }
}

impl<T1, T2, const D: usize>
    SubsetOf<OMatrix<T2, DimNameSum<Const<D>, U1>, DimNameSum<Const<D>, U1>>> for Scale<T1, D>
where
    T1: RealField,
    T2: RealField + SupersetOf<T1>,
    Const<D>: DimNameAdd<U1>,
    DefaultAllocator: Allocator<T1, DimNameSum<Const<D>, U1>, DimNameSum<Const<D>, U1>>
        + Allocator<T1, DimNameSum<Const<D>, U1>, U1>
        + Allocator<T2, DimNameSum<Const<D>, U1>, DimNameSum<Const<D>, U1>>,
{
    #[inline]
    fn to_superset(&self) -> OMatrix<T2, DimNameSum<Const<D>, U1>, DimNameSum<Const<D>, U1>> {
        self.to_homogeneous().to_superset()
    }

    #[inline]
    fn is_in_subset(m: &OMatrix<T2, DimNameSum<Const<D>, U1>, DimNameSum<Const<D>, U1>>) -> bool {
        if m[(D, D)] != T2::one() {
            return false;
        }
        for i in 0..D + 1 {
            for j in 0..D + 1 {
                if i != j && m[(i, j)] != T2::zero() {
                    return false;
                }
            }
        }
        true
    }

    #[inline]
    fn from_superset_unchecked(
        m: &OMatrix<T2, DimNameSum<Const<D>, U1>, DimNameSum<Const<D>, U1>>,
    ) -> Self {
        let v = m.fixed_slice::<D, D>(0, 0).diagonal();
        Self {
            vector: crate::convert_unchecked(v),
        }
    }
}

impl<T: Scalar + Zero + One, const D: usize> From<Scale<T, D>>
    for OMatrix<T, DimNameSum<Const<D>, U1>, DimNameSum<Const<D>, U1>>
where
    Const<D>: DimNameAdd<U1>,
    DefaultAllocator: Allocator<T, DimNameSum<Const<D>, U1>, DimNameSum<Const<D>, U1>>
        + Allocator<T, DimNameSum<Const<D>, U1>, U1>
        + Allocator<T, Const<D>>,
{
    #[inline]
    fn from(t: Scale<T, D>) -> Self {
        t.to_homogeneous()
    }
}

impl<T: Scalar, const D: usize> From<OVector<T, Const<D>>> for Scale<T, D> {
    #[inline]
    fn from(vector: OVector<T, Const<D>>) -> Self {
        Scale { vector }
    }
}

impl<T: Scalar, const D: usize> From<[T; D]> for Scale<T, D> {
    #[inline]
    fn from(coords: [T; D]) -> Self {
        Scale {
            vector: coords.into(),
        }
    }
}

impl<T: Scalar, const D: usize> From<Point<T, D>> for Scale<T, D> {
    #[inline]
    fn from(pt: Point<T, D>) -> Self {
        Scale { vector: pt.coords }
    }
}

impl<T: Scalar, const D: usize> From<Scale<T, D>> for [T; D] {
    #[inline]
    fn from(t: Scale<T, D>) -> Self {
        t.vector.into()
    }
}

impl<T: Scalar + PrimitiveSimdValue, const D: usize> From<[Scale<T::Element, D>; 2]> for Scale<T, D>
where
    T: From<[<T as simba::simd::SimdValue>::Element; 2]>,
    T::Element: Scalar,
{
    #[inline]
    fn from(arr: [Scale<T::Element, D>; 2]) -> Self {
        Self::from(OVector::from([
            arr[0].vector.clone(),
            arr[1].vector.clone(),
        ]))
    }
}

impl<T: Scalar + PrimitiveSimdValue, const D: usize> From<[Scale<T::Element, D>; 4]> for Scale<T, D>
where
    T: From<[<T as simba::simd::SimdValue>::Element; 4]>,
    T::Element: Scalar,
{
    #[inline]
    fn from(arr: [Scale<T::Element, D>; 4]) -> Self {
        Self::from(OVector::from([
            arr[0].vector.clone(),
            arr[1].vector.clone(),
            arr[2].vector.clone(),
            arr[3].vector.clone(),
        ]))
    }
}

impl<T: Scalar + PrimitiveSimdValue, const D: usize> From<[Scale<T::Element, D>; 8]> for Scale<T, D>
where
    T: From<[<T as simba::simd::SimdValue>::Element; 8]>,
    T::Element: Scalar,
{
    #[inline]
    fn from(arr: [Scale<T::Element, D>; 8]) -> Self {
        Self::from(OVector::from([
            arr[0].vector.clone(),
            arr[1].vector.clone(),
            arr[2].vector.clone(),
            arr[3].vector.clone(),
            arr[4].vector.clone(),
            arr[5].vector.clone(),
            arr[6].vector.clone(),
            arr[7].vector.clone(),
        ]))
    }
}

impl<T: Scalar + PrimitiveSimdValue, const D: usize> From<[Scale<T::Element, D>; 16]>
    for Scale<T, D>
where
    T: From<[<T as simba::simd::SimdValue>::Element; 16]>,
    T::Element: Scalar,
{
    #[inline]
    fn from(arr: [Scale<T::Element, D>; 16]) -> Self {
        Self::from(OVector::from([
            arr[0].vector.clone(),
            arr[1].vector.clone(),
            arr[2].vector.clone(),
            arr[3].vector.clone(),
            arr[4].vector.clone(),
            arr[5].vector.clone(),
            arr[6].vector.clone(),
            arr[7].vector.clone(),
            arr[8].vector.clone(),
            arr[9].vector.clone(),
            arr[10].vector.clone(),
            arr[11].vector.clone(),
            arr[12].vector.clone(),
            arr[13].vector.clone(),
            arr[14].vector.clone(),
            arr[15].vector.clone(),
        ]))
    }
}
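Editor's note: the `SubsetOf`/`From` impls above make a scale round-trippable through its homogeneous matrix form. A small sketch (not part of the diff), assuming a nalgebra build that ships this new `Scale` type:

```
use nalgebra::{Matrix4, Scale3};

fn main() {
    let s = Scale3::new(1.0f64, 2.0, 3.0);

    // A Scale converts losslessly into its homogeneous 4x4 matrix form.
    let m: Matrix4<f64> = s.clone().into();
    assert_eq!(m, s.to_homogeneous());

    // A purely diagonal homogeneous matrix converts back (fallibly, since an
    // arbitrary matrix is not necessarily a pure scale).
    let s2: Scale3<f64> = nalgebra::try_convert(m).unwrap();
    assert_eq!(s2, s);
}
```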
@ -0,0 +1,39 @@
|
||||||
|
use std::ops::{Deref, DerefMut};
|
||||||
|
|
||||||
|
use crate::base::coordinates::{X, XY, XYZ, XYZW, XYZWA, XYZWAB};
|
||||||
|
use crate::base::Scalar;
|
||||||
|
|
||||||
|
use crate::geometry::Scale;
|
||||||
|
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Give coordinates to Scale{1 .. 6}
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
macro_rules! deref_impl(
|
||||||
|
($D: expr, $Target: ident $(, $comps: ident)*) => {
|
||||||
|
impl<T: Scalar> Deref for Scale<T, $D> {
|
||||||
|
type Target = $Target<T>;
|
||||||
|
|
||||||
|
#[inline]
|
||||||
|
fn deref(&self) -> &Self::Target {
|
||||||
|
self.vector.deref()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Scalar> DerefMut for Scale<T, $D> {
|
||||||
|
#[inline]
|
||||||
|
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||||
|
self.vector.deref_mut()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
);
|
||||||
|
|
||||||
|
deref_impl!(1, X, x);
|
||||||
|
deref_impl!(2, XY, x, y);
|
||||||
|
deref_impl!(3, XYZ, x, y, z);
|
||||||
|
deref_impl!(4, XYZW, x, y, z, w);
|
||||||
|
deref_impl!(5, XYZWA, x, y, z, w, a);
|
||||||
|
deref_impl!(6, XYZWAB, x, y, z, w, a, b);
|
|
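Editor's note: the `Deref`/`DerefMut` impls above give low-dimensional scales named component access, just like vectors and translations. A minimal sketch (not part of the diff):

```
use nalgebra::Scale3;

fn main() {
    let mut s = Scale3::new(1.0, 2.0, 3.0);

    // Deref to the XYZ coordinate view gives named component access...
    assert_eq!(s.x, 1.0);
    assert_eq!(s.z, 3.0);

    // ...and DerefMut allows editing a single factor in place.
    s.y = 10.0;
    assert_eq!(s.vector.y, 10.0);
}
```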
@@ -0,0 +1,125 @@
use std::ops::{Mul, MulAssign};

use simba::scalar::ClosedMul;

use crate::base::constraint::{SameNumberOfColumns, SameNumberOfRows, ShapeConstraint};
use crate::base::dimension::U1;
use crate::base::{Const, SVector, Scalar};

use crate::geometry::{Point, Scale};

// Scale × Scale
add_sub_impl!(Mul, mul, ClosedMul;
    (Const<D>, U1), (Const<D>, U1) -> (Const<D>, U1)
    const D; for; where;
    self: &'a Scale<T, D>, right: &'b Scale<T, D>, Output = Scale<T, D>;
    Scale::from(self.vector.component_mul(&right.vector));
    'a, 'b);

add_sub_impl!(Mul, mul, ClosedMul;
    (Const<D>, U1), (Const<D>, U1) -> (Const<D>, U1)
    const D; for; where;
    self: &'a Scale<T, D>, right: Scale<T, D>, Output = Scale<T, D>;
    Scale::from(self.vector.component_mul(&right.vector));
    'a);

add_sub_impl!(Mul, mul, ClosedMul;
    (Const<D>, U1), (Const<D>, U1) -> (Const<D>, U1)
    const D; for; where;
    self: Scale<T, D>, right: &'b Scale<T, D>, Output = Scale<T, D>;
    Scale::from(self.vector.component_mul(&right.vector));
    'b);

add_sub_impl!(Mul, mul, ClosedMul;
    (Const<D>, U1), (Const<D>, U1) -> (Const<D>, U1)
    const D; for; where;
    self: Scale<T, D>, right: Scale<T, D>, Output = Scale<T, D>;
    Scale::from(self.vector.component_mul(&right.vector)); );

// Scale × scalar
add_sub_impl!(Mul, mul, ClosedMul;
    (Const<D>, U1), (Const<D>, U1) -> (Const<D>, U1)
    const D; for; where;
    self: &'a Scale<T, D>, right: T, Output = Scale<T, D>;
    Scale::from(&self.vector * right);
    'a);

add_sub_impl!(Mul, mul, ClosedMul;
    (Const<D>, U1), (Const<D>, U1) -> (Const<D>, U1)
    const D; for; where;
    self: Scale<T, D>, right: T, Output = Scale<T, D>;
    Scale::from(self.vector * right); );

// Scale × Point
add_sub_impl!(Mul, mul, ClosedMul;
    (Const<D>, U1), (Const<D>, U1) -> (Const<D>, U1)
    const D; for; where;
    self: &'a Scale<T, D>, right: &'b Point<T, D>, Output = Point<T, D>;
    Point::from(self.vector.component_mul(&right.coords));
    'a, 'b);

add_sub_impl!(Mul, mul, ClosedMul;
    (Const<D>, U1), (Const<D>, U1) -> (Const<D>, U1)
    const D; for; where;
    self: &'a Scale<T, D>, right: Point<T, D>, Output = Point<T, D>;
    Point::from(self.vector.component_mul(&right.coords));
    'a);

add_sub_impl!(Mul, mul, ClosedMul;
    (Const<D>, U1), (Const<D>, U1) -> (Const<D>, U1)
    const D; for; where;
    self: Scale<T, D>, right: &'b Point<T, D>, Output = Point<T, D>;
    Point::from(self.vector.component_mul(&right.coords));
    'b);

add_sub_impl!(Mul, mul, ClosedMul;
    (Const<D>, U1), (Const<D>, U1) -> (Const<D>, U1)
    const D; for; where;
    self: Scale<T, D>, right: Point<T, D>, Output = Point<T, D>;
    Point::from(self.vector.component_mul(&right.coords)); );

// Scale * Vector
add_sub_impl!(Mul, mul, ClosedMul;
    (Const<D>, U1), (Const<D>, U1) -> (Const<D>, U1)
    const D; for; where;
    self: &'a Scale<T, D>, right: &'b SVector<T, D>, Output = SVector<T, D>;
    SVector::from(self.vector.component_mul(&right));
    'a, 'b);

add_sub_impl!(Mul, mul, ClosedMul;
    (Const<D>, U1), (Const<D>, U1) -> (Const<D>, U1)
    const D; for; where;
    self: &'a Scale<T, D>, right: SVector<T, D>, Output = SVector<T, D>;
    SVector::from(self.vector.component_mul(&right));
    'a);

add_sub_impl!(Mul, mul, ClosedMul;
    (Const<D>, U1), (Const<D>, U1) -> (Const<D>, U1)
    const D; for; where;
    self: Scale<T, D>, right: &'b SVector<T, D>, Output = SVector<T, D>;
    SVector::from(self.vector.component_mul(&right));
    'b);

add_sub_impl!(Mul, mul, ClosedMul;
    (Const<D>, U1), (Const<D>, U1) -> (Const<D>, U1)
    const D; for; where;
    self: Scale<T, D>, right: SVector<T, D>, Output = SVector<T, D>;
    SVector::from(self.vector.component_mul(&right)); );

// Scale *= Scale
add_sub_assign_impl!(MulAssign, mul_assign, ClosedMul;
    const D;
    self: Scale<T, D>, right: &'b Scale<T, D>;
    self.vector.component_mul_assign(&right.vector);
    'b);

add_sub_assign_impl!(MulAssign, mul_assign, ClosedMul;
    const D;
    self: Scale<T, D>, right: Scale<T, D>;
    self.vector.component_mul_assign(&right.vector); );

// Scale ×= scalar
add_sub_assign_impl!(MulAssign, mul_assign, ClosedMul;
    const D;
    self: Scale<T, D>, right: T;
    self.vector *= right; );
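Editor's note: all of the operator overloads generated above are component-wise. A small sketch of what they provide (not part of the diff), using references so it works whether or not `Scale` is `Copy`:

```
use nalgebra::{Point2, Scale2, Vector2};

fn main() {
    let a = Scale2::new(2.0, 3.0);
    let b = Scale2::new(10.0, 100.0);

    // Scale × Point and Scale × Vector scale each coordinate.
    assert_eq!(&a * Point2::new(1.0, 1.0), Point2::new(2.0, 3.0));
    assert_eq!(&a * Vector2::new(4.0, 4.0), Vector2::new(8.0, 12.0));

    // Scale × Scale multiplies the factors component-wise.
    let mut c = &a * &b;
    assert_eq!(c, Scale2::new(20.0, 300.0));

    // The *= variants and Scale × scalar are also provided.
    c *= b;
    assert_eq!(c, Scale2::new(200.0, 30_000.0));
    assert_eq!(&a * 2.0, Scale2::new(4.0, 6.0));
}
```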
@@ -0,0 +1,49 @@
use simba::simd::SimdValue;

use crate::base::OVector;
use crate::Scalar;

use crate::geometry::Scale;

impl<T: Scalar + SimdValue, const D: usize> SimdValue for Scale<T, D>
where
    T::Element: Scalar,
{
    type Element = Scale<T::Element, D>;
    type SimdBool = T::SimdBool;

    #[inline]
    fn lanes() -> usize {
        T::lanes()
    }

    #[inline]
    fn splat(val: Self::Element) -> Self {
        OVector::splat(val.vector).into()
    }

    #[inline]
    fn extract(&self, i: usize) -> Self::Element {
        self.vector.extract(i).into()
    }

    #[inline]
    unsafe fn extract_unchecked(&self, i: usize) -> Self::Element {
        self.vector.extract_unchecked(i).into()
    }

    #[inline]
    fn replace(&mut self, i: usize, val: Self::Element) {
        self.vector.replace(i, val.vector)
    }

    #[inline]
    unsafe fn replace_unchecked(&mut self, i: usize, val: Self::Element) {
        self.vector.replace_unchecked(i, val.vector)
    }

    #[inline]
    fn select(self, cond: Self::SimdBool, other: Self) -> Self {
        self.vector.select(cond, other.vector).into()
    }
}
@ -23,7 +23,11 @@ use crate::geometry::{AbstractRotation, Isometry, Point, Translation};
|
||||||
|
|
||||||
/// A similarity, i.e., an uniform scaling, followed by a rotation, followed by a translation.
|
/// A similarity, i.e., an uniform scaling, followed by a rotation, followed by a translation.
|
||||||
#[repr(C)]
|
#[repr(C)]
|
||||||
#[derive(Debug)]
|
#[derive(Debug, Copy, Clone)]
|
||||||
|
#[cfg_attr(
|
||||||
|
all(not(target_os = "cuda"), feature = "cuda"),
|
||||||
|
derive(cust::DeviceCopy)
|
||||||
|
)]
|
||||||
#[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))]
|
#[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))]
|
||||||
#[cfg_attr(
|
#[cfg_attr(
|
||||||
feature = "serde-serialize-no-std",
|
feature = "serde-serialize-no-std",
|
||||||
|
@ -73,22 +77,6 @@ where
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T: Scalar + Copy + Zero, R: AbstractRotation<T, D> + Copy, const D: usize> Copy
|
|
||||||
for Similarity<T, R, D>
|
|
||||||
where
|
|
||||||
Owned<T, Const<D>>: Copy,
|
|
||||||
{
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: Scalar + Zero, R: AbstractRotation<T, D> + Clone, const D: usize> Clone
|
|
||||||
for Similarity<T, R, D>
|
|
||||||
{
|
|
||||||
#[inline]
|
|
||||||
fn clone(&self) -> Self {
|
|
||||||
Similarity::from_isometry(self.isometry.clone(), self.scaling.clone())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: Scalar + Zero, R, const D: usize> Similarity<T, R, D>
|
impl<T: Scalar + Zero, R, const D: usize> Similarity<T, R, D>
|
||||||
where
|
where
|
||||||
R: AbstractRotation<T, D>,
|
R: AbstractRotation<T, D>,
|
||||||
|
@ -415,7 +403,7 @@ where
|
||||||
#[inline]
|
#[inline]
|
||||||
fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool {
|
fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool {
|
||||||
self.isometry
|
self.isometry
|
||||||
.ulps_eq(&other.isometry, epsilon.clone(), max_ulps.clone())
|
.ulps_eq(&other.isometry, epsilon.clone(), max_ulps)
|
||||||
&& self.scaling.ulps_eq(&other.scaling, epsilon, max_ulps)
|
&& self.scaling.ulps_eq(&other.scaling, epsilon, max_ulps)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -20,6 +20,16 @@ use crate::{
|
||||||
Translation, UnitComplex, UnitQuaternion,
|
Translation, UnitComplex, UnitQuaternion,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
impl<T: SimdRealField, R, const D: usize> Default for Similarity<T, R, D>
|
||||||
|
where
|
||||||
|
T::Element: SimdRealField,
|
||||||
|
R: AbstractRotation<T, D>,
|
||||||
|
{
|
||||||
|
fn default() -> Self {
|
||||||
|
Self::identity()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl<T: SimdRealField, R, const D: usize> Similarity<T, R, D>
|
impl<T: SimdRealField, R, const D: usize> Similarity<T, R, D>
|
||||||
where
|
where
|
||||||
T::Element: SimdRealField,
|
T::Element: SimdRealField,
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
use approx::{AbsDiffEq, RelativeEq, UlpsEq};
|
use approx::{AbsDiffEq, RelativeEq, UlpsEq};
|
||||||
use std::any::Any;
|
use std::any::Any;
|
||||||
use std::fmt::Debug;
|
use std::fmt::{self, Debug};
|
||||||
use std::hash;
|
use std::hash;
|
||||||
use std::marker::PhantomData;
|
use std::marker::PhantomData;
|
||||||
|
|
||||||
|
@ -60,14 +60,26 @@ where
|
||||||
|
|
||||||
/// Tag representing the most general (not necessarily inversible) `Transform` type.
|
/// Tag representing the most general (not necessarily inversible) `Transform` type.
|
||||||
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
|
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
|
||||||
|
#[cfg_attr(
|
||||||
|
all(not(target_os = "cuda"), feature = "cuda"),
|
||||||
|
derive(cust::DeviceCopy)
|
||||||
|
)]
|
||||||
pub enum TGeneral {}
|
pub enum TGeneral {}
|
||||||
|
|
||||||
/// Tag representing the most general inversible `Transform` type.
|
/// Tag representing the most general inversible `Transform` type.
|
||||||
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
|
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
|
||||||
|
#[cfg_attr(
|
||||||
|
all(not(target_os = "cuda"), feature = "cuda"),
|
||||||
|
derive(cust::DeviceCopy)
|
||||||
|
)]
|
||||||
pub enum TProjective {}
|
pub enum TProjective {}
|
||||||
|
|
||||||
/// Tag representing an affine `Transform`. Its bottom-row is equal to `(0, 0 ... 0, 1)`.
|
/// Tag representing an affine `Transform`. Its bottom-row is equal to `(0, 0 ... 0, 1)`.
|
||||||
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
|
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
|
||||||
|
#[cfg_attr(
|
||||||
|
all(not(target_os = "cuda"), feature = "cuda"),
|
||||||
|
derive(cust::DeviceCopy)
|
||||||
|
)]
|
||||||
pub enum TAffine {}
|
pub enum TAffine {}
|
||||||
|
|
||||||
impl TCategory for TGeneral {
|
impl TCategory for TGeneral {
|
||||||
|
@ -157,7 +169,6 @@ super_tcategory_impl!(
|
||||||
/// It is stored as a matrix with dimensions `(D + 1, D + 1)`, e.g., it stores a 4x4 matrix for a
|
/// It is stored as a matrix with dimensions `(D + 1, D + 1)`, e.g., it stores a 4x4 matrix for a
|
||||||
/// 3D transformation.
|
/// 3D transformation.
|
||||||
#[repr(C)]
|
#[repr(C)]
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct Transform<T: RealField, C: TCategory, const D: usize>
|
pub struct Transform<T: RealField, C: TCategory, const D: usize>
|
||||||
where
|
where
|
||||||
Const<D>: DimNameAdd<U1>,
|
Const<D>: DimNameAdd<U1>,
|
||||||
|
@ -167,6 +178,16 @@ where
|
||||||
_phantom: PhantomData<C>,
|
_phantom: PhantomData<C>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl<T: RealField + Debug, C: TCategory, const D: usize> Debug for Transform<T, C, D>
|
||||||
|
where
|
||||||
|
Const<D>: DimNameAdd<U1>,
|
||||||
|
DefaultAllocator: Allocator<T, DimNameSum<Const<D>, U1>, DimNameSum<Const<D>, U1>>,
|
||||||
|
{
|
||||||
|
fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
|
||||||
|
self.matrix.fmt(formatter)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl<T: RealField + hash::Hash, C: TCategory, const D: usize> hash::Hash for Transform<T, C, D>
|
impl<T: RealField + hash::Hash, C: TCategory, const D: usize> hash::Hash for Transform<T, C, D>
|
||||||
where
|
where
|
||||||
Const<D>: DimNameAdd<U1>,
|
Const<D>: DimNameAdd<U1>,
|
||||||
|
@ -186,6 +207,16 @@ where
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(all(not(target_os = "cuda"), feature = "cuda"))]
|
||||||
|
unsafe impl<T: RealField + cust::memory::DeviceCopy, C: TCategory, const D: usize>
|
||||||
|
cust::memory::DeviceCopy for Transform<T, C, D>
|
||||||
|
where
|
||||||
|
Const<D>: DimNameAdd<U1>,
|
||||||
|
DefaultAllocator: Allocator<T, DimNameSum<Const<D>, U1>, DimNameSum<Const<D>, U1>>,
|
||||||
|
Owned<T, DimNameSum<Const<D>, U1>, DimNameSum<Const<D>, U1>>: cust::memory::DeviceCopy,
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
impl<T: RealField, C: TCategory, const D: usize> Clone for Transform<T, C, D>
|
impl<T: RealField, C: TCategory, const D: usize> Clone for Transform<T, C, D>
|
||||||
where
|
where
|
||||||
Const<D>: DimNameAdd<U1>,
|
Const<D>: DimNameAdd<U1>,
|
||||||
|
|
|
@ -8,6 +8,16 @@ use crate::base::{Const, DefaultAllocator, OMatrix};
|
||||||
|
|
||||||
use crate::geometry::{TCategory, Transform};
|
use crate::geometry::{TCategory, Transform};
|
||||||
|
|
||||||
|
impl<T: RealField, C: TCategory, const D: usize> Default for Transform<T, C, D>
|
||||||
|
where
|
||||||
|
Const<D>: DimNameAdd<U1>,
|
||||||
|
DefaultAllocator: Allocator<T, DimNameSum<Const<D>, U1>, DimNameSum<Const<D>, U1>>,
|
||||||
|
{
|
||||||
|
fn default() -> Self {
|
||||||
|
Self::identity()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl<T: RealField, C: TCategory, const D: usize> Transform<T, C, D>
|
impl<T: RealField, C: TCategory, const D: usize> Transform<T, C, D>
|
||||||
where
|
where
|
||||||
Const<D>: DimNameAdd<U1>,
|
Const<D>: DimNameAdd<U1>,
|
||||||
|
|
|
@ -22,13 +22,23 @@ use crate::geometry::Point;
|
||||||
|
|
||||||
/// A translation.
|
/// A translation.
|
||||||
#[repr(C)]
|
#[repr(C)]
|
||||||
#[derive(Debug)]
|
#[cfg_attr(
|
||||||
|
all(not(target_os = "cuda"), feature = "cuda"),
|
||||||
|
derive(cust::DeviceCopy)
|
||||||
|
)]
|
||||||
|
#[derive(Copy, Clone)]
|
||||||
pub struct Translation<T, const D: usize> {
|
pub struct Translation<T, const D: usize> {
|
||||||
/// The translation coordinates, i.e., how much is added to a point's coordinates when it is
|
/// The translation coordinates, i.e., how much is added to a point's coordinates when it is
|
||||||
/// translated.
|
/// translated.
|
||||||
pub vector: SVector<T, D>,
|
pub vector: SVector<T, D>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl<T: fmt::Debug, const D: usize> fmt::Debug for Translation<T, D> {
|
||||||
|
fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
|
||||||
|
self.vector.as_slice().fmt(formatter)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl<T: Scalar + hash::Hash, const D: usize> hash::Hash for Translation<T, D>
|
impl<T: Scalar + hash::Hash, const D: usize> hash::Hash for Translation<T, D>
|
||||||
where
|
where
|
||||||
Owned<T, Const<D>>: hash::Hash,
|
Owned<T, Const<D>>: hash::Hash,
|
||||||
|
@ -38,18 +48,6 @@ where
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T: Scalar + Copy, const D: usize> Copy for Translation<T, D> {}
|
|
||||||
|
|
||||||
impl<T: Scalar, const D: usize> Clone for Translation<T, D>
|
|
||||||
where
|
|
||||||
Owned<T, Const<D>>: Clone,
|
|
||||||
{
|
|
||||||
#[inline]
|
|
||||||
fn clone(&self) -> Self {
|
|
||||||
Translation::from(self.vector.clone())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(feature = "bytemuck")]
|
#[cfg(feature = "bytemuck")]
|
||||||
unsafe impl<T, const D: usize> bytemuck::Zeroable for Translation<T, D>
|
unsafe impl<T, const D: usize> bytemuck::Zeroable for Translation<T, D>
|
||||||
where
|
where
|
||||||
|
|
|
@ -15,6 +15,12 @@ use simba::scalar::{ClosedAdd, SupersetOf};
|
||||||
use crate::base::{SVector, Scalar};
|
use crate::base::{SVector, Scalar};
|
||||||
use crate::geometry::Translation;
|
use crate::geometry::Translation;
|
||||||
|
|
||||||
|
impl<T: Scalar + Zero, const D: usize> Default for Translation<T, D> {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self::identity()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl<T: Scalar, const D: usize> Translation<T, D> {
|
impl<T: Scalar, const D: usize> Translation<T, D> {
|
||||||
/// Creates a new identity translation.
|
/// Creates a new identity translation.
|
||||||
///
|
///
|
||||||
|
|
|
@ -31,6 +31,9 @@ use std::cmp::{Eq, PartialEq};
|
||||||
/// * [Conversion to a matrix <span style="float:right;">`to_rotation_matrix`, `to_homogeneous`…</span>](#conversion-to-a-matrix)
|
/// * [Conversion to a matrix <span style="float:right;">`to_rotation_matrix`, `to_homogeneous`…</span>](#conversion-to-a-matrix)
|
||||||
pub type UnitComplex<T> = Unit<Complex<T>>;
|
pub type UnitComplex<T> = Unit<Complex<T>>;
|
||||||
|
|
||||||
|
#[cfg(all(not(target_os = "cuda"), feature = "cuda"))]
|
||||||
|
unsafe impl<T: cust::memory::DeviceCopy> cust::memory::DeviceCopy for UnitComplex<T> {}
|
||||||
|
|
||||||
impl<T: Scalar + PartialEq> PartialEq for UnitComplex<T> {
|
impl<T: Scalar + PartialEq> PartialEq for UnitComplex<T> {
|
||||||
#[inline]
|
#[inline]
|
||||||
fn eq(&self, rhs: &Self) -> bool {
|
fn eq(&self, rhs: &Self) -> bool {
|
||||||
|
@ -458,8 +461,7 @@ impl<T: RealField> UlpsEq for UnitComplex<T> {
|
||||||
|
|
||||||
#[inline]
|
#[inline]
|
||||||
fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool {
|
fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool {
|
||||||
self.re
|
self.re.ulps_eq(&other.re, epsilon.clone(), max_ulps)
|
||||||
.ulps_eq(&other.re, epsilon.clone(), max_ulps.clone())
|
|
||||||
&& self.im.ulps_eq(&other.im, epsilon, max_ulps)
|
&& self.im.ulps_eq(&other.im, epsilon, max_ulps)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -17,6 +17,15 @@ use crate::geometry::{Rotation2, UnitComplex};
|
||||||
use simba::scalar::{RealField, SupersetOf};
|
use simba::scalar::{RealField, SupersetOf};
|
||||||
use simba::simd::SimdRealField;
|
use simba::simd::SimdRealField;
|
||||||
|
|
||||||
|
impl<T: SimdRealField> Default for UnitComplex<T>
|
||||||
|
where
|
||||||
|
T::Element: SimdRealField,
|
||||||
|
{
|
||||||
|
fn default() -> Self {
|
||||||
|
Self::identity()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// # Identity
|
/// # Identity
|
||||||
impl<T: SimdRealField> UnitComplex<T>
|
impl<T: SimdRealField> UnitComplex<T>
|
||||||
where
|
where
|
||||||
|
|
|
@ -71,10 +71,11 @@ an optimized set of tools for computer graphics and physics. Those features incl
|
||||||
* Insertion and removal of rows of columns of a matrix.
|
* Insertion and removal of rows of columns of a matrix.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#![allow(unused_variables, unused_mut)]
|
|
||||||
#![deny(
|
#![deny(
|
||||||
missing_docs,
|
missing_docs,
|
||||||
nonstandard_style,
|
nonstandard_style,
|
||||||
|
unused_variables,
|
||||||
|
unused_mut,
|
||||||
unused_parens,
|
unused_parens,
|
||||||
unused_qualifications,
|
unused_qualifications,
|
||||||
unused_results,
|
unused_results,
|
||||||
|
|
|
@ -74,6 +74,14 @@ where
|
||||||
Cholesky { chol: matrix }
|
Cholesky { chol: matrix }
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Uses the given matrix as-is without any checks or modifications as the
|
||||||
|
/// Cholesky decomposition.
|
||||||
|
///
|
||||||
|
/// It is up to the user to ensure all invariants hold.
|
||||||
|
pub fn pack_dirty(matrix: OMatrix<T, D, D>) -> Self {
|
||||||
|
Cholesky { chol: matrix }
|
||||||
|
}
|
||||||
|
|
||||||
/// Retrieves the lower-triangular factor of the Cholesky decomposition with its strictly
|
/// Retrieves the lower-triangular factor of the Cholesky decomposition with its strictly
|
||||||
/// upper-triangular part filled with zeros.
|
/// upper-triangular part filled with zeros.
|
||||||
pub fn unpack(mut self) -> OMatrix<T, D, D> {
|
pub fn unpack(mut self) -> OMatrix<T, D, D> {
|
||||||
|
@ -163,7 +171,32 @@ where
|
||||||
///
|
///
|
||||||
/// Returns `None` if the input matrix is not definite-positive. The input matrix is assumed
|
/// Returns `None` if the input matrix is not definite-positive. The input matrix is assumed
|
||||||
/// to be symmetric and only the lower-triangular part is read.
|
/// to be symmetric and only the lower-triangular part is read.
|
||||||
pub fn new(mut matrix: OMatrix<T, D, D>) -> Option<Self> {
|
pub fn new(matrix: OMatrix<T, D, D>) -> Option<Self> {
|
||||||
|
Self::new_internal(matrix, None)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Attempts to approximate the Cholesky decomposition of `matrix` by
|
||||||
|
/// replacing non-positive values on the diagonals during the decomposition
|
||||||
|
/// with the given `substitute`.
|
||||||
|
///
|
||||||
|
/// [`try_sqrt`](ComplexField::try_sqrt) will be applied to the `substitute`
|
||||||
|
/// when it has to be used.
|
||||||
|
///
|
||||||
|
/// If your input matrix results only in positive values on the diagonals
|
||||||
|
/// during the decomposition, `substitute` is unused and the result is just
|
||||||
|
/// the same as if you used [`new`](Cholesky::new).
|
||||||
|
///
|
||||||
|
/// This method allows to compensate for matrices with very small or even
|
||||||
|
/// negative values due to numerical errors but necessarily results in only
|
||||||
|
/// an approximation: it is basically a hack. If you don't specifically need
|
||||||
|
/// Cholesky, it may be better to consider alternatives like the
|
||||||
|
/// [`LU`](crate::linalg::LU) decomposition/factorization.
|
||||||
|
pub fn new_with_substitute(matrix: OMatrix<T, D, D>, substitute: T) -> Option<Self> {
|
||||||
|
Self::new_internal(matrix, Some(substitute))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Common implementation for `new` and `new_with_substitute`.
|
||||||
|
fn new_internal(mut matrix: OMatrix<T, D, D>, substitute: Option<T>) -> Option<Self> {
|
||||||
assert!(matrix.is_square(), "The input matrix must be square.");
|
assert!(matrix.is_square(), "The input matrix must be square.");
|
||||||
|
|
||||||
let n = matrix.nrows();
|
let n = matrix.nrows();
|
||||||
|
@ -179,17 +212,25 @@ where
|
||||||
col_j.axpy(factor.conjugate(), &col_k, T::one());
|
col_j.axpy(factor.conjugate(), &col_k, T::one());
|
||||||
}
|
}
|
||||||
|
|
||||||
let diag = unsafe { matrix.get_unchecked((j, j)).clone() };
|
let sqrt_denom = |v: T| {
|
||||||
if !diag.is_zero() {
|
if v.is_zero() {
|
||||||
if let Some(denom) = diag.try_sqrt() {
|
return None;
|
||||||
unsafe {
|
|
||||||
*matrix.get_unchecked_mut((j, j)) = denom.clone();
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut col = matrix.slice_range_mut(j + 1.., j);
|
|
||||||
col /= denom;
|
|
||||||
continue;
|
|
||||||
}
|
}
|
||||||
|
v.try_sqrt()
|
||||||
|
};
|
||||||
|
|
||||||
|
let diag = unsafe { matrix.get_unchecked((j, j)).clone() };
|
||||||
|
|
||||||
|
if let Some(denom) =
|
||||||
|
sqrt_denom(diag).or_else(|| substitute.clone().and_then(sqrt_denom))
|
||||||
|
{
|
||||||
|
unsafe {
|
||||||
|
*matrix.get_unchecked_mut((j, j)) = denom.clone();
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut col = matrix.slice_range_mut(j + 1.., j);
|
||||||
|
col /= denom;
|
||||||
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
// The diagonal element is either zero or its square root could not
|
// The diagonal element is either zero or its square root could not
|
||||||
|
|
|
@ -1,8 +1,8 @@
|
||||||
use crate::storage::Storage;
|
use crate::storage::Storage;
|
||||||
use crate::{
|
use crate::{
|
||||||
Allocator, Bidiagonal, Cholesky, ColPivQR, ComplexField, DefaultAllocator, Dim, DimDiff,
|
Allocator, Bidiagonal, Cholesky, ColPivQR, ComplexField, DefaultAllocator, Dim, DimDiff,
|
||||||
DimMin, DimMinimum, DimSub, FullPivLU, Hessenberg, Matrix, RealField, Schur, SymmetricEigen,
|
DimMin, DimMinimum, DimSub, FullPivLU, Hessenberg, Matrix, OMatrix, RealField, Schur,
|
||||||
SymmetricTridiagonal, LU, QR, SVD, U1, UDU,
|
SymmetricEigen, SymmetricTridiagonal, LU, QR, SVD, U1, UDU,
|
||||||
};
|
};
|
||||||
|
|
||||||
/// # Rectangular matrix decomposition
|
/// # Rectangular matrix decomposition
|
||||||
|
@ -17,6 +17,7 @@ use crate::{
|
||||||
/// | LU with partial pivoting | `P⁻¹ * L * U` | `L` is lower-triangular with a diagonal filled with `1` and `U` is upper-triangular. `P` is a permutation matrix. |
|
/// | LU with partial pivoting | `P⁻¹ * L * U` | `L` is lower-triangular with a diagonal filled with `1` and `U` is upper-triangular. `P` is a permutation matrix. |
|
||||||
/// | LU with full pivoting | `P⁻¹ * L * U * Q⁻¹` | `L` is lower-triangular with a diagonal filled with `1` and `U` is upper-triangular. `P` and `Q` are permutation matrices. |
|
/// | LU with full pivoting | `P⁻¹ * L * U * Q⁻¹` | `L` is lower-triangular with a diagonal filled with `1` and `U` is upper-triangular. `P` and `Q` are permutation matrices. |
|
||||||
/// | SVD | `U * Σ * Vᵀ` | `U` and `V` are two orthogonal matrices and `Σ` is a diagonal matrix containing the singular values. |
|
/// | SVD | `U * Σ * Vᵀ` | `U` and `V` are two orthogonal matrices and `Σ` is a diagonal matrix containing the singular values. |
|
||||||
|
/// | Polar (Left Polar) | `P' * U` | `U` is semi-unitary/unitary and `P'` is a positive semi-definite Hermitian Matrix
|
||||||
impl<T: ComplexField, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
|
impl<T: ComplexField, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
|
||||||
/// Computes the bidiagonalization using householder reflections.
|
/// Computes the bidiagonalization using householder reflections.
|
||||||
pub fn bidiagonalize(self) -> Bidiagonal<T, R, C>
|
pub fn bidiagonalize(self) -> Bidiagonal<T, R, C>
|
||||||
|
@ -74,7 +75,31 @@ impl<T: ComplexField, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Computes the Singular Value Decomposition using implicit shift.
|
/// Computes the Singular Value Decomposition using implicit shift.
|
||||||
|
/// The singular values are guaranteed to be sorted in descending order.
|
||||||
|
/// If this order is not required consider using `svd_unordered`.
|
||||||
pub fn svd(self, compute_u: bool, compute_v: bool) -> SVD<T, R, C>
|
pub fn svd(self, compute_u: bool, compute_v: bool) -> SVD<T, R, C>
|
||||||
|
where
|
||||||
|
R: DimMin<C>,
|
||||||
|
DimMinimum<R, C>: DimSub<U1>, // for Bidiagonal.
|
||||||
|
DefaultAllocator: Allocator<T, R, C>
|
||||||
|
+ Allocator<T, C>
|
||||||
|
+ Allocator<T, R>
|
||||||
|
+ Allocator<T, DimDiff<DimMinimum<R, C>, U1>>
|
||||||
|
+ Allocator<T, DimMinimum<R, C>, C>
|
||||||
|
+ Allocator<T, R, DimMinimum<R, C>>
|
||||||
|
+ Allocator<T, DimMinimum<R, C>>
|
||||||
|
+ Allocator<T::RealField, DimMinimum<R, C>>
|
||||||
|
+ Allocator<T::RealField, DimDiff<DimMinimum<R, C>, U1>>
|
||||||
|
+ Allocator<(usize, usize), DimMinimum<R, C>>
|
||||||
|
+ Allocator<(T::RealField, usize), DimMinimum<R, C>>,
|
||||||
|
{
|
||||||
|
SVD::new(self.into_owned(), compute_u, compute_v)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Computes the Singular Value Decomposition using implicit shift.
|
||||||
|
/// The singular values are not guaranteed to be sorted in any particular order.
|
||||||
|
/// If a descending order is required, consider using `svd` instead.
|
||||||
|
pub fn svd_unordered(self, compute_u: bool, compute_v: bool) -> SVD<T, R, C>
|
||||||
where
|
where
|
||||||
R: DimMin<C>,
|
R: DimMin<C>,
|
||||||
DimMinimum<R, C>: DimSub<U1>, // for Bidiagonal.
|
DimMinimum<R, C>: DimSub<U1>, // for Bidiagonal.
|
||||||
|
@ -88,10 +113,12 @@ impl<T: ComplexField, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
|
||||||
+ Allocator<T::RealField, DimMinimum<R, C>>
|
+ Allocator<T::RealField, DimMinimum<R, C>>
|
||||||
+ Allocator<T::RealField, DimDiff<DimMinimum<R, C>, U1>>,
|
+ Allocator<T::RealField, DimDiff<DimMinimum<R, C>, U1>>,
|
||||||
{
|
{
|
||||||
SVD::new(self.into_owned(), compute_u, compute_v)
|
SVD::new_unordered(self.into_owned(), compute_u, compute_v)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Attempts to compute the Singular Value Decomposition of `matrix` using implicit shift.
|
/// Attempts to compute the Singular Value Decomposition of `matrix` using implicit shift.
|
||||||
|
/// The singular values are guaranteed to be sorted in descending order.
|
||||||
|
/// If this order is not required consider using `try_svd_unordered`.
|
||||||
///
|
///
|
||||||
/// # Arguments
|
/// # Arguments
|
||||||
///
|
///
|
||||||
|
@ -119,10 +146,103 @@ impl<T: ComplexField, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
|
||||||
+ Allocator<T, R, DimMinimum<R, C>>
|
+ Allocator<T, R, DimMinimum<R, C>>
|
||||||
+ Allocator<T, DimMinimum<R, C>>
|
+ Allocator<T, DimMinimum<R, C>>
|
||||||
+ Allocator<T::RealField, DimMinimum<R, C>>
|
+ Allocator<T::RealField, DimMinimum<R, C>>
|
||||||
+ Allocator<T::RealField, DimDiff<DimMinimum<R, C>, U1>>,
|
+ Allocator<T::RealField, DimDiff<DimMinimum<R, C>, U1>>
|
||||||
|
+ Allocator<(usize, usize), DimMinimum<R, C>>
|
||||||
|
+ Allocator<(T::RealField, usize), DimMinimum<R, C>>,
|
||||||
{
|
{
|
||||||
SVD::try_new(self.into_owned(), compute_u, compute_v, eps, max_niter)
|
SVD::try_new(self.into_owned(), compute_u, compute_v, eps, max_niter)
|
||||||
}
|
}
|
||||||
+
+    /// Attempts to compute the Singular Value Decomposition of `matrix` using implicit shift.
+    /// The singular values are not guaranteed to be sorted in any particular order.
+    /// If a descending order is required, consider using `try_svd` instead.
+    ///
+    /// # Arguments
+    ///
+    /// * `compute_u` − set this to `true` to enable the computation of left-singular vectors.
+    /// * `compute_v` − set this to `true` to enable the computation of right-singular vectors.
+    /// * `eps` − tolerance used to determine when a value converged to 0.
+    /// * `max_niter` − maximum total number of iterations performed by the algorithm. If this
+    /// number of iteration is exceeded, `None` is returned. If `niter == 0`, then the algorithm
+    /// continues indefinitely until convergence.
+    pub fn try_svd_unordered(
+        self,
+        compute_u: bool,
+        compute_v: bool,
+        eps: T::RealField,
+        max_niter: usize,
+    ) -> Option<SVD<T, R, C>>
+    where
+        R: DimMin<C>,
+        DimMinimum<R, C>: DimSub<U1>, // for Bidiagonal.
+        DefaultAllocator: Allocator<T, R, C>
+            + Allocator<T, C>
+            + Allocator<T, R>
+            + Allocator<T, DimDiff<DimMinimum<R, C>, U1>>
+            + Allocator<T, DimMinimum<R, C>, C>
+            + Allocator<T, R, DimMinimum<R, C>>
+            + Allocator<T, DimMinimum<R, C>>
+            + Allocator<T::RealField, DimMinimum<R, C>>
+            + Allocator<T::RealField, DimDiff<DimMinimum<R, C>, U1>>,
+    {
+        SVD::try_new_unordered(self.into_owned(), compute_u, compute_v, eps, max_niter)
+    }
+
+    /// Computes the Polar Decomposition of a `matrix` (indirectly uses SVD).
+    pub fn polar(self) -> (OMatrix<T, R, R>, OMatrix<T, R, C>)
+    where
+        R: DimMin<C>,
+        DimMinimum<R, C>: DimSub<U1>, // for Bidiagonal.
+        DefaultAllocator: Allocator<T, R, C>
+            + Allocator<T, DimMinimum<R, C>, R>
+            + Allocator<T, DimMinimum<R, C>>
+            + Allocator<T, R, R>
+            + Allocator<T, DimMinimum<R, C>, DimMinimum<R, C>>
+            + Allocator<T, C>
+            + Allocator<T, R>
+            + Allocator<T, DimDiff<DimMinimum<R, C>, U1>>
+            + Allocator<T, DimMinimum<R, C>, C>
+            + Allocator<T, R, DimMinimum<R, C>>
+            + Allocator<T, DimMinimum<R, C>>
+            + Allocator<T::RealField, DimMinimum<R, C>>
+            + Allocator<T::RealField, DimDiff<DimMinimum<R, C>, U1>>,
+    {
+        SVD::new_unordered(self.into_owned(), true, true)
+            .to_polar()
+            .unwrap()
+    }
+
+    /// Attempts to compute the Polar Decomposition of a `matrix` (indirectly uses SVD).
+    ///
+    /// # Arguments
+    ///
+    /// * `eps` − tolerance used to determine when a value converged to 0 when computing the SVD.
+    /// * `max_niter` − maximum total number of iterations performed by the SVD computation algorithm.
+    pub fn try_polar(
+        self,
+        eps: T::RealField,
+        max_niter: usize,
+    ) -> Option<(OMatrix<T, R, R>, OMatrix<T, R, C>)>
+    where
+        R: DimMin<C>,
+        DimMinimum<R, C>: DimSub<U1>, // for Bidiagonal.
+        DefaultAllocator: Allocator<T, R, C>
+            + Allocator<T, DimMinimum<R, C>, R>
+            + Allocator<T, DimMinimum<R, C>>
+            + Allocator<T, R, R>
+            + Allocator<T, DimMinimum<R, C>, DimMinimum<R, C>>
+            + Allocator<T, C>
+            + Allocator<T, R>
+            + Allocator<T, DimDiff<DimMinimum<R, C>, U1>>
+            + Allocator<T, DimMinimum<R, C>, C>
+            + Allocator<T, R, DimMinimum<R, C>>
+            + Allocator<T, DimMinimum<R, C>>
+            + Allocator<T::RealField, DimMinimum<R, C>>
+            + Allocator<T::RealField, DimDiff<DimMinimum<R, C>, U1>>,
+    {
+        SVD::try_new_unordered(self.into_owned(), true, true, eps, max_niter)
+            .and_then(|svd| svd.to_polar())
+    }
 }

 /// # Square matrix decomposition
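For reference, a minimal usage sketch of the API added above (not part of this commit; the concrete matrix and the `main` wrapper are illustrative only): `try_svd` keeps the singular values in descending order, `try_svd_unordered` skips the sorting pass, and `polar` returns the left polar factors with `m ≈ p * u`.

use nalgebra::Matrix3;

fn main() {
    let m = Matrix3::new(2.0, -1.0, 0.0, -1.0, 2.0, -1.0, 0.0, -1.0, 2.0);

    // Ordered variant: singular values sorted in descending order.
    let svd = m.try_svd(true, true, 1.0e-9, 0).unwrap();
    // Unordered variant: same factorization, without the extra sorting pass.
    let svd_unordered = m.try_svd_unordered(true, true, 1.0e-9, 0).unwrap();
    assert_eq!(svd.singular_values.len(), svd_unordered.singular_values.len());

    // Left polar decomposition: m ≈ p * u, with p positive semi-definite.
    let (p, u) = m.polar();
    assert!((p * u - m).norm() < 1.0e-7);
}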
@@ -49,7 +49,7 @@ impl<T: ComplexField, D: DimMin<D, Output = D>, S: Storage<T, D, D>> SquareMatri
         let m33 = self.get_unchecked((2, 2)).clone();

         let minor_m12_m23 = m22.clone() * m33.clone() - m32.clone() * m23.clone();
-        let minor_m11_m23 = m21.clone() * m33.clone() - m31.clone() * m23.clone();
+        let minor_m11_m23 = m21.clone() * m33 - m31.clone() * m23;
         let minor_m11_m22 = m21 * m32 - m31 * m22;

         m11 * minor_m12_m23 - m12 * minor_m11_m23 + m13 * minor_m11_m22
@@ -11,6 +11,47 @@ use crate::{
 use crate::num::Zero;

+/// Precomputed factorials for integers in range `0..=34`.
+/// Note: `35!` does not fit into 128 bits.
+// TODO: find a better place for this array?
+const FACTORIAL: [u128; 35] = [
+    1,
+    1,
+    2,
+    6,
+    24,
+    120,
+    720,
+    5040,
+    40320,
+    362880,
+    3628800,
+    39916800,
+    479001600,
+    6227020800,
+    87178291200,
+    1307674368000,
+    20922789888000,
+    355687428096000,
+    6402373705728000,
+    121645100408832000,
+    2432902008176640000,
+    51090942171709440000,
+    1124000727777607680000,
+    25852016738884976640000,
+    620448401733239439360000,
+    15511210043330985984000000,
+    403291461126605635584000000,
+    10888869450418352160768000000,
+    304888344611713860501504000000,
+    8841761993739701954543616000000,
+    265252859812191058636308480000000,
+    8222838654177922817725562880000000,
+    263130836933693530167218012160000000,
+    8683317618811886495518194401280000000,
+    295232799039604140847618609643520000000,
+];
+
 // https://github.com/scipy/scipy/blob/c1372d8aa90a73d8a52f135529293ff4edb98fc8/scipy/sparse/linalg/matfuncs.py
 struct ExpmPadeHelper<T, D>
 where
@@ -321,8 +362,8 @@ where
         self.calc_a2();
         self.calc_a4();
         self.calc_a6();
-        let mb2 = self.a2.as_ref().unwrap() * convert::<f64, T>(2.0_f64.powf(-2.0 * s.clone()));
-        let mb4 = self.a4.as_ref().unwrap() * convert::<f64, T>(2.0.powf(-4.0 * s.clone()));
+        let mb2 = self.a2.as_ref().unwrap() * convert::<f64, T>(2.0_f64.powf(-2.0 * s));
+        let mb4 = self.a4.as_ref().unwrap() * convert::<f64, T>(2.0.powf(-4.0 * s));
         let mb6 = self.a6.as_ref().unwrap() * convert::<f64, T>(2.0.powf(-6.0 * s));

         let u2 = &mb6 * (&mb6 * b[13].clone() + &mb4 * b[11].clone() + &mb2 * b[9].clone());
@@ -342,15 +383,17 @@ where
     }
 }

-fn factorial(n: u128) -> u128 {
-    if n == 1 {
-        return 1;
+/// Compute `n!`
+#[inline(always)]
+fn factorial(n: usize) -> u128 {
+    match FACTORIAL.get(n) {
+        Some(f) => *f,
+        None => panic!("{}! is greater than u128::MAX", n),
     }
-    n * factorial(n - 1)
 }

 /// Compute the 1-norm of a non-negative integer power of a non-negative matrix.
-fn onenorm_matrix_power_nonm<T, D>(a: &OMatrix<T, D, D>, p: u64) -> T
+fn onenorm_matrix_power_nonm<T, D>(a: &OMatrix<T, D, D>, p: usize) -> T
 where
     T: RealField,
     D: Dim,
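A quick standalone check (not part of this commit) that the table-based lookup agrees with a direct product; `factorial_iter` is a hypothetical helper written only for this comparison.

fn factorial_iter(n: u128) -> u128 {
    (1..=n).product()
}

fn main() {
    // 20! and 34! match the corresponding entries of the FACTORIAL table above.
    assert_eq!(factorial_iter(20), 2432902008176640000);
    assert_eq!(factorial_iter(34), 295232799039604140847618609643520000000);
}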
@@ -367,7 +410,7 @@ where
     v.max()
 }

-fn ell<T, D>(a: &OMatrix<T, D, D>, m: u64) -> u64
+fn ell<T, D>(a: &OMatrix<T, D, D>, m: usize) -> u64
 where
     T: ComplexField,
     D: Dim,
@@ -376,8 +419,6 @@ where
         + Allocator<T::RealField, D>
         + Allocator<T::RealField, D, D>,
 {
-    // 2m choose m = (2m)!/(m! * (2m-m)!)
-
     let a_abs = a.map(|x| x.abs());

     let a_abs_onenorm = onenorm_matrix_power_nonm(&a_abs, 2 * m + 1);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
let choose_2m_m =
|
// 2m choose m = (2m)!/(m! * (2m-m)!) = (2m)!/((m!)^2)
|
||||||
factorial(2 * m as u128) / (factorial(m as u128) * factorial(2 * m as u128 - m as u128));
|
let m_factorial = factorial(m);
|
||||||
let abs_c_recip = choose_2m_m * factorial(2 * m as u128 + 1);
|
let choose_2m_m = factorial(2 * m) / (m_factorial * m_factorial);
|
||||||
|
|
||||||
|
let abs_c_recip = choose_2m_m * factorial(2 * m + 1);
|
||||||
let alpha = a_abs_onenorm / one_norm(a);
|
let alpha = a_abs_onenorm / one_norm(a);
|
||||||
let alpha: f64 = try_convert(alpha).unwrap() / abs_c_recip as f64;
|
let alpha: f64 = try_convert(alpha).unwrap() / abs_c_recip as f64;
|
||||||
|
|
||||||
|
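Spelled out, the identity behind the refactor above (standard combinatorics; the second equation only restates the code):

$$\binom{2m}{m} \;=\; \frac{(2m)!}{m!\,(2m-m)!} \;=\; \frac{(2m)!}{(m!)^2},\qquad \mathtt{abs\_c\_recip} \;=\; \binom{2m}{m}\,(2m+1)!$$

so computing `m_factorial = m!` once and squaring it avoids the redundant `(2m - m)!` evaluation of the old code.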
@@ -510,6 +553,7 @@ where
 #[cfg(test)]
 mod tests {
     #[test]
+    #[allow(clippy::float_cmp)]
     fn one_norm() {
         use crate::Matrix3;
         let m = Matrix3::new(-3.0, 5.0, 7.0, 2.0, 6.0, 4.0, 0.0, 2.0, 8.0);
@@ -47,7 +47,7 @@ impl<T: ComplexField> GivensRotation<T> {
         if denom > eps {
             let norm = sign0.scale(denom.clone());
             let c = mod0 / denom;
-            let s = s.clone() / norm.clone();
+            let s = s / norm.clone();
             Some((Self { c, s }, norm))
         } else {
             None
@@ -317,7 +317,7 @@ where
     pub fn is_invertible(&self) -> bool {
         assert!(
             self.lu.is_square(),
-            "QR: unable to test the invertibility of a non-square matrix."
+            "LU: unable to test the invertibility of a non-square matrix."
         );

         for i in 0..self.lu.nrows() {
@@ -24,6 +24,8 @@ mod qr;
 mod schur;
 mod solve;
 mod svd;
+mod svd2;
+mod svd3;
 mod symmetric_eigen;
 mod symmetric_tridiagonal;
 mod udu;
@@ -1,83 +1,71 @@
 //! This module provides the matrix exponential (pow) function to square matrices.

-use std::ops::DivAssign;
-
 use crate::{
     allocator::Allocator,
     storage::{Storage, StorageMut},
-    DefaultAllocator, DimMin, Matrix, OMatrix,
+    DefaultAllocator, DimMin, Matrix, OMatrix, Scalar,
 };
-use num::PrimInt;
-use simba::scalar::ComplexField;
+use num::{One, Zero};
+use simba::scalar::{ClosedAdd, ClosedMul};

-impl<T: ComplexField, D, S> Matrix<T, D, D, S>
+impl<T, D, S> Matrix<T, D, D, S>
 where
+    T: Scalar + Zero + One + ClosedAdd + ClosedMul,
     D: DimMin<D, Output = D>,
     S: StorageMut<T, D, D>,
     DefaultAllocator: Allocator<T, D, D> + Allocator<T, D>,
 {
-    /// Attempts to raise this matrix to an integral power `e` in-place. If this
-    /// matrix is non-invertible and `e` is negative, it leaves this matrix
-    /// untouched and returns `false`. Otherwise, it returns `true` and
-    /// overwrites this matrix with the result.
-    pub fn pow_mut<I: PrimInt + DivAssign>(&mut self, mut e: I) -> bool {
-        let zero = I::zero();
-
-        // A matrix raised to the zeroth power is just the identity.
-        if e == zero {
-            self.fill_with_identity();
-            return true;
-        }
-
-        // If e is negative, we compute the inverse matrix, then raise it to the
-        // power of -e.
-        if e < zero && !self.try_inverse_mut() {
-            return false;
-        }
-
-        let one = I::one();
-        let two = I::from(2u8).unwrap();
-
-        // We use the buffer to hold the result of multiplier ^ 2, thus avoiding
-        // extra allocations.
-        let mut multiplier = self.clone_owned();
-        let mut buf = self.clone_owned();
-
-        // Exponentiation by squares.
-        loop {
-            if e % two == one {
-                self.mul_to(&multiplier, &mut buf);
-                self.copy_from(&buf);
-            }
-
-            e /= two;
-            multiplier.mul_to(&multiplier, &mut buf);
-            multiplier.copy_from(&buf);
-
-            if e == zero {
-                return true;
+    /// Raises this matrix to an integral power `exp` in-place.
+    pub fn pow_mut(&mut self, mut exp: u32) {
+        // A matrix raised to the zeroth power is just the identity.
+        if exp == 0 {
+            self.fill_with_identity();
+        } else if exp > 1 {
+            // We use the buffer to hold the result of multiplier^2, thus avoiding
+            // extra allocations.
+            let mut x = self.clone_owned();
+            let mut workspace = self.clone_owned();
+
+            if exp % 2 == 0 {
+                self.fill_with_identity();
+            } else {
+                // Avoid an useless multiplication by the identity
+                // if the exponent is odd.
+                exp -= 1;
+            }
+
+            // Exponentiation by squares.
+            loop {
+                if exp % 2 == 1 {
+                    self.mul_to(&x, &mut workspace);
+                    self.copy_from(&workspace);
+                }
+
+                exp /= 2;
+
+                if exp == 0 {
+                    break;
+                }
+
+                x.mul_to(&x, &mut workspace);
+                x.copy_from(&workspace);
             }
         }
     }
 }

-impl<T: ComplexField, D, S: Storage<T, D, D>> Matrix<T, D, D, S>
+impl<T, D, S: Storage<T, D, D>> Matrix<T, D, D, S>
 where
+    T: Scalar + Zero + One + ClosedAdd + ClosedMul,
     D: DimMin<D, Output = D>,
     S: StorageMut<T, D, D>,
     DefaultAllocator: Allocator<T, D, D> + Allocator<T, D>,
 {
-    /// Attempts to raise this matrix to an integral power `e`. If this matrix
-    /// is non-invertible and `e` is negative, it returns `None`. Otherwise, it
-    /// returns the result as a new matrix. Uses exponentiation by squares.
+    /// Raise this matrix to an integral power `exp`.
     #[must_use]
-    pub fn pow<I: PrimInt + DivAssign>(&self, e: I) -> Option<OMatrix<T, D, D>> {
-        let mut clone = self.clone_owned();
-
-        if clone.pow_mut(e) {
-            Some(clone)
-        } else {
-            None
-        }
+    pub fn pow(&self, exp: u32) -> OMatrix<T, D, D> {
+        let mut result = self.clone_owned();
+        result.pow_mut(exp);
+        result
     }
 }
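A small usage sketch (not part of this commit) of the reworked integer-power API; the 2×2 matrix and the `main` wrapper are illustrative only.

use nalgebra::Matrix2;

fn main() {
    let m = Matrix2::new(1.0, 2.0, 3.0, 4.0);

    // `pow` now takes a `u32` and always succeeds (negative exponents are gone),
    // using exponentiation by squares internally.
    let m3 = m.pow(3);
    assert_eq!(m3, m * m * m);

    // In-place variant.
    let mut n = m;
    n.pow_mut(3);
    assert_eq!(n, m3);
}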
@@ -147,6 +147,11 @@ where
         &self.qr
     }

+    #[must_use]
+    pub(crate) fn diag_internal(&self) -> &OVector<T, DimMinimum<R, C>> {
+        &self.diag
+    }
+
     /// Multiplies the provided matrix by the transpose of the `Q` matrix of this decomposition.
     pub fn q_tr_mul<R2: Dim, C2: Dim, S2>(&self, rhs: &mut Matrix<T, R2, C2, S2>)
     // TODO: do we need a static constraint on the number of rows of rhs?
@@ -1,5 +1,6 @@
 #[cfg(feature = "serde-serialize-no-std")]
 use serde::{Deserialize, Serialize};
+use std::any::TypeId;

 use approx::AbsDiffEq;
 use num::{One, Zero};
@@ -9,6 +10,7 @@ use crate::base::{DefaultAllocator, Matrix, Matrix2x3, OMatrix, OVector, Vector2
 use crate::constraint::{SameNumberOfRows, ShapeConstraint};
 use crate::dimension::{Dim, DimDiff, DimMin, DimMinimum, DimSub, U1};
 use crate::storage::Storage;
+use crate::{Matrix2, Matrix3, RawStorage, U2, U3};
 use simba::scalar::{ComplexField, RealField};

 use crate::linalg::givens::GivensRotation;
@@ -78,9 +80,21 @@ where
         + Allocator<T::RealField, DimMinimum<R, C>>
         + Allocator<T::RealField, DimDiff<DimMinimum<R, C>, U1>>,
 {
+    fn use_special_always_ordered_svd2() -> bool {
+        TypeId::of::<OMatrix<T, R, C>>() == TypeId::of::<Matrix2<T::RealField>>()
+            && TypeId::of::<Self>() == TypeId::of::<SVD<T::RealField, U2, U2>>()
+    }
+
+    fn use_special_always_ordered_svd3() -> bool {
+        TypeId::of::<OMatrix<T, R, C>>() == TypeId::of::<Matrix3<T::RealField>>()
+            && TypeId::of::<Self>() == TypeId::of::<SVD<T::RealField, U3, U3>>()
+    }
+
     /// Computes the Singular Value Decomposition of `matrix` using implicit shift.
-    pub fn new(matrix: OMatrix<T, R, C>, compute_u: bool, compute_v: bool) -> Self {
-        Self::try_new(
+    /// The singular values are not guaranteed to be sorted in any particular order.
+    /// If a descending order is required, consider using `new` instead.
+    pub fn new_unordered(matrix: OMatrix<T, R, C>, compute_u: bool, compute_v: bool) -> Self {
+        Self::try_new_unordered(
             matrix,
             compute_u,
             compute_v,
@@ -91,6 +105,8 @@ where
     }

     /// Attempts to compute the Singular Value Decomposition of `matrix` using implicit shift.
+    /// The singular values are not guaranteed to be sorted in any particular order.
+    /// If a descending order is required, consider using `try_new` instead.
     ///
     /// # Arguments
     ///
@@ -100,7 +116,7 @@ where
     /// * `max_niter` − maximum total number of iterations performed by the algorithm. If this
     /// number of iteration is exceeded, `None` is returned. If `niter == 0`, then the algorithm
     /// continues indefinitely until convergence.
-    pub fn try_new(
+    pub fn try_new_unordered(
         mut matrix: OMatrix<T, R, C>,
         compute_u: bool,
         compute_v: bool,
@@ -113,6 +129,21 @@ where
         );
         let (nrows, ncols) = matrix.shape_generic();
         let min_nrows_ncols = nrows.min(ncols);
+
+        if Self::use_special_always_ordered_svd2() {
+            // SAFETY: the reference transmutes are OK since we checked that the types match exactly.
+            let matrix: &Matrix2<T::RealField> = unsafe { std::mem::transmute(&matrix) };
+            let result = super::svd2::svd_ordered2(matrix, compute_u, compute_v);
+            let typed_result: &Self = unsafe { std::mem::transmute(&result) };
+            return Some(typed_result.clone());
+        } else if Self::use_special_always_ordered_svd3() {
+            // SAFETY: the reference transmutes are OK since we checked that the types match exactly.
+            let matrix: &Matrix3<T::RealField> = unsafe { std::mem::transmute(&matrix) };
+            let result = super::svd3::svd_ordered3(matrix, compute_u, compute_v, eps, max_niter);
+            let typed_result: &Self = unsafe { std::mem::transmute(&result) };
+            return Some(typed_result.clone());
+        }
+
         let dim = min_nrows_ncols.value();

         let m_amax = matrix.camax();
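The dispatch above compares `TypeId`s at runtime to select a dedicated code path when the generic parameters are exactly the 2×2 or 3×3 real case. A standalone sketch of the same pattern (not part of this commit, and without the unsafe reference transmutes the diff uses once the types are known to match):

use std::any::TypeId;

fn describe<T: 'static>() -> &'static str {
    if TypeId::of::<T>() == TypeId::of::<f32>() {
        "specialized f32 path"
    } else if TypeId::of::<T>() == TypeId::of::<f64>() {
        "specialized f64 path"
    } else {
        "generic fallback"
    }
}

fn main() {
    assert_eq!(describe::<f32>(), "specialized f32 path");
    assert_eq!(describe::<u32>(), "generic fallback");
}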
@@ -610,6 +641,144 @@ where
             }
         }
     }
+
+    /// converts SVD results to Polar decomposition form of the original Matrix: `A = P' * U`.
+    ///
+    /// The polar decomposition used here is Left Polar Decomposition (or Reverse Polar Decomposition)
+    /// Returns None if the singular vectors of the SVD haven't been calculated
+    pub fn to_polar(&self) -> Option<(OMatrix<T, R, R>, OMatrix<T, R, C>)>
+    where
+        DefaultAllocator: Allocator<T, R, C> //result
+            + Allocator<T, DimMinimum<R, C>, R> // adjoint
+            + Allocator<T, DimMinimum<R, C>> // mapped vals
+            + Allocator<T, R, R> // result
+            + Allocator<T, DimMinimum<R, C>, DimMinimum<R, C>>, // square matrix
+    {
+        match (&self.u, &self.v_t) {
+            (Some(u), Some(v_t)) => Some((
+                u * OMatrix::from_diagonal(&self.singular_values.map(|e| T::from_real(e)))
+                    * u.adjoint(),
+                u * v_t,
+            )),
+            _ => None,
+        }
+    }
+}
+
+impl<T: ComplexField, R: DimMin<C>, C: Dim> SVD<T, R, C>
+where
+    DimMinimum<R, C>: DimSub<U1>, // for Bidiagonal.
+    DefaultAllocator: Allocator<T, R, C>
+        + Allocator<T, C>
+        + Allocator<T, R>
+        + Allocator<T, DimDiff<DimMinimum<R, C>, U1>>
+        + Allocator<T, DimMinimum<R, C>, C>
+        + Allocator<T, R, DimMinimum<R, C>>
+        + Allocator<T, DimMinimum<R, C>>
+        + Allocator<T::RealField, DimMinimum<R, C>>
+        + Allocator<T::RealField, DimDiff<DimMinimum<R, C>, U1>>
+        + Allocator<(usize, usize), DimMinimum<R, C>> // for sorted singular values
+        + Allocator<(T::RealField, usize), DimMinimum<R, C>>, // for sorted singular values
+{
+    /// Computes the Singular Value Decomposition of `matrix` using implicit shift.
+    /// The singular values are guaranteed to be sorted in descending order.
+    /// If this order is not required consider using `new_unordered`.
+    pub fn new(matrix: OMatrix<T, R, C>, compute_u: bool, compute_v: bool) -> Self {
+        let mut svd = Self::new_unordered(matrix, compute_u, compute_v);
+
+        if !Self::use_special_always_ordered_svd3() && !Self::use_special_always_ordered_svd2() {
+            svd.sort_by_singular_values();
+        }
+
+        svd
+    }
+
+    /// Attempts to compute the Singular Value Decomposition of `matrix` using implicit shift.
+    /// The singular values are guaranteed to be sorted in descending order.
+    /// If this order is not required consider using `try_new_unordered`.
+    ///
+    /// # Arguments
+    ///
+    /// * `compute_u` − set this to `true` to enable the computation of left-singular vectors.
+    /// * `compute_v` − set this to `true` to enable the computation of right-singular vectors.
+    /// * `eps` − tolerance used to determine when a value converged to 0.
+    /// * `max_niter` − maximum total number of iterations performed by the algorithm. If this
+    /// number of iteration is exceeded, `None` is returned. If `niter == 0`, then the algorithm
+    /// continues indefinitely until convergence.
+    pub fn try_new(
+        matrix: OMatrix<T, R, C>,
+        compute_u: bool,
+        compute_v: bool,
+        eps: T::RealField,
+        max_niter: usize,
+    ) -> Option<Self> {
+        Self::try_new_unordered(matrix, compute_u, compute_v, eps, max_niter).map(|mut svd| {
+            if !Self::use_special_always_ordered_svd3() && !Self::use_special_always_ordered_svd2()
+            {
+                svd.sort_by_singular_values();
+            }
+
+            svd
+        })
+    }
+
+    /// Sort the estimated components of the SVD by its singular values in descending order.
+    /// Such an ordering is often implicitly required when the decompositions are used for estimation or fitting purposes.
+    /// Using this function is only required if `new_unordered` or `try_new_unorderd` were used and the specific sorting is required afterward.
+    pub fn sort_by_singular_values(&mut self) {
+        const VALUE_PROCESSED: usize = usize::MAX;
+
+        // Collect the singular values with their original index, ...
+        let mut singular_values = self.singular_values.map_with_location(|r, _, e| (e, r));
+        assert_ne!(
+            singular_values.data.shape().0.value(),
+            VALUE_PROCESSED,
+            "Too many singular values"
+        );
+
+        // ... sort the singular values, ...
+        singular_values
+            .as_mut_slice()
+            .sort_unstable_by(|(a, _), (b, _)| b.partial_cmp(a).expect("Singular value was NaN"));
+
+        // ... and store them.
+        self.singular_values
+            .zip_apply(&singular_values, |value, (new_value, _)| {
+                value.clone_from(&new_value)
+            });
+
+        // Calculate required permutations given the sorted indices.
+        // We need to identify all circles to calculate the required swaps.
+        let mut permutations =
+            crate::PermutationSequence::identity_generic(singular_values.data.shape().0);
+
+        for i in 0..singular_values.len() {
+            let mut index_1 = i;
+            let mut index_2 = singular_values[i].1;
+
+            // Check whether the value was already visited ...
+            while index_2 != VALUE_PROCESSED // ... or a "double swap" must be avoided.
+                && singular_values[index_2].1 != VALUE_PROCESSED
+            {
+                // Add the permutation ...
+                permutations.append_permutation(index_1, index_2);
+                // ... and mark the value as visited.
+                singular_values[index_1].1 = VALUE_PROCESSED;
+
+                index_1 = index_2;
+                index_2 = singular_values[index_1].1;
+            }
+        }
+
+        // Permute the optional components
+        if let Some(u) = self.u.as_mut() {
+            permutations.permute_columns(u);
+        }
+
+        if let Some(v_t) = self.v_t.as_mut() {
+            permutations.permute_rows(v_t);
+        }
+    }
 }

 impl<T: ComplexField, R: DimMin<C>, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S>
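`sort_by_singular_values` above reorders the companion `u`/`v_t` factors by following permutation cycles rather than re-sorting them directly. A standalone sketch of the same idea on plain slices (not part of this commit; the names and the `main` wrapper are illustrative):

fn sort_with_companion(values: &mut [f64], companion: &mut [char]) {
    const DONE: usize = usize::MAX;

    // Pair each value with its original position and sort descending.
    let mut tagged: Vec<(f64, usize)> =
        values.iter().copied().enumerate().map(|(i, v)| (v, i)).collect();
    tagged.sort_unstable_by(|(a, _), (b, _)| b.partial_cmp(a).expect("NaN"));

    for (dst, (v, _)) in tagged.iter().enumerate() {
        values[dst] = *v;
    }

    // Walk each permutation cycle once, swapping the companion data accordingly.
    for i in 0..tagged.len() {
        let mut index_1 = i;
        let mut index_2 = tagged[i].1;
        while index_2 != DONE && tagged[index_2].1 != DONE {
            companion.swap(index_1, index_2);
            tagged[index_1].1 = DONE;
            index_1 = index_2;
            index_2 = tagged[index_1].1;
        }
    }
}

fn main() {
    let mut values = [1.0, 3.0, 2.0];
    let mut tags = ['a', 'b', 'c'];
    sort_with_companion(&mut values, &mut tags);
    assert_eq!(values, [3.0, 2.0, 1.0]);
    assert_eq!(tags, ['b', 'c', 'a']);
}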
@@ -626,9 +795,11 @@ where
         + Allocator<T::RealField, DimDiff<DimMinimum<R, C>, U1>>,
 {
     /// Computes the singular values of this matrix.
+    /// The singular values are not guaranteed to be sorted in any particular order.
+    /// If a descending order is required, consider using `singular_values` instead.
     #[must_use]
-    pub fn singular_values(&self) -> OVector<T::RealField, DimMinimum<R, C>> {
-        SVD::new(self.clone_owned(), false, false).singular_values
+    pub fn singular_values_unordered(&self) -> OVector<T::RealField, DimMinimum<R, C>> {
+        SVD::new_unordered(self.clone_owned(), false, false).singular_values
     }

     /// Computes the rank of this matrix.
@@ -636,7 +807,7 @@ where
     /// All singular values below `eps` are considered equal to 0.
     #[must_use]
     pub fn rank(&self, eps: T::RealField) -> usize {
-        let svd = SVD::new(self.clone_owned(), false, false);
+        let svd = SVD::new_unordered(self.clone_owned(), false, false);
         svd.rank(eps)
     }
@@ -647,7 +818,31 @@ where
     where
         DefaultAllocator: Allocator<T, C, R>,
     {
-        SVD::new(self.clone_owned(), true, true).pseudo_inverse(eps)
+        SVD::new_unordered(self.clone_owned(), true, true).pseudo_inverse(eps)
+    }
+}
+
+impl<T: ComplexField, R: DimMin<C>, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S>
+where
+    DimMinimum<R, C>: DimSub<U1>,
+    DefaultAllocator: Allocator<T, R, C>
+        + Allocator<T, C>
+        + Allocator<T, R>
+        + Allocator<T, DimDiff<DimMinimum<R, C>, U1>>
+        + Allocator<T, DimMinimum<R, C>, C>
+        + Allocator<T, R, DimMinimum<R, C>>
+        + Allocator<T, DimMinimum<R, C>>
+        + Allocator<T::RealField, DimMinimum<R, C>>
+        + Allocator<T::RealField, DimDiff<DimMinimum<R, C>, U1>>
+        + Allocator<(usize, usize), DimMinimum<R, C>>
+        + Allocator<(T::RealField, usize), DimMinimum<R, C>>,
+{
+    /// Computes the singular values of this matrix.
+    /// The singular values are guaranteed to be sorted in descending order.
+    /// If this order is not required consider using `singular_values_unordered`.
+    #[must_use]
+    pub fn singular_values(&self) -> OVector<T::RealField, DimMinimum<R, C>> {
+        SVD::new(self.clone_owned(), false, false).singular_values
     }
 }
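Usage sketch (not part of this commit) contrasting the two accessors split above; the matrix and the `main` wrapper are illustrative only.

use nalgebra::Matrix3;

fn main() {
    let m = Matrix3::new(0.0, 2.0, 0.0, 0.0, 0.0, 3.0, 1.0, 0.0, 0.0);

    // Sorted in descending order (approximately 3, 2, 1 for this matrix).
    let sv = m.singular_values();
    assert!(sv[0] >= sv[1] && sv[1] >= sv[2]);

    // Same values, but with no ordering guarantee.
    let mut unordered: Vec<f64> = m.singular_values_unordered().iter().copied().collect();
    unordered.sort_by(|a, b| b.partial_cmp(a).unwrap());
    for (a, b) in sv.iter().zip(unordered.iter()) {
        assert!((a - b).abs() < 1.0e-7);
    }
}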
@@ -0,0 +1,50 @@
+use crate::{Matrix2, RealField, Vector2, SVD, U2};
+
+// Implementation of the 2D SVD from https://ieeexplore.ieee.org/document/486688
+// See also https://scicomp.stackexchange.com/questions/8899/robust-algorithm-for-2-times-2-svd
+pub fn svd_ordered2<T: RealField>(
+    m: &Matrix2<T>,
+    compute_u: bool,
+    compute_v: bool,
+) -> SVD<T, U2, U2> {
+    let half: T = crate::convert(0.5);
+    let one: T = crate::convert(1.0);
+
+    let e = (m.m11.clone() + m.m22.clone()) * half.clone();
+    let f = (m.m11.clone() - m.m22.clone()) * half.clone();
+    let g = (m.m21.clone() + m.m12.clone()) * half.clone();
+    let h = (m.m21.clone() - m.m12.clone()) * half.clone();
+    let q = (e.clone() * e.clone() + h.clone() * h.clone()).sqrt();
+    let r = (f.clone() * f.clone() + g.clone() * g.clone()).sqrt();
+
+    // Note that the singular values are always sorted because sx >= sy
+    // because q >= 0 and r >= 0.
+    let sx = q.clone() + r.clone();
+    let sy = q - r;
+    let sy_sign = if sy < T::zero() { -one.clone() } else { one };
+    let singular_values = Vector2::new(sx, sy * sy_sign.clone());
+
+    if compute_u || compute_v {
+        let a1 = g.atan2(f);
+        let a2 = h.atan2(e);
+        let theta = (a2.clone() - a1.clone()) * half.clone();
+        let phi = (a2 + a1) * half;
+        let (st, ct) = theta.sin_cos();
+        let (sp, cp) = phi.sin_cos();
+
+        let u = Matrix2::new(cp.clone(), -sp.clone(), sp, cp);
+        let v_t = Matrix2::new(ct.clone(), -st.clone(), st * sy_sign.clone(), ct * sy_sign);
+
+        SVD {
+            u: if compute_u { Some(u) } else { None },
+            singular_values,
+            v_t: if compute_v { Some(v_t) } else { None },
+        }
+    } else {
+        SVD {
+            u: None,
+            singular_values,
+            v_t: None,
+        }
+    }
+}
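In conventional notation, the closed form implemented by `svd_ordered2` above is (a restatement of the code, following the referenced paper):

$$e = \tfrac{m_{11}+m_{22}}{2},\quad f = \tfrac{m_{11}-m_{22}}{2},\quad g = \tfrac{m_{21}+m_{12}}{2},\quad h = \tfrac{m_{21}-m_{12}}{2},$$
$$q = \sqrt{e^2+h^2},\quad r = \sqrt{f^2+g^2},\qquad \sigma_1 = q + r,\quad \sigma_2 = |q - r|,$$
$$\theta = \tfrac{1}{2}\bigl(\operatorname{atan2}(h,e)-\operatorname{atan2}(g,f)\bigr),\qquad \phi = \tfrac{1}{2}\bigl(\operatorname{atan2}(h,e)+\operatorname{atan2}(g,f)\bigr),$$

with U a plane rotation by φ and V^T a plane rotation by θ (its second row sign-flipped when q − r < 0), so σ₁ ≥ σ₂ ≥ 0 holds without any explicit sorting.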
@@ -0,0 +1,55 @@
+use crate::{Matrix3, SVD, U3};
+use simba::scalar::RealField;
+
+// For the 3x3 case, on the GPU, it is much more efficient to compute the SVD
+// using an eigendecomposition followed by a QR decomposition.
+//
+// This is based on the paper "Computing the Singular Value Decomposition of 3 x 3 matrices with
+// minimal branching and elementary floating point operations" from McAdams, et al.
+pub fn svd_ordered3<T: RealField>(
+    m: &Matrix3<T>,
+    compute_u: bool,
+    compute_v: bool,
+    eps: T,
+    niter: usize,
+) -> Option<SVD<T, U3, U3>> {
+    let s = m.tr_mul(&m);
+    let mut v = s.try_symmetric_eigen(eps, niter)?.eigenvectors;
+    let mut b = m * &v;
+
+    // Sort singular values. This is a necessary step to ensure that
+    // the QR decompositions R matrix ends up diagonal.
+    let mut rho0 = b.column(0).norm_squared();
+    let mut rho1 = b.column(1).norm_squared();
+    let mut rho2 = b.column(2).norm_squared();
+
+    if rho0 < rho1 {
+        b.swap_columns(0, 1);
+        b.column_mut(1).neg_mut();
+        v.swap_columns(0, 1);
+        v.column_mut(1).neg_mut();
+        std::mem::swap(&mut rho0, &mut rho1);
+    }
+    if rho0 < rho2 {
+        b.swap_columns(0, 2);
+        b.column_mut(2).neg_mut();
+        v.swap_columns(0, 2);
+        v.column_mut(2).neg_mut();
+        std::mem::swap(&mut rho0, &mut rho2);
+    }
+    if rho1 < rho2 {
+        b.swap_columns(1, 2);
+        b.column_mut(2).neg_mut();
+        v.swap_columns(1, 2);
+        v.column_mut(2).neg_mut();
+        std::mem::swap(&mut rho0, &mut rho2);
+    }
+
+    let qr = b.qr();
+
+    Some(SVD {
+        u: if compute_u { Some(qr.q()) } else { None },
+        singular_values: qr.diag_internal().map(|e| e.abs()),
+        v_t: if compute_v { Some(v.transpose()) } else { None },
+    })
+}
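The idea behind `svd_ordered3` above, restated (under the assumption that the symmetric eigendecomposition converges):

$$A^{\mathsf T} A = V \Sigma^2 V^{\mathsf T}, \qquad B = A V = U \Sigma,$$

so after sorting the columns of B by norm, a QR decomposition B = QR yields U = Q and Σ = |diag(R)| (the column sort is what makes R effectively diagonal), while V^T is simply the transpose of the eigenvector matrix.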
@@ -8,3 +8,5 @@ mod v015;
 mod v016;
 #[cfg(feature = "glam017")]
 mod v017;
+#[cfg(feature = "glam018")]
+mod v018;
@@ -0,0 +1,18 @@
+#[path = "../common/glam_isometry.rs"]
+mod glam_isometry;
+#[path = "../common/glam_matrix.rs"]
+mod glam_matrix;
+#[path = "../common/glam_point.rs"]
+mod glam_point;
+#[path = "../common/glam_quaternion.rs"]
+mod glam_quaternion;
+#[path = "../common/glam_rotation.rs"]
+mod glam_rotation;
+#[path = "../common/glam_similarity.rs"]
+mod glam_similarity;
+#[path = "../common/glam_translation.rs"]
+mod glam_translation;
+#[path = "../common/glam_unit_complex.rs"]
+mod glam_unit_complex;
+
+pub(self) use glam018 as glam;
@@ -80,8 +80,8 @@ fn iter() {
 #[test]
 fn debug_output_corresponds_to_data_container() {
     let m = Matrix2::new(1.0, 2.0, 3.0, 4.0);
-    let output_stable = "Matrix { data: [[1, 3], [2, 4]] }"; // Current output on the stable channel.
-    let output_nightly = "Matrix { data: [[1.0, 3.0], [2.0, 4.0]] }"; // Current output on the nightly channel.
+    let output_stable = "[[1, 3], [2, 4]]"; // Current output on the stable channel.
+    let output_nightly = "[[1.0, 3.0], [2.0, 4.0]]"; // Current output on the nightly channel.
     let current_output = format!("{:?}", m);
     dbg!(output_stable);
     dbg!(output_nightly);
@@ -1,7 +1,7 @@
 #![cfg(feature = "proptest-support")]
 #![allow(non_snake_case)]

-use na::{DualQuaternion, Point3, UnitDualQuaternion, Vector3};
+use na::{DualQuaternion, Point3, Unit, UnitDualQuaternion, UnitQuaternion, Vector3};

 use crate::proptest::*;
 use proptest::{prop_assert, proptest};
@@ -74,6 +74,98 @@ proptest!(
         prop_assert!(relative_eq!((dq * t) * p, dq * (t * p), epsilon = 1.0e-7));
     }

+    #[cfg_attr(rustfmt, rustfmt_skip)]
+    #[test]
+    fn sclerp_is_defined_for_identical_orientations(
+        dq in unit_dual_quaternion(),
+        s in -1.0f64..2.0f64,
+        t in translation3(),
+    ) {
+        // Should not panic.
+        prop_assert!(relative_eq!(dq.sclerp(&dq, 0.0), dq, epsilon = 1.0e-7));
+        prop_assert!(relative_eq!(dq.sclerp(&dq, 0.5), dq, epsilon = 1.0e-7));
+        prop_assert!(relative_eq!(dq.sclerp(&dq, 1.0), dq, epsilon = 1.0e-7));
+        prop_assert!(relative_eq!(dq.sclerp(&dq, s), dq, epsilon = 1.0e-7));
+
+        let unit = UnitDualQuaternion::identity();
+        prop_assert!(relative_eq!(unit.sclerp(&unit, 0.0), unit, epsilon = 1.0e-7));
+        prop_assert!(relative_eq!(unit.sclerp(&unit, 0.5), unit, epsilon = 1.0e-7));
+        prop_assert!(relative_eq!(unit.sclerp(&unit, 1.0), unit, epsilon = 1.0e-7));
+        prop_assert!(relative_eq!(unit.sclerp(&unit, s), unit, epsilon = 1.0e-7));
+
+        // ScLERPing two unit dual quaternions with nearly equal rotation
+        // components should result in a unit dual quaternion with a rotation
+        // component nearly equal to either input.
+        let dq2 = t * dq;
+        prop_assert!(relative_eq!(dq.sclerp(&dq2, 0.0).real, dq.real, epsilon = 1.0e-7));
+        prop_assert!(relative_eq!(dq.sclerp(&dq2, 0.5).real, dq.real, epsilon = 1.0e-7));
+        prop_assert!(relative_eq!(dq.sclerp(&dq2, 1.0).real, dq.real, epsilon = 1.0e-7));
+        prop_assert!(relative_eq!(dq.sclerp(&dq2, s).real, dq.real, epsilon = 1.0e-7));
+
+        // ScLERPing two unit dual quaternions with nearly equal rotation
+        // components should result in a unit dual quaternion with a translation
+        // component which is nearly equal to linearly interpolating the
+        // translation components of the inputs.
+        prop_assert!(relative_eq!(
+            dq.sclerp(&dq2, s).translation().vector,
+            dq.translation().vector.lerp(&dq2.translation().vector, s),
+            epsilon = 1.0e-7
+        ));
+
+        let unit2 = t * unit;
+        prop_assert!(relative_eq!(unit.sclerp(&unit2, 0.0).real, unit.real, epsilon = 1.0e-7));
+        prop_assert!(relative_eq!(unit.sclerp(&unit2, 0.5).real, unit.real, epsilon = 1.0e-7));
+        prop_assert!(relative_eq!(unit.sclerp(&unit2, 1.0).real, unit.real, epsilon = 1.0e-7));
+        prop_assert!(relative_eq!(unit.sclerp(&unit2, s).real, unit.real, epsilon = 1.0e-7));
+
+        prop_assert!(relative_eq!(
+            unit.sclerp(&unit2, s).translation().vector,
+            unit.translation().vector.lerp(&unit2.translation().vector, s),
+            epsilon = 1.0e-7
+        ));
+    }
+
+    #[cfg_attr(rustfmt, rustfmt_skip)]
+    #[test]
+    fn sclerp_is_not_defined_for_opposite_orientations(
+        dq in unit_dual_quaternion(),
+        s in 0.1f64..0.9f64,
+        t in translation3(),
+        t2 in translation3(),
+        v in vector3(),
+    ) {
+        let iso = dq.to_isometry();
+        let rot = iso.rotation;
+        if let Some((axis, angle)) = rot.axis_angle() {
+            let flipped = UnitQuaternion::from_axis_angle(&axis, angle + std::f64::consts::PI);
+            let dqf = flipped * rot.inverse() * dq.clone();
+            prop_assert!(dq.try_sclerp(&dqf, 0.5, 1.0e-7).is_none());
+            prop_assert!(dq.try_sclerp(&dqf, s, 1.0e-7).is_none());
+        }
+
+        let dq2 = t * dq;
+        let iso2 = dq2.to_isometry();
+        let rot2 = iso2.rotation;
+        if let Some((axis, angle)) = rot2.axis_angle() {
+            let flipped = UnitQuaternion::from_axis_angle(&axis, angle + std::f64::consts::PI);
+            let dq3f = t2 * flipped * rot.inverse() * dq.clone();
+            prop_assert!(dq2.try_sclerp(&dq3f, 0.5, 1.0e-7).is_none());
+            prop_assert!(dq2.try_sclerp(&dq3f, s, 1.0e-7).is_none());
+        }
+
+        if let Some(axis) = Unit::try_new(v, 1.0e-7) {
+            let unit = UnitDualQuaternion::identity();
+            let flip = UnitQuaternion::from_axis_angle(&axis, std::f64::consts::PI);
+            let unitf = flip * unit;
+            prop_assert!(unit.try_sclerp(&unitf, 0.5, 1.0e-7).is_none());
+            prop_assert!(unit.try_sclerp(&unitf, s, 1.0e-7).is_none());
+
+            let unit2f = t * unit * flip;
+            prop_assert!(unit.try_sclerp(&unit2f, 0.5, 1.0e-7).is_none());
+            prop_assert!(unit.try_sclerp(&unit2f, s, 1.0e-7).is_none());
+        }
+    }
+
     #[cfg_attr(rustfmt, rustfmt_skip)]
     #[test]
     fn all_op_exist(
@@ -33,3 +33,10 @@ mod proptest;
 //#[cfg(all(feature = "debug", feature = "compare", feature = "rand"))]
 //#[cfg(feature = "sparse")]
 //mod sparse;
+
+mod utils {
+    /// Checks if a slice is sorted in descending order.
+    pub fn is_sorted_descending<T: PartialOrd>(slice: &[T]) -> bool {
+        slice.windows(2).all(|elts| elts[0] >= elts[1])
+    }
+}
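A small sketch (not part of this commit) of how this helper pairs with the ordered SVD API; the helper body is copied from the hunk above and the diagonal matrix is illustrative only.

use nalgebra::Matrix3;

fn is_sorted_descending<T: PartialOrd>(slice: &[T]) -> bool {
    slice.windows(2).all(|elts| elts[0] >= elts[1])
}

fn main() {
    let m = Matrix3::new(4.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 9.0);
    assert!(is_sorted_descending(m.singular_values().as_slice()));
}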